isabelle update_cartouches;
authorwenzelm
Thu, 23 Jul 2015 14:25:05 +0200
changeset 60770 240563fbf41d
parent 60769 cf7f3465eaf1
child 60771 8558e4a37b48
isabelle update_cartouches;
src/CCL/CCL.thy
src/CCL/Fix.thy
src/CCL/Gfp.thy
src/CCL/Hered.thy
src/CCL/Lfp.thy
src/CCL/Set.thy
src/CCL/Term.thy
src/CCL/Trancl.thy
src/CCL/Type.thy
src/CCL/Wfd.thy
src/CCL/ex/Flag.thy
src/CCL/ex/List.thy
src/CCL/ex/Nat.thy
src/CCL/ex/Stream.thy
src/CTT/Arith.thy
src/CTT/Bool.thy
src/CTT/CTT.thy
src/CTT/Main.thy
src/CTT/ex/Elimination.thy
src/CTT/ex/Equality.thy
src/CTT/ex/Typechecking.thy
src/FOL/FOL.thy
src/FOL/IFOL.thy
src/FOL/ex/Classical.thy
src/FOL/ex/First_Order_Logic.thy
src/FOL/ex/Foundation.thy
src/FOL/ex/If.thy
src/FOL/ex/Intro.thy
src/FOL/ex/Intuitionistic.thy
src/FOL/ex/Locale_Test/Locale_Test.thy
src/FOL/ex/Locale_Test/Locale_Test1.thy
src/FOL/ex/Miniscope.thy
src/FOL/ex/Nat.thy
src/FOL/ex/Nat_Class.thy
src/FOL/ex/Natural_Numbers.thy
src/FOL/ex/Prolog.thy
src/FOL/ex/Propositional_Cla.thy
src/FOL/ex/Propositional_Int.thy
src/FOL/ex/Quantifiers_Cla.thy
src/FOL/ex/Quantifiers_Int.thy
src/FOLP/FOLP.thy
src/FOLP/IFOLP.thy
src/FOLP/ex/Classical.thy
src/FOLP/ex/Foundation.thy
src/FOLP/ex/If.thy
src/FOLP/ex/Intro.thy
src/FOLP/ex/Intuitionistic.thy
src/FOLP/ex/Nat.thy
src/FOLP/ex/Propositional_Cla.thy
src/FOLP/ex/Propositional_Int.thy
src/FOLP/ex/Quantifiers_Cla.thy
src/FOLP/ex/Quantifiers_Int.thy
src/LCF/LCF.thy
src/LCF/ex/Ex1.thy
src/LCF/ex/Ex2.thy
src/LCF/ex/Ex3.thy
src/LCF/ex/Ex4.thy
src/Sequents/ILL.thy
src/Sequents/LK.thy
src/Sequents/LK/Nat.thy
src/Sequents/LK/Propositional.thy
src/Sequents/LK0.thy
src/Sequents/Modal0.thy
src/Sequents/S4.thy
src/Sequents/S43.thy
src/Sequents/Sequents.thy
src/Sequents/T.thy
src/Sequents/Washing.thy
src/ZF/AC.thy
src/ZF/AC/Cardinal_aux.thy
src/ZF/AC/DC.thy
src/ZF/AC/HH.thy
src/ZF/Arith.thy
src/ZF/ArithSimp.thy
src/ZF/Bin.thy
src/ZF/Bool.thy
src/ZF/Cardinal.thy
src/ZF/CardinalArith.thy
src/ZF/Cardinal_AC.thy
src/ZF/Coind/Language.thy
src/ZF/Constructible/AC_in_L.thy
src/ZF/Constructible/DPow_absolute.thy
src/ZF/Constructible/Datatype_absolute.thy
src/ZF/Constructible/Formula.thy
src/ZF/Constructible/Internalize.thy
src/ZF/Constructible/L_axioms.thy
src/ZF/Constructible/MetaExists.thy
src/ZF/Constructible/Normal.thy
src/ZF/Constructible/Rank.thy
src/ZF/Constructible/Rank_Separation.thy
src/ZF/Constructible/Rec_Separation.thy
src/ZF/Constructible/Reflection.thy
src/ZF/Constructible/Relative.thy
src/ZF/Constructible/Satisfies_absolute.thy
src/ZF/Constructible/Separation.thy
src/ZF/Constructible/WF_absolute.thy
src/ZF/Constructible/WFrec.thy
src/ZF/Constructible/Wellorderings.thy
src/ZF/Datatype_ZF.thy
src/ZF/Epsilon.thy
src/ZF/EquivClass.thy
src/ZF/Finite.thy
src/ZF/Fixedpt.thy
src/ZF/IMP/Com.thy
src/ZF/IMP/Denotation.thy
src/ZF/IMP/Equiv.thy
src/ZF/Induct/Acc.thy
src/ZF/Induct/Binary_Trees.thy
src/ZF/Induct/Brouwer.thy
src/ZF/Induct/Comb.thy
src/ZF/Induct/Datatypes.thy
src/ZF/Induct/FoldSet.thy
src/ZF/Induct/ListN.thy
src/ZF/Induct/Multiset.thy
src/ZF/Induct/Mutil.thy
src/ZF/Induct/Ntree.thy
src/ZF/Induct/Primrec.thy
src/ZF/Induct/PropLog.thy
src/ZF/Induct/Rmap.thy
src/ZF/Induct/Term.thy
src/ZF/Induct/Tree_Forest.thy
src/ZF/Inductive_ZF.thy
src/ZF/InfDatatype.thy
src/ZF/IntDiv_ZF.thy
src/ZF/Int_ZF.thy
src/ZF/List_ZF.thy
src/ZF/Main_ZF.thy
src/ZF/Nat_ZF.thy
src/ZF/OrdQuant.thy
src/ZF/Order.thy
src/ZF/OrderArith.thy
src/ZF/OrderType.thy
src/ZF/Ordinal.thy
src/ZF/Perm.thy
src/ZF/QPair.thy
src/ZF/QUniv.thy
src/ZF/Resid/Residuals.thy
src/ZF/Sum.thy
src/ZF/Trancl.thy
src/ZF/UNITY/AllocBase.thy
src/ZF/UNITY/AllocImpl.thy
src/ZF/UNITY/Comp.thy
src/ZF/UNITY/Constrains.thy
src/ZF/UNITY/Distributor.thy
src/ZF/UNITY/FP.thy
src/ZF/UNITY/Follows.thy
src/ZF/UNITY/GenPrefix.thy
src/ZF/UNITY/Guar.thy
src/ZF/UNITY/Increasing.thy
src/ZF/UNITY/Merge.thy
src/ZF/UNITY/Monotonicity.thy
src/ZF/UNITY/MultisetSum.thy
src/ZF/UNITY/Mutex.thy
src/ZF/UNITY/State.thy
src/ZF/UNITY/SubstAx.thy
src/ZF/UNITY/UNITY.thy
src/ZF/UNITY/Union.thy
src/ZF/UNITY/WFair.thy
src/ZF/Univ.thy
src/ZF/WF.thy
src/ZF/ZF.thy
src/ZF/Zorn.thy
src/ZF/equalities.thy
src/ZF/ex/CoUnit.thy
src/ZF/ex/Group.thy
src/ZF/ex/LList.thy
src/ZF/ex/Limit.thy
src/ZF/ex/Primes.thy
src/ZF/ex/Ring.thy
src/ZF/ex/misc.thy
src/ZF/func.thy
src/ZF/pair.thy
src/ZF/upair.thy
--- a/src/CCL/CCL.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CCL/CCL.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,18 +3,18 @@
     Copyright   1993  University of Cambridge
 *)
 
-section {* Classical Computational Logic for Untyped Lambda Calculus
-  with reduction to weak head-normal form *}
+section \<open>Classical Computational Logic for Untyped Lambda Calculus
+  with reduction to weak head-normal form\<close>
 
 theory CCL
 imports Gfp
 begin
 
-text {*
+text \<open>
   Based on FOL extended with set collection, a primitive higher-order
   logic.  HOL is too strong - descriptions prevent a type of programs
   being defined which contains only executable terms.
-*}
+\<close>
 
 class prog = "term"
 default_sort prog
@@ -150,7 +150,7 @@
 definition Trm :: "i \<Rightarrow> o"
   where "Trm(t) == \<not> Dvg(t)"
 
-text {*
+text \<open>
 Would be interesting to build a similar theory for a typed programming language:
     ie.     true :: bool,      fix :: ('a\<Rightarrow>'a)\<Rightarrow>'a  etc......
 
@@ -158,7 +158,7 @@
 What are the advantages of this approach?
         - less axiomatic
         - wfd induction / coinduction and fixed point induction available
-*}
+\<close>
 
 
 lemmas ccl_data_defs = apply_def fix_def
@@ -166,7 +166,7 @@
 declare po_refl [simp]
 
 
-subsection {* Congruence Rules *}
+subsection \<open>Congruence Rules\<close>
 
 (*similar to AP_THM in Gordon's HOL*)
 lemma fun_cong: "(f::'a\<Rightarrow>'b) = g \<Longrightarrow> f(x)=g(x)"
@@ -184,7 +184,7 @@
 lemmas caseBs = caseBtrue caseBfalse caseBpair caseBlam caseBbot
 
 
-subsection {* Termination and Divergence *}
+subsection \<open>Termination and Divergence\<close>
 
 lemma Trm_iff: "Trm(t) \<longleftrightarrow> \<not> t = bot"
   by (simp add: Trm_def Dvg_def)
@@ -193,12 +193,12 @@
   by (simp add: Trm_def Dvg_def)
 
 
-subsection {* Constructors are injective *}
+subsection \<open>Constructors are injective\<close>
 
 lemma eq_lemma: "\<lbrakk>x=a; y=b; x=y\<rbrakk> \<Longrightarrow> a=b"
   by simp
 
-ML {*
+ML \<open>
   fun inj_rl_tac ctxt rews i =
     let
       fun mk_inj_lemmas r = [@{thm arg_cong}] RL [r RS (r RS @{thm eq_lemma})]
@@ -209,11 +209,11 @@
         eresolve_tac ctxt inj_lemmas i ORELSE
         asm_simp_tac (ctxt addsimps rews) i))
     end;
-*}
+\<close>
 
-method_setup inj_rl = {*
+method_setup inj_rl = \<open>
   Attrib.thms >> (fn rews => fn ctxt => SIMPLE_METHOD' (inj_rl_tac ctxt rews))
-*}
+\<close>
 
 lemma ccl_injs:
   "<a,b> = <a',b'> \<longleftrightarrow> (a=a' \<and> b=b')"
@@ -225,12 +225,12 @@
   by (simp add: ccl_injs)
 
 
-subsection {* Constructors are distinct *}
+subsection \<open>Constructors are distinct\<close>
 
 lemma lem: "t=t' \<Longrightarrow> case(t,b,c,d,e) = case(t',b,c,d,e)"
   by simp
 
-ML {*
+ML \<open>
 local
   fun pairs_of f x [] = []
     | pairs_of f x (y::ys) = (f x y) :: (f y x) :: (pairs_of f x ys)
@@ -259,9 +259,9 @@
   fun mk_lemmas rls = maps mk_lemma (mk_combs pair rls)
   fun mk_dstnct_rls thy xs = mk_combs (mk_thm_str thy) xs
 end
-*}
+\<close>
 
-ML {*
+ML \<open>
 val caseB_lemmas = mk_lemmas @{thms caseBs}
 
 val ccl_dstncts =
@@ -296,14 +296,14 @@
 ML_Thms.bind_thms ("ccl_rews", @{thms caseBs} @ @{thms ccl_injs} @ ccl_dstncts);
 ML_Thms.bind_thms ("ccl_dstnctsEs", ccl_dstncts RL [@{thm notE}]);
 ML_Thms.bind_thms ("ccl_injDs", XH_to_Ds @{thms ccl_injs});
-*}
+\<close>
 
 lemmas [simp] = ccl_rews
   and [elim!] = pair_inject ccl_dstnctsEs
   and [dest!] = ccl_injDs
 
 
-subsection {* Facts from gfp Definition of @{text "[="} and @{text "="} *}
+subsection \<open>Facts from gfp Definition of @{text "[="} and @{text "="}\<close>
 
 lemma XHlemma1: "\<lbrakk>A=B; a:B \<longleftrightarrow> P\<rbrakk> \<Longrightarrow> a:A \<longleftrightarrow> P"
   by simp
@@ -312,7 +312,7 @@
   by blast
 
 
-subsection {* Pre-Order *}
+subsection \<open>Pre-Order\<close>
 
 lemma POgen_mono: "mono(\<lambda>X. POgen(X))"
   apply (unfold POgen_def SIM_def)
@@ -422,7 +422,7 @@
 lemmas npo_rls = npo_pair_lam npo_lam_pair npo_rls1
 
 
-subsection {* Coinduction for @{text "[="} *}
+subsection \<open>Coinduction for @{text "[="}\<close>
 
 lemma po_coinduct: "\<lbrakk><t,u> : R; R <= POgen(R)\<rbrakk> \<Longrightarrow> t [= u"
   apply (rule PO_def [THEN def_coinduct, THEN PO_iff [THEN iffD2]])
@@ -430,7 +430,7 @@
   done
 
 
-subsection {* Equality *}
+subsection \<open>Equality\<close>
 
 lemma EQgen_mono: "mono(\<lambda>X. EQgen(X))"
   apply (unfold EQgen_def SIM_def)
@@ -473,14 +473,14 @@
   apply (rule EQgen_mono | assumption)+
   done
 
-method_setup eq_coinduct3 = {*
+method_setup eq_coinduct3 = \<open>
   Scan.lift Args.name_inner_syntax >> (fn s => fn ctxt =>
     SIMPLE_METHOD'
       (Rule_Insts.res_inst_tac ctxt [((("R", 0), Position.none), s)] [] @{thm eq_coinduct3}))
-*}
+\<close>
 
 
-subsection {* Untyped Case Analysis and Other Facts *}
+subsection \<open>Untyped Case Analysis and Other Facts\<close>
 
 lemma cond_eta: "(EX f. t=lam x. f(x)) \<Longrightarrow> t = lam x.(t ` x)"
   by (auto simp: apply_def)
--- a/src/CCL/Fix.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CCL/Fix.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1993  University of Cambridge
 *)
 
-section {* Tentative attempt at including fixed point induction; justified by Smith *}
+section \<open>Tentative attempt at including fixed point induction; justified by Smith\<close>
 
 theory Fix
 imports Type
@@ -18,7 +18,7 @@
   INCL_subst: "INCL(P) \<Longrightarrow> INCL(\<lambda>x. P((g::i\<Rightarrow>i)(x)))"
 
 
-subsection {* Fixed Point Induction *}
+subsection \<open>Fixed Point Induction\<close>
 
 lemma fix_ind:
   assumes base: "P(bot)"
@@ -33,7 +33,7 @@
   done
 
 
-subsection {* Inclusive Predicates *}
+subsection \<open>Inclusive Predicates\<close>
 
 lemma inclXH: "INCL(P) \<longleftrightarrow> (ALL f. (ALL n:Nat. P(f ^ n ` bot)) \<longrightarrow> P(fix(f)))"
   by (simp add: INCL_def)
@@ -48,7 +48,7 @@
   by (blast dest: inclD)
 
 
-subsection {* Lemmas for Inclusive Predicates *}
+subsection \<open>Lemmas for Inclusive Predicates\<close>
 
 lemma npo_INCL: "INCL(\<lambda>x. \<not> a(x) [= t)"
   apply (rule inclI)
@@ -77,7 +77,7 @@
   done
 
 
-subsection {* Derivation of Reachability Condition *}
+subsection \<open>Derivation of Reachability Condition\<close>
 
 (* Fixed points of idgen *)
 
--- a/src/CCL/Gfp.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CCL/Gfp.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1992  University of Cambridge
 *)
 
-section {* Greatest fixed points *}
+section \<open>Greatest fixed points\<close>
 
 theory Gfp
 imports Lfp
@@ -90,7 +90,7 @@
   done
 
 
-subsection {* Definition forms of @{text "gfp_Tarski"}, to control unfolding *}
+subsection \<open>Definition forms of @{text "gfp_Tarski"}, to control unfolding\<close>
 
 lemma def_gfp_Tarski: "\<lbrakk>h == gfp(f); mono(f)\<rbrakk> \<Longrightarrow> h = f(h)"
   apply unfold
--- a/src/CCL/Hered.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CCL/Hered.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,17 +3,17 @@
     Copyright   1993  University of Cambridge
 *)
 
-section {* Hereditary Termination -- cf. Martin Lo\"f *}
+section \<open>Hereditary Termination -- cf. Martin Lo\"f\<close>
 
 theory Hered
 imports Type
 begin
 
-text {*
+text \<open>
   Note that this is based on an untyped equality and so @{text "lam
   x. b(x)"} is only hereditarily terminating if @{text "ALL x. b(x)"}
   is.  Not so useful for functions!
-*}
+\<close>
 
 definition HTTgen :: "i set \<Rightarrow> i set" where
   "HTTgen(R) ==
@@ -24,7 +24,7 @@
   where "HTT == gfp(HTTgen)"
 
 
-subsection {* Hereditary Termination *}
+subsection \<open>Hereditary Termination\<close>
 
 lemma HTTgen_mono: "mono(\<lambda>X. HTTgen(X))"
   apply (unfold HTTgen_def)
@@ -47,7 +47,7 @@
   done
 
 
-subsection {* Introduction Rules for HTT *}
+subsection \<open>Introduction Rules for HTT\<close>
 
 lemma HTT_bot: "\<not> bot : HTT"
   by (blast dest: HTTXH [THEN iffD1])
@@ -83,7 +83,7 @@
 lemmas HTT_rews = HTT_rews1 HTT_rews2
 
 
-subsection {* Coinduction for HTT *}
+subsection \<open>Coinduction for HTT\<close>
 
 lemma HTT_coinduct: "\<lbrakk>t : R; R <= HTTgen(R)\<rbrakk> \<Longrightarrow> t : HTT"
   apply (erule HTT_def [THEN def_coinduct])
@@ -111,7 +111,7 @@
   unfolding data_defs by (genIs HTTgenXH HTTgen_mono)+
 
 
-subsection {* Formation Rules for Types *}
+subsection \<open>Formation Rules for Types\<close>
 
 lemma UnitF: "Unit <= HTT"
   by (simp add: subsetXH UnitXH HTT_rews)
--- a/src/CCL/Lfp.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CCL/Lfp.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1992  University of Cambridge
 *)
 
-section {* The Knaster-Tarski Theorem *}
+section \<open>The Knaster-Tarski Theorem\<close>
 
 theory Lfp
 imports Set
--- a/src/CCL/Set.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CCL/Set.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -1,4 +1,4 @@
-section {* Extending FOL by a modified version of HOL set theory *}
+section \<open>Extending FOL by a modified version of HOL set theory\<close>
 
 theory Set
 imports "~~/src/FOL/FOL"
@@ -83,7 +83,7 @@
   done
 
 
-subsection {* Bounded quantifiers *}
+subsection \<open>Bounded quantifiers\<close>
 
 lemma ballI: "(\<And>x. x:A \<Longrightarrow> P(x)) \<Longrightarrow> ALL x:A. P(x)"
   by (simp add: Ball_def)
@@ -108,7 +108,7 @@
   by (blast intro: ballI)
 
 
-subsection {* Congruence rules *}
+subsection \<open>Congruence rules\<close>
 
 lemma ball_cong:
   "\<lbrakk>A = A'; \<And>x. x:A' \<Longrightarrow> P(x) \<longleftrightarrow> P'(x)\<rbrakk> \<Longrightarrow>
@@ -121,7 +121,7 @@
   by (blast intro: bexI elim: bexE)
 
 
-subsection {* Rules for subsets *}
+subsection \<open>Rules for subsets\<close>
 
 lemma subsetI: "(\<And>x. x:A \<Longrightarrow> x:B) \<Longrightarrow> A <= B"
   unfolding subset_def by (blast intro: ballI)
@@ -141,7 +141,7 @@
   by (blast intro: subsetI dest: subsetD)
 
 
-subsection {* Rules for equality *}
+subsection \<open>Rules for equality\<close>
 
 (*Anti-symmetry of the subset relation*)
 lemma subset_antisym: "\<lbrakk>A <= B; B <= A\<rbrakk> \<Longrightarrow> A = B"
@@ -164,7 +164,7 @@
   by (blast intro: equalityI subsetI CollectI dest: CollectD)
 
 
-subsection {* Rules for binary union *}
+subsection \<open>Rules for binary union\<close>
 
 lemma UnI1: "c:A \<Longrightarrow> c : A Un B"
   and UnI2: "c:B \<Longrightarrow> c : A Un B"
@@ -178,7 +178,7 @@
   unfolding Un_def by (blast dest: CollectD)
 
 
-subsection {* Rules for small intersection *}
+subsection \<open>Rules for small intersection\<close>
 
 lemma IntI: "\<lbrakk>c:A; c:B\<rbrakk> \<Longrightarrow> c : A Int B"
   unfolding Int_def by (blast intro: CollectI)
@@ -191,7 +191,7 @@
   by (blast dest: IntD1 IntD2)
 
 
-subsection {* Rules for set complement *}
+subsection \<open>Rules for set complement\<close>
 
 lemma ComplI: "(c:A \<Longrightarrow> False) \<Longrightarrow> c : Compl(A)"
   unfolding Compl_def by (blast intro: CollectI)
@@ -205,7 +205,7 @@
 lemmas ComplE = ComplD [elim_format]
 
 
-subsection {* Empty sets *}
+subsection \<open>Empty sets\<close>
 
 lemma empty_eq: "{x. False} = {}"
   by (simp add: empty_def)
@@ -225,7 +225,7 @@
 qed
 
 
-subsection {* Singleton sets *}
+subsection \<open>Singleton sets\<close>
 
 lemma singletonI: "a : {a}"
   unfolding singleton_def by (blast intro: CollectI)
@@ -236,7 +236,7 @@
 lemmas singletonE = singletonD [elim_format]
 
 
-subsection {* Unions of families *}
+subsection \<open>Unions of families\<close>
 
 (*The order of the premises presupposes that A is rigid; b may be flexible*)
 lemma UN_I: "\<lbrakk>a:A; b: B(a)\<rbrakk> \<Longrightarrow> b: (UN x:A. B(x))"
@@ -249,7 +249,7 @@
   by (simp add: UNION_def cong: bex_cong)
 
 
-subsection {* Intersections of families *}
+subsection \<open>Intersections of families\<close>
 
 lemma INT_I: "(\<And>x. x:A \<Longrightarrow> b: B(x)) \<Longrightarrow> b : (INT x:A. B(x))"
   unfolding INTER_def by (blast intro: CollectI ballI)
@@ -265,7 +265,7 @@
   by (simp add: INTER_def cong: ball_cong)
 
 
-subsection {* Rules for Unions *}
+subsection \<open>Rules for Unions\<close>
 
 (*The order of the premises presupposes that C is rigid; A may be flexible*)
 lemma UnionI: "\<lbrakk>X:C; A:X\<rbrakk> \<Longrightarrow> A : Union(C)"
@@ -275,7 +275,7 @@
   unfolding Union_def by (blast elim: UN_E)
 
 
-subsection {* Rules for Inter *}
+subsection \<open>Rules for Inter\<close>
 
 lemma InterI: "(\<And>X. X:C \<Longrightarrow> A:X) \<Longrightarrow> A : Inter(C)"
   unfolding Inter_def by (blast intro: INT_I)
@@ -290,9 +290,9 @@
   unfolding Inter_def by (blast elim: INT_E)
 
 
-section {* Derived rules involving subsets; Union and Intersection as lattice operations *}
+section \<open>Derived rules involving subsets; Union and Intersection as lattice operations\<close>
 
-subsection {* Big Union -- least upper bound of a set *}
+subsection \<open>Big Union -- least upper bound of a set\<close>
 
 lemma Union_upper: "B:A \<Longrightarrow> B <= Union(A)"
   by (blast intro: subsetI UnionI)
@@ -301,7 +301,7 @@
   by (blast intro: subsetI dest: subsetD elim: UnionE)
 
 
-subsection {* Big Intersection -- greatest lower bound of a set *}
+subsection \<open>Big Intersection -- greatest lower bound of a set\<close>
 
 lemma Inter_lower: "B:A \<Longrightarrow> Inter(A) <= B"
   by (blast intro: subsetI dest: InterD)
@@ -310,7 +310,7 @@
   by (blast intro: subsetI InterI dest: subsetD)
 
 
-subsection {* Finite Union -- the least upper bound of 2 sets *}
+subsection \<open>Finite Union -- the least upper bound of 2 sets\<close>
 
 lemma Un_upper1: "A <= A Un B"
   by (blast intro: subsetI UnI1)
@@ -322,7 +322,7 @@
   by (blast intro: subsetI elim: UnE dest: subsetD)
 
 
-subsection {* Finite Intersection -- the greatest lower bound of 2 sets *}
+subsection \<open>Finite Intersection -- the greatest lower bound of 2 sets\<close>
 
 lemma Int_lower1: "A Int B <= A"
   by (blast intro: subsetI elim: IntE)
@@ -334,7 +334,7 @@
   by (blast intro: subsetI IntI dest: subsetD)
 
 
-subsection {* Monotonicity *}
+subsection \<open>Monotonicity\<close>
 
 lemma monoI: "(\<And>A B. A <= B \<Longrightarrow> f(A) <= f(B)) \<Longrightarrow> mono(f)"
   unfolding mono_def by blast
@@ -349,7 +349,7 @@
   by (blast intro: Int_greatest dest: monoD intro: Int_lower1 Int_lower2)
 
 
-subsection {* Automated reasoning setup *}
+subsection \<open>Automated reasoning setup\<close>
 
 lemmas [intro!] = ballI subsetI InterI INT_I CollectI ComplI IntI UnCI singletonI
   and [intro] = bexI UnionI UN_I
@@ -369,9 +369,9 @@
   and [cong] = ball_cong bex_cong INT_cong UN_cong
 
 
-section {* Equalities involving union, intersection, inclusion, etc. *}
+section \<open>Equalities involving union, intersection, inclusion, etc.\<close>
 
-subsection {* Binary Intersection *}
+subsection \<open>Binary Intersection\<close>
 
 lemma Int_absorb: "A Int A = A"
   by (blast intro: equalityI)
@@ -389,7 +389,7 @@
   by (blast intro: equalityI elim: equalityE)
 
 
-subsection {* Binary Union *}
+subsection \<open>Binary Union\<close>
 
 lemma Un_absorb: "A Un A = A"
   by (blast intro: equalityI)
@@ -411,7 +411,7 @@
   by (blast intro: equalityI elim: equalityE)
 
 
-subsection {* Simple properties of @{text "Compl"} -- complement of a set *}
+subsection \<open>Simple properties of @{text "Compl"} -- complement of a set\<close>
 
 lemma Compl_disjoint: "A Int Compl(A) = {x. False}"
   by (blast intro: equalityI)
@@ -439,7 +439,7 @@
   by (blast intro: equalityI elim: equalityE)
 
 
-subsection {* Big Union and Intersection *}
+subsection \<open>Big Union and Intersection\<close>
 
 lemma Union_Un_distrib: "Union(A Un B) = Union(A) Un Union(B)"
   by (blast intro: equalityI)
@@ -452,7 +452,7 @@
   by (blast intro: equalityI)
 
 
-subsection {* Unions and Intersections of Families *}
+subsection \<open>Unions and Intersections of Families\<close>
 
 lemma UN_eq: "(UN x:A. B(x)) = Union({Y. EX x:A. Y=B(x)})"
   by (blast intro: equalityI)
@@ -468,7 +468,7 @@
   by (blast intro: equalityI)
 
 
-section {* Monotonicity of various operations *}
+section \<open>Monotonicity of various operations\<close>
 
 lemma Union_mono: "A<=B \<Longrightarrow> Union(A) <= Union(B)"
   by blast
--- a/src/CCL/Term.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CCL/Term.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1993  University of Cambridge
 *)
 
-section {* Definitions of usual program constructs in CCL *}
+section \<open>Definitions of usual program constructs in CCL\<close>
 
 theory Term
 imports CCL
@@ -52,7 +52,7 @@
   "_letrec3" :: "[id,id,id,id,i,i]\<Rightarrow>i" ("(3letrec _ _ _ _ be _/ in _)"
                         [0,0,0,0,0,60] 60)
 
-ML {*
+ML \<open>
 (** Quantifier translations: variable binding **)
 
 (* FIXME does not handle "_idtdummy" *)
@@ -91,21 +91,21 @@
          val (x',a') = Syntax_Trans.variant_abs(x,T,a3)
      in Const(@{syntax_const "_letrec3"},dummyT) $ Free(f',SS) $ Free(x',T) $ Free(y',U) $ Free(z',V) $ a' $ b'
       end;
-*}
+\<close>
 
-parse_translation {*
+parse_translation \<open>
  [(@{syntax_const "_let"}, K let_tr),
   (@{syntax_const "_letrec"}, K letrec_tr),
   (@{syntax_const "_letrec2"}, K letrec2_tr),
   (@{syntax_const "_letrec3"}, K letrec3_tr)]
-*}
+\<close>
 
-print_translation {*
+print_translation \<open>
  [(@{const_syntax let}, K let_tr'),
   (@{const_syntax letrec}, K letrec_tr'),
   (@{const_syntax letrec2}, K letrec2_tr'),
   (@{const_syntax letrec3}, K letrec3_tr')]
-*}
+\<close>
 
 consts
   napply     :: "[i\<Rightarrow>i,i,i]\<Rightarrow>i"            ("(_ ^ _ ` _)" [56,56,56] 56)
@@ -156,7 +156,7 @@
   and genrec_defs = letrec_def letrec2_def letrec3_def
 
 
-subsection {* Beta Rules, including strictness *}
+subsection \<open>Beta Rules, including strictness\<close>
 
 lemma letB: "\<not> t=bot \<Longrightarrow> let x be t in f(x) = f(t)"
   apply (unfold let_def)
@@ -200,11 +200,11 @@
 
 lemmas rawBs = caseBs applyB applyBbot
 
-method_setup beta_rl = {*
+method_setup beta_rl = \<open>
   Scan.succeed (fn ctxt =>
     SIMPLE_METHOD' (CHANGED o
       simp_tac (ctxt addsimps @{thms rawBs} setloop (fn _ => stac ctxt @{thm letrecB}))))
-*}
+\<close>
 
 lemma ifBtrue: "if true then t else u = t"
   and ifBfalse: "if false then t else u = u"
@@ -272,7 +272,7 @@
   napplyBzero napplyBsucc
 
 
-subsection {* Constructors are injective *}
+subsection \<open>Constructors are injective\<close>
 
 lemma term_injs:
   "(inl(a) = inl(a')) \<longleftrightarrow> (a=a')"
@@ -282,16 +282,16 @@
   by (inj_rl applyB splitB whenBinl whenBinr ncaseBsucc lcaseBcons)
 
 
-subsection {* Constructors are distinct *}
+subsection \<open>Constructors are distinct\<close>
 
-ML {*
+ML \<open>
 ML_Thms.bind_thms ("term_dstncts",
   mkall_dstnct_thms @{context} @{thms data_defs} (@{thms ccl_injs} @ @{thms term_injs})
     [["bot","inl","inr"], ["bot","zero","succ"], ["bot","nil","cons"]]);
-*}
+\<close>
 
 
-subsection {* Rules for pre-order @{text "[="} *}
+subsection \<open>Rules for pre-order @{text "[="}\<close>
 
 lemma term_porews:
   "inl(a) [= inl(a') \<longleftrightarrow> a [= a'"
@@ -301,11 +301,11 @@
   by (simp_all add: data_defs ccl_porews)
 
 
-subsection {* Rewriting and Proving *}
+subsection \<open>Rewriting and Proving\<close>
 
-ML {*
+ML \<open>
   ML_Thms.bind_thms ("term_injDs", XH_to_Ds @{thms term_injs});
-*}
+\<close>
 
 lemmas term_rews = termBs term_injs term_dstncts ccl_porews term_porews
 
--- a/src/CCL/Trancl.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CCL/Trancl.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1993  University of Cambridge
 *)
 
-section {* Transitive closure of a relation *}
+section \<open>Transitive closure of a relation\<close>
 
 theory Trancl
 imports CCL
@@ -25,7 +25,7 @@
   where "r^+ == r O rtrancl(r)"
 
 
-subsection {* Natural deduction for @{text "trans(r)"} *}
+subsection \<open>Natural deduction for @{text "trans(r)"}\<close>
 
 lemma transI: "(\<And>x y z. \<lbrakk><x,y>:r; <y,z>:r\<rbrakk> \<Longrightarrow> <x,z>:r) \<Longrightarrow> trans(r)"
   unfolding trans_def by blast
@@ -34,7 +34,7 @@
   unfolding trans_def by blast
 
 
-subsection {* Identity relation *}
+subsection \<open>Identity relation\<close>
 
 lemma idI: "<a,a> : id"
   apply (unfold id_def)
@@ -50,7 +50,7 @@
   done
 
 
-subsection {* Composition of two relations *}
+subsection \<open>Composition of two relations\<close>
 
 lemma compI: "\<lbrakk><a,b>:s; <b,c>:r\<rbrakk> \<Longrightarrow> <a,c> : r O s"
   unfolding relcomp_def by blast
@@ -72,7 +72,7 @@
   by blast
 
 
-subsection {* The relation rtrancl *}
+subsection \<open>The relation rtrancl\<close>
 
 lemma rtrancl_fun_mono: "mono(\<lambda>s. id Un (r O s))"
   apply (rule monoI)
@@ -102,7 +102,7 @@
   done
 
 
-subsection {* standard induction rule *}
+subsection \<open>standard induction rule\<close>
 
 lemma rtrancl_full_induct:
   "\<lbrakk><a,b> : r^*;
@@ -149,9 +149,9 @@
   done
 
 
-subsection {* The relation trancl *}
+subsection \<open>The relation trancl\<close>
 
-subsubsection {* Conversions between trancl and rtrancl *}
+subsubsection \<open>Conversions between trancl and rtrancl\<close>
 
 lemma trancl_into_rtrancl: "<a,b> : r^+ \<Longrightarrow> <a,b> : r^*"
   apply (unfold trancl_def)
--- a/src/CCL/Type.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CCL/Type.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1993  University of Cambridge
 *)
 
-section {* Types in CCL are defined as sets of terms *}
+section \<open>Types in CCL are defined as sets of terms\<close>
 
 theory Type
 imports Term
@@ -45,12 +45,12 @@
   "A * B"       => "CONST Sigma(A, \<lambda>_. B)"
   "{x: A. B}"   == "CONST Subtype(A, \<lambda>x. B)"
 
-print_translation {*
+print_translation \<open>
  [(@{const_syntax Pi},
     fn _ => Syntax_Trans.dependent_tr' (@{syntax_const "_Pi"}, @{syntax_const "_arrow"})),
   (@{const_syntax Sigma},
     fn _ => Syntax_Trans.dependent_tr' (@{syntax_const "_Sigma"}, @{syntax_const "_star"}))]
-*}
+\<close>
 
 defs
   Subtype_def: "{x:A. P(x)} == {x. x:A \<and> P(x)}"
@@ -82,7 +82,7 @@
   by blast
 
 
-subsection {* Exhaustion Rules *}
+subsection \<open>Exhaustion Rules\<close>
 
 lemma EmptyXH: "\<And>a. a : {} \<longleftrightarrow> False"
   and SubtypeXH: "\<And>a A P. a : {x:A. P(x)} \<longleftrightarrow> (a:A \<and> P(a))"
@@ -100,10 +100,10 @@
   and TexXH: "a : TEX X. B(X) \<longleftrightarrow> (EX X. a:B(X))"
   unfolding simp_type_defs by blast+
 
-ML {* ML_Thms.bind_thms ("case_rls", XH_to_Es @{thms XHs}) *}
+ML \<open>ML_Thms.bind_thms ("case_rls", XH_to_Es @{thms XHs})\<close>
 
 
-subsection {* Canonical Type Rules *}
+subsection \<open>Canonical Type Rules\<close>
 
 lemma oneT: "one : Unit"
   and trueT: "true : Bool"
@@ -117,13 +117,13 @@
 lemmas canTs = oneT trueT falseT pairT lamT inlT inrT
 
 
-subsection {* Non-Canonical Type Rules *}
+subsection \<open>Non-Canonical Type Rules\<close>
 
 lemma lem: "\<lbrakk>a:B(u); u = v\<rbrakk> \<Longrightarrow> a : B(v)"
   by blast
 
 
-ML {*
+ML \<open>
 fun mk_ncanT_tac top_crls crls =
   SUBPROOF (fn {context = ctxt, prems = major :: prems, ...} =>
     resolve_tac ctxt ([major] RL top_crls) 1 THEN
@@ -132,11 +132,11 @@
     ALLGOALS (assume_tac ctxt ORELSE' resolve_tac ctxt (prems RL [@{thm lem}])
       ORELSE' eresolve_tac ctxt @{thms bspec}) THEN
     safe_tac (ctxt addSIs prems))
-*}
+\<close>
 
-method_setup ncanT = {*
+method_setup ncanT = \<open>
   Scan.succeed (SIMPLE_METHOD' o mk_ncanT_tac @{thms case_rls} @{thms case_rls})
-*}
+\<close>
 
 lemma ifT: "\<lbrakk>b:Bool; b=true \<Longrightarrow> t:A(true); b=false \<Longrightarrow> u:A(false)\<rbrakk> \<Longrightarrow> if b then t else u : A(b)"
   by ncanT
@@ -156,7 +156,7 @@
 lemmas ncanTs = ifT applyT splitT whenT
 
 
-subsection {* Subtypes *}
+subsection \<open>Subtypes\<close>
 
 lemma SubtypeD1: "a : Subtype(A, P) \<Longrightarrow> a : A"
   and SubtypeD2: "a : Subtype(A, P) \<Longrightarrow> P(a)"
@@ -169,7 +169,7 @@
   by (simp add: SubtypeXH)
 
 
-subsection {* Monotonicity *}
+subsection \<open>Monotonicity\<close>
 
 lemma idM: "mono (\<lambda>X. X)"
   apply (rule monoI)
@@ -206,9 +206,9 @@
     dest!: monoD [THEN subsetD])
 
 
-subsection {* Recursive types *}
+subsection \<open>Recursive types\<close>
 
-subsubsection {* Conversion Rules for Fixed Points via monotonicity and Tarski *}
+subsubsection \<open>Conversion Rules for Fixed Points via monotonicity and Tarski\<close>
 
 lemma NatM: "mono(\<lambda>X. Unit+X)"
   apply (rule PlusM constM idM)+
@@ -245,7 +245,7 @@
 lemmas ind_type_eqs = def_NatB def_ListB def_ListsB def_IListsB
 
 
-subsection {* Exhaustion Rules *}
+subsection \<open>Exhaustion Rules\<close>
 
 lemma NatXH: "a : Nat \<longleftrightarrow> (a=zero | (EX x:Nat. a=succ(x)))"
   and ListXH: "a : List(A) \<longleftrightarrow> (a=[] | (EX x:A. EX xs:List(A).a=x$xs))"
@@ -256,10 +256,10 @@
 
 lemmas iXHs = NatXH ListXH
 
-ML {* ML_Thms.bind_thms ("icase_rls", XH_to_Es @{thms iXHs}) *}
+ML \<open>ML_Thms.bind_thms ("icase_rls", XH_to_Es @{thms iXHs})\<close>
 
 
-subsection {* Type Rules *}
+subsection \<open>Type Rules\<close>
 
 lemma zeroT: "zero : Nat"
   and succT: "n:Nat \<Longrightarrow> succ(n) : Nat"
@@ -270,9 +270,9 @@
 lemmas icanTs = zeroT succT nilT consT
 
 
-method_setup incanT = {*
+method_setup incanT = \<open>
   Scan.succeed (SIMPLE_METHOD' o mk_ncanT_tac @{thms icase_rls} @{thms case_rls})
-*}
+\<close>
 
 lemma ncaseT: "\<lbrakk>n:Nat; n=zero \<Longrightarrow> b:C(zero); \<And>x. \<lbrakk>x:Nat; n=succ(x)\<rbrakk> \<Longrightarrow> c(x):C(succ(x))\<rbrakk>
     \<Longrightarrow> ncase(n,b,c) : C(n)"
@@ -285,7 +285,7 @@
 lemmas incanTs = ncaseT lcaseT
 
 
-subsection {* Induction Rules *}
+subsection \<open>Induction Rules\<close>
 
 lemmas ind_Ms = NatM ListM
 
@@ -304,7 +304,7 @@
 lemmas inds = Nat_ind List_ind
 
 
-subsection {* Primitive Recursive Rules *}
+subsection \<open>Primitive Recursive Rules\<close>
 
 lemma nrecT: "\<lbrakk>n:Nat; b:C(zero); \<And>x g. \<lbrakk>x:Nat; g:C(x)\<rbrakk> \<Longrightarrow> c(x,g):C(succ(x))\<rbrakk>
     \<Longrightarrow> nrec(n,b,c) : C(n)"
@@ -317,7 +317,7 @@
 lemmas precTs = nrecT lrecT
 
 
-subsection {* Theorem proving *}
+subsection \<open>Theorem proving\<close>
 
 lemma SgE2: "\<lbrakk><a,b> : Sigma(A,B); \<lbrakk>a:A; b:B(a)\<rbrakk> \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P"
   unfolding SgXH by blast
@@ -326,13 +326,13 @@
 (*         - intro rules are type rules for canonical terms                *)
 (*         - elim rules are case rules (no non-canonical terms appear)     *)
 
-ML {* ML_Thms.bind_thms ("XHEs", XH_to_Es @{thms XHs}) *}
+ML \<open>ML_Thms.bind_thms ("XHEs", XH_to_Es @{thms XHs})\<close>
 
 lemmas [intro!] = SubtypeI canTs icanTs
   and [elim!] = SubtypeE XHEs
 
 
-subsection {* Infinite Data Types *}
+subsection \<open>Infinite Data Types\<close>
 
 lemma lfp_subset_gfp: "mono(f) \<Longrightarrow> lfp(f) <= gfp(f)"
   apply (rule lfp_lowerbound [THEN subset_trans])
@@ -364,8 +364,8 @@
   done
 
 
-subsection {* Lemmas and tactics for using the rule @{text
-  "coinduct3"} on @{text "[="} and @{text "="} *}
+subsection \<open>Lemmas and tactics for using the rule @{text
+  "coinduct3"} on @{text "[="} and @{text "="}\<close>
 
 lemma lfpI: "\<lbrakk>mono(f); a : f(lfp(f))\<rbrakk> \<Longrightarrow> a : lfp(f)"
   apply (erule lfp_Tarski [THEN ssubst])
@@ -379,12 +379,12 @@
   by simp
 
 
-ML {*
+ML \<open>
   val coinduct3_tac = SUBPROOF (fn {context = ctxt, prems = mono :: prems, ...} =>
     fast_tac (ctxt addIs (mono RS @{thm coinduct3_mono_lemma} RS @{thm lfpI}) :: prems) 1);
-*}
+\<close>
 
-method_setup coinduct3 = {* Scan.succeed (SIMPLE_METHOD' o coinduct3_tac) *}
+method_setup coinduct3 = \<open>Scan.succeed (SIMPLE_METHOD' o coinduct3_tac)\<close>
 
 lemma ci3_RI: "\<lbrakk>mono(Agen); a : R\<rbrakk> \<Longrightarrow> a : lfp(\<lambda>x. Agen(x) Un R Un A)"
   by coinduct3
@@ -396,21 +396,21 @@
 lemma ci3_AI: "\<lbrakk>mono(Agen); a : A\<rbrakk> \<Longrightarrow> a : lfp(\<lambda>x. Agen(x) Un R Un A)"
   by coinduct3
 
-ML {*
+ML \<open>
 fun genIs_tac ctxt genXH gen_mono =
   resolve_tac ctxt [genXH RS @{thm iffD2}] THEN'
   simp_tac ctxt THEN'
   TRY o fast_tac
     (ctxt addIs [genXH RS @{thm iffD2}, gen_mono RS @{thm coinduct3_mono_lemma} RS @{thm lfpI}])
-*}
+\<close>
 
-method_setup genIs = {*
+method_setup genIs = \<open>
   Attrib.thm -- Attrib.thm >>
     (fn (genXH, gen_mono) => fn ctxt => SIMPLE_METHOD' (genIs_tac ctxt genXH gen_mono))
-*}
+\<close>
 
 
-subsection {* POgen *}
+subsection \<open>POgen\<close>
 
 lemma PO_refl: "<a,a> : PO"
   by (rule po_refl [THEN PO_iff [THEN iffD1]])
@@ -433,7 +433,7 @@
     \<Longrightarrow> <h$t,h'$t'> : POgen(lfp(\<lambda>x. POgen(x) Un R Un PO))"
   unfolding data_defs by (genIs POgenXH POgen_mono)+
 
-ML {*
+ML \<open>
 fun POgen_tac ctxt (rla, rlb) i =
   SELECT_GOAL (safe_tac ctxt) i THEN
   resolve_tac ctxt [rlb RS (rla RS @{thm ssubst_pair})] i THEN
@@ -441,10 +441,10 @@
       (@{thms POgenIs} @ [@{thm PO_refl} RS (@{thm POgen_mono} RS @{thm ci3_AI})] @
         (@{thms POgenIs} RL [@{thm POgen_mono} RS @{thm ci3_AgenI}]) @
         [@{thm POgen_mono} RS @{thm ci3_RI}]) i))
-*}
+\<close>
 
 
-subsection {* EQgen *}
+subsection \<open>EQgen\<close>
 
 lemma EQ_refl: "<a,a> : EQ"
   by (rule refl [THEN EQ_iff [THEN iffD1]])
@@ -467,7 +467,7 @@
     \<Longrightarrow> <h$t,h'$t'> : EQgen(lfp(\<lambda>x. EQgen(x) Un R Un EQ))"
   unfolding data_defs by (genIs EQgenXH EQgen_mono)+
 
-ML {*
+ML \<open>
 fun EQgen_raw_tac ctxt i =
   (REPEAT (resolve_tac ctxt (@{thms EQgenIs} @
         [@{thm EQ_refl} RS (@{thm EQgen_mono} RS @{thm ci3_AI})] @
@@ -484,10 +484,10 @@
     resolve_tac ctxt ((rews @ [@{thm refl}]) RL ((rews @ [@{thm refl}]) RL [@{thm ssubst_pair}])) i THEN
     ALLGOALS (simp_tac ctxt) THEN
     ALLGOALS (EQgen_raw_tac ctxt)) i
-*}
+\<close>
 
-method_setup EQgen = {*
+method_setup EQgen = \<open>
   Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD' (EQgen_tac ctxt ths))
-*}
+\<close>
 
 end
--- a/src/CCL/Wfd.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CCL/Wfd.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1993  University of Cambridge
 *)
 
-section {* Well-founded relations in CCL *}
+section \<open>Well-founded relations in CCL\<close>
 
 theory Wfd
 imports Trancl Type Hered
@@ -46,12 +46,12 @@
   apply blast
   done
 
-method_setup wfd_strengthen = {*
+method_setup wfd_strengthen = \<open>
   Scan.lift Args.name_inner_syntax >> (fn s => fn ctxt =>
     SIMPLE_METHOD' (fn i =>
       Rule_Insts.res_inst_tac ctxt [((("Q", 0), Position.none), s)] [] @{thm wfd_strengthen_lemma} i
         THEN assume_tac ctxt (i + 1)))
-*}
+\<close>
 
 lemma wf_anti_sym: "\<lbrakk>Wfd(r); <a,x>:r; <x,a>:r\<rbrakk> \<Longrightarrow> P"
   apply (subgoal_tac "ALL x. <a,x>:r \<longrightarrow> <x,a>:r \<longrightarrow> P")
@@ -66,7 +66,7 @@
   done
 
 
-subsection {* Irreflexive transitive closure *}
+subsection \<open>Irreflexive transitive closure\<close>
 
 lemma trancl_wf:
   assumes 1: "Wfd(R)"
@@ -85,7 +85,7 @@
   done
 
 
-subsection {* Lexicographic Ordering *}
+subsection \<open>Lexicographic Ordering\<close>
 
 lemma lexXH:
   "p : ra**rb \<longleftrightarrow> (EX a a' b b'. p = <<a,b>,<a',b'>> \<and> (<a,a'> : ra | a=a' \<and> <b,b'> : rb))"
@@ -128,7 +128,7 @@
   done
 
 
-subsection {* Mapping *}
+subsection \<open>Mapping\<close>
 
 lemma wmapXH: "p : wmap(f,r) \<longleftrightarrow> (EX x y. p=<x,y> \<and> <f(x),f(y)> : r)"
   unfolding wmap_def by blast
@@ -156,7 +156,7 @@
   done
 
 
-subsection {* Projections *}
+subsection \<open>Projections\<close>
 
 lemma wfstI: "<xa,ya> : r \<Longrightarrow> <<xa,xb>,<ya,yb>> : wmap(fst,r)"
   apply (rule wmapI)
@@ -174,7 +174,7 @@
   done
 
 
-subsection {* Ground well-founded relations *}
+subsection \<open>Ground well-founded relations\<close>
 
 lemma wfI: "\<lbrakk>Wfd(r);  a : r\<rbrakk> \<Longrightarrow> a : wf(r)"
   unfolding wf_def by blast
@@ -220,7 +220,7 @@
   done
 
 
-subsection {* General Recursive Functions *}
+subsection \<open>General Recursive Functions\<close>
 
 lemma letrecT:
   assumes 1: "a : A"
@@ -282,7 +282,7 @@
 lemmas letrecTs = letrecT letrec2T letrec3T
 
 
-subsection {* Type Checking for Recursive Calls *}
+subsection \<open>Type Checking for Recursive Calls\<close>
 
 lemma rcallT:
   "\<lbrakk>ALL x:{x:A.<x,p>:wf(R)}.g(x):D(x);  
@@ -303,7 +303,7 @@
 lemmas rcallTs = rcallT rcall2T rcall3T
 
 
-subsection {* Instantiating an induction hypothesis with an equality assumption *}
+subsection \<open>Instantiating an induction hypothesis with an equality assumption\<close>
 
 lemma hyprcallT:
   assumes 1: "g(a) = b"
@@ -360,7 +360,7 @@
 lemmas hyprcallTs = hyprcallT hyprcall2T hyprcall3T
 
 
-subsection {* Rules to Remove Induction Hypotheses after Type Checking *}
+subsection \<open>Rules to Remove Induction Hypotheses after Type Checking\<close>
 
 lemma rmIH1: "\<lbrakk>ALL x:{x:A.<x,p>:wf(R)}.g(x):D(x); P\<rbrakk> \<Longrightarrow> P" .
 
@@ -372,7 +372,7 @@
 lemmas rmIHs = rmIH1 rmIH2 rmIH3
 
 
-subsection {* Lemmas for constructors and subtypes *}
+subsection \<open>Lemmas for constructors and subtypes\<close>
 
 (* 0-ary constructors do not need additional rules as they are handled *)
 (*                                      correctly by applying SubtypeI *)
@@ -404,9 +404,9 @@
 lemmas rcall_lemmas = asm_rl rcall_lemma1 SubtypeD1 rcall_lemma2
 
 
-subsection {* Typechecking *}
+subsection \<open>Typechecking\<close>
 
-ML {*
+ML \<open>
 local
 
 val type_rls =
@@ -478,35 +478,35 @@
   SELECT_GOAL (REPEAT_FIRST (tc_step_tac ctxt rls) THEN clean_ccs_tac ctxt) i
 
 end
-*}
+\<close>
 
-method_setup typechk = {*
+method_setup typechk = \<open>
   Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD' (typechk_tac ctxt ths))
-*}
+\<close>
 
-method_setup clean_ccs = {*
+method_setup clean_ccs = \<open>
   Scan.succeed (SIMPLE_METHOD o clean_ccs_tac)
-*}
+\<close>
 
-method_setup gen_ccs = {*
+method_setup gen_ccs = \<open>
   Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD' (gen_ccs_tac ctxt ths))
-*}
+\<close>
 
 
-subsection {* Evaluation *}
+subsection \<open>Evaluation\<close>
 
 named_theorems eval "evaluation rules"
 
-ML {*
+ML \<open>
 fun eval_tac ths =
   Subgoal.FOCUS_PREMS (fn {context = ctxt, prems, ...} =>
     let val eval_rules = Named_Theorems.get ctxt @{named_theorems eval}
     in DEPTH_SOLVE_1 (resolve_tac ctxt (ths @ prems @ rev eval_rules) 1) end)
-*}
+\<close>
 
-method_setup eval = {*
+method_setup eval = \<open>
   Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD' (CHANGED o eval_tac ths ctxt))
-*}
+\<close>
 
 
 lemmas eval_rls [eval] = trueV falseV pairV lamV caseVtrue caseVfalse caseVpair caseVlam
@@ -523,9 +523,9 @@
   shows "let x be t in f(x) ---> c"
   apply (unfold let_def)
   apply (rule 1 [THEN canonical])
-  apply (tactic {*
+  apply (tactic \<open>
     REPEAT (DEPTH_SOLVE_1 (resolve_tac @{context} (@{thms assms} @ @{thms eval_rls}) 1 ORELSE
-      eresolve_tac @{context} @{thms substitute} 1)) *})
+      eresolve_tac @{context} @{thms substitute} 1))\<close>)
   done
 
 lemma fixV: "f(fix(f)) ---> c \<Longrightarrow> fix(f) ---> c"
@@ -566,7 +566,7 @@
   unfolding data_defs by eval+
 
 
-subsection {* Factorial *}
+subsection \<open>Factorial\<close>
 
 schematic_lemma
   "letrec f n be ncase(n,succ(zero),\<lambda>x. nrec(n,zero,\<lambda>y g. nrec(f(x),g,\<lambda>z h. succ(h))))  
@@ -578,7 +578,7 @@
    in f(succ(succ(succ(zero)))) ---> ?a"
   by eval
 
-subsection {* Less Than Or Equal *}
+subsection \<open>Less Than Or Equal\<close>
 
 schematic_lemma
   "letrec f p be split(p,\<lambda>m n. ncase(m,true,\<lambda>x. ncase(n,false,\<lambda>y. f(<x,y>))))
@@ -596,7 +596,7 @@
   by eval
 
 
-subsection {* Reverse *}
+subsection \<open>Reverse\<close>
 
 schematic_lemma
   "letrec id l be lcase(l,[],\<lambda>x xs. x$id(xs))  
--- a/src/CCL/ex/Flag.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CCL/ex/Flag.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,8 +3,8 @@
     Copyright   1993  University of Cambridge
 *)
 
-section {* Dutch national flag program -- except that the point of Dijkstra's example was to use
-  arrays and this uses lists. *}
+section \<open>Dutch national flag program -- except that the point of Dijkstra's example was to use
+  arrays and this uses lists.\<close>
 
 theory Flag
 imports List
--- a/src/CCL/ex/List.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CCL/ex/List.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1993  University of Cambridge
 *)
 
-section {* Programs defined over lists *}
+section \<open>Programs defined over lists\<close>
 
 theory List
 imports Nat
--- a/src/CCL/ex/Nat.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CCL/ex/Nat.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1993  University of Cambridge
 *)
 
-section {* Programs defined over the natural numbers *}
+section \<open>Programs defined over the natural numbers\<close>
 
 theory Nat
 imports "../Wfd"
@@ -96,7 +96,7 @@
   done
 
 
-subsection {* Termination Conditions for Ackermann's Function *}
+subsection \<open>Termination Conditions for Ackermann's Function\<close>
 
 lemmas relI = NatPR_wf [THEN NatPR_wf [THEN lex_wf, THEN wfI]]
 
--- a/src/CCL/ex/Stream.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CCL/ex/Stream.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1993  University of Cambridge
 *)
 
-section {* Programs defined over streams *}
+section \<open>Programs defined over streams\<close>
 
 theory Stream
 imports List
@@ -22,7 +22,7 @@
 *)
 
 
-subsection {* Map of composition is composition of maps *}
+subsection \<open>Map of composition is composition of maps\<close>
 
 lemma map_comp:
   assumes 1: "l:Lists(A)"
@@ -49,7 +49,7 @@
   done
 
 
-subsection {* Mapping distributes over append *}
+subsection \<open>Mapping distributes over append\<close>
 
 lemma map_append:
   assumes "l:Lists(A)"
@@ -67,7 +67,7 @@
   done
 
 
-subsection {* Append is associative *}
+subsection \<open>Append is associative\<close>
 
 lemma append_assoc:
   assumes "k:Lists(A)"
@@ -82,12 +82,12 @@
   apply EQgen
    prefer 2
    apply blast
-  apply (tactic {* DEPTH_SOLVE (eresolve_tac @{context} [XH_to_E @{thm ListsXH}] 1
-    THEN EQgen_tac @{context} [] 1) *})
+  apply (tactic \<open>DEPTH_SOLVE (eresolve_tac @{context} [XH_to_E @{thm ListsXH}] 1
+    THEN EQgen_tac @{context} [] 1)\<close>)
   done
 
 
-subsection {* Appending anything to an infinite list doesn't alter it *}
+subsection \<open>Appending anything to an infinite list doesn't alter it\<close>
 
 lemma ilist_append:
   assumes "l:ILists(A)"
--- a/src/CTT/Arith.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CTT/Arith.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,13 +3,13 @@
     Copyright   1991  University of Cambridge
 *)
 
-section {* Elementary arithmetic *}
+section \<open>Elementary arithmetic\<close>
 
 theory Arith
 imports Bool
 begin
 
-subsection {* Arithmetic operators and their definitions *}
+subsection \<open>Arithmetic operators and their definitions\<close>
 
 definition
   add :: "[i,i]\<Rightarrow>i"   (infixr "#+" 65) where
@@ -46,7 +46,7 @@
 lemmas arith_defs = add_def diff_def absdiff_def mult_def mod_def div_def
 
 
-subsection {* Proofs about elementary arithmetic: addition, multiplication, etc. *}
+subsection \<open>Proofs about elementary arithmetic: addition, multiplication, etc.\<close>
 
 (** Addition *)
 
@@ -144,7 +144,7 @@
 done
 
 
-subsection {* Simplification *}
+subsection \<open>Simplification\<close>
 
 lemmas arith_typing_rls = add_typing mult_typing diff_typing
   and arith_congr_rls = add_typingL mult_typingL diff_typingL
@@ -155,7 +155,7 @@
   multC0 multC_succ
   diffC0 diff_0_eq_0 diff_succ_succ
 
-ML {*
+ML \<open>
 
 structure Arith_simp_data: TSIMP_DATA =
   struct
@@ -180,18 +180,18 @@
   (Arith_simp.cond_norm_tac ctxt (prove_cond_tac ctxt, congr_rls, prems))
 
 end
-*}
+\<close>
 
-method_setup arith_rew = {*
+method_setup arith_rew = \<open>
   Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (arith_rew_tac ctxt ths))
-*}
+\<close>
 
-method_setup hyp_arith_rew = {*
+method_setup hyp_arith_rew = \<open>
   Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (hyp_arith_rew_tac ctxt ths))
-*}
+\<close>
 
 
-subsection {* Addition *}
+subsection \<open>Addition\<close>
 
 (*Associative law for addition*)
 lemma add_assoc: "\<lbrakk>a:N; b:N; c:N\<rbrakk> \<Longrightarrow> (a #+ b) #+ c = a #+ (b #+ c) : N"
@@ -214,7 +214,7 @@
 done
 
 
-subsection {* Multiplication *}
+subsection \<open>Multiplication\<close>
 
 (*right annihilation in product*)
 lemma mult_0_right: "a:N \<Longrightarrow> a #* 0 = 0 : N"
@@ -248,11 +248,11 @@
 done
 
 
-subsection {* Difference *}
+subsection \<open>Difference\<close>
 
-text {*
+text \<open>
 Difference on natural numbers, without negative numbers
-  a - b = 0  iff  a<=b    a - b = succ(c) iff a>b   *}
+  a - b = 0  iff  a<=b    a - b = succ(c) iff a>b\<close>
 
 lemma diff_self_eq_0: "a:N \<Longrightarrow> a - a = 0 : N"
 apply (NE a)
@@ -299,7 +299,7 @@
 done
 
 
-subsection {* Absolute difference *}
+subsection \<open>Absolute difference\<close>
 
 (*typing of absolute difference: short and long versions*)
 
@@ -380,7 +380,7 @@
 done
 
 
-subsection {* Remainder and Quotient *}
+subsection \<open>Remainder and Quotient\<close>
 
 (*typing of remainder: short and long versions*)
 
--- a/src/CTT/Bool.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CTT/Bool.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1991  University of Cambridge
 *)
 
-section {* The two-element type (booleans and conditionals) *}
+section \<open>The two-element type (booleans and conditionals)\<close>
 
 theory Bool
 imports CTT
@@ -28,7 +28,7 @@
 lemmas bool_defs = Bool_def true_def false_def cond_def
 
 
-subsection {* Derivation of rules for the type Bool *}
+subsection \<open>Derivation of rules for the type Bool\<close>
 
 (*formation rule*)
 lemma boolF: "Bool type"
--- a/src/CTT/CTT.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CTT/CTT.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1993  University of Cambridge
 *)
 
-section {* Constructive Type Theory *}
+section \<open>Constructive Type Theory\<close>
 
 theory CTT
 imports Pure
@@ -314,9 +314,9 @@
 done
 
 
-subsection {* Tactics for type checking *}
+subsection \<open>Tactics for type checking\<close>
 
-ML {*
+ML \<open>
 
 local
 
@@ -336,13 +336,13 @@
 
 end;
 
-*}
+\<close>
 
 (*For simplification: type formation and checking,
   but no equalities between terms*)
 lemmas routine_rls = form_rls formL_rls refl_type element_rls
 
-ML {*
+ML \<open>
 local
   val equal_rls = @{thms form_rls} @ @{thms element_rls} @ @{thms intrL_rls} @
     @{thms elimL_rls} @ @{thms refl_elem}
@@ -378,15 +378,15 @@
     (ASSUME ctxt (filt_resolve_from_net_tac ctxt 3 (Tactic.build_net (thms @ equal_rls))))
 
 end
-*}
+\<close>
 
-method_setup form = {* Scan.succeed (fn ctxt => SIMPLE_METHOD (form_tac ctxt)) *}
-method_setup typechk = {* Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (typechk_tac ctxt ths)) *}
-method_setup intr = {* Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (intr_tac ctxt ths)) *}
-method_setup equal = {* Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (equal_tac ctxt ths)) *}
+method_setup form = \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD (form_tac ctxt))\<close>
+method_setup typechk = \<open>Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (typechk_tac ctxt ths))\<close>
+method_setup intr = \<open>Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (intr_tac ctxt ths))\<close>
+method_setup equal = \<open>Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (equal_tac ctxt ths))\<close>
 
 
-subsection {* Simplification *}
+subsection \<open>Simplification\<close>
 
 (*To simplify the type in a goal*)
 lemma replace_type: "\<lbrakk>B = A; a : A\<rbrakk> \<Longrightarrow> a : B"
@@ -409,7 +409,7 @@
 (*Simplification rules for Constructive Type Theory*)
 lemmas reduction_rls = comp_rls [THEN trans_elem]
 
-ML {*
+ML \<open>
 (*Converts each goal "e : Eq(A,a,b)" into "a=b:A" for simplification.
   Uses other intro rules to avoid changing flexible goals.*)
 val eqintr_net = Tactic.build_net @{thms EqI intr_rls}
@@ -466,21 +466,21 @@
 
 (*Fails unless it solves the goal!*)
 fun pc_tac ctxt thms = DEPTH_SOLVE_1 o (step_tac ctxt thms)
-*}
+\<close>
 
-method_setup eqintr = {* Scan.succeed (SIMPLE_METHOD o eqintr_tac) *}
-method_setup NE = {*
+method_setup eqintr = \<open>Scan.succeed (SIMPLE_METHOD o eqintr_tac)\<close>
+method_setup NE = \<open>
   Scan.lift Args.name_inner_syntax >> (fn s => fn ctxt => SIMPLE_METHOD' (NE_tac ctxt s))
-*}
-method_setup pc = {* Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD' (pc_tac ctxt ths)) *}
-method_setup add_mp = {* Scan.succeed (SIMPLE_METHOD' o add_mp_tac) *}
+\<close>
+method_setup pc = \<open>Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD' (pc_tac ctxt ths))\<close>
+method_setup add_mp = \<open>Scan.succeed (SIMPLE_METHOD' o add_mp_tac)\<close>
 
 ML_file "rew.ML"
-method_setup rew = {* Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (rew_tac ctxt ths)) *}
-method_setup hyp_rew = {* Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (hyp_rew_tac ctxt ths)) *}
+method_setup rew = \<open>Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (rew_tac ctxt ths))\<close>
+method_setup hyp_rew = \<open>Attrib.thms >> (fn ths => fn ctxt => SIMPLE_METHOD (hyp_rew_tac ctxt ths))\<close>
 
 
-subsection {* The elimination rules for fst/snd *}
+subsection \<open>The elimination rules for fst/snd\<close>
 
 lemma SumE_fst: "p : Sum(A,B) \<Longrightarrow> fst(p) : A"
 apply (unfold basic_defs)
--- a/src/CTT/Main.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CTT/Main.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -1,4 +1,4 @@
-section {* Main includes everything *}
+section \<open>Main includes everything\<close>
 
 theory Main
 imports CTT Arith Bool
--- a/src/CTT/ex/Elimination.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CTT/ex/Elimination.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -182,7 +182,7 @@
     and "\<And>z. z:A*B \<Longrightarrow> C(z) type"
   shows "?a : (SUM z:A*B. C(z)) --> (SUM u:A. SUM v:B. C(<u,v>))"
 apply (rule intr_rls)
-apply (tactic {* biresolve_tac @{context} safe_brls 2 *})
+apply (tactic \<open>biresolve_tac @{context} safe_brls 2\<close>)
 (*Now must convert assumption C(z) into antecedent C(<kd,ke>) *)
 apply (rule_tac [2] a = "y" in ProdE)
 apply (typechk assms)
--- a/src/CTT/ex/Equality.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CTT/ex/Equality.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -47,7 +47,7 @@
 lemma "p : Sum(A,B) \<Longrightarrow> <split(p,\<lambda>x y. x), split(p,\<lambda>x y. y)> = p : Sum(A,B)"
 apply (rule EqE)
 apply (rule elim_rls, assumption)
-apply (tactic {* DEPTH_SOLVE_1 (rew_tac @{context} []) *}) (*!!!!!!!*)
+apply (tactic \<open>DEPTH_SOLVE_1 (rew_tac @{context} [])\<close>) (*!!!!!!!*)
 done
 
 lemma "\<lbrakk>a : A; b : B\<rbrakk> \<Longrightarrow> (lam u. split(u, \<lambda>v w.<w,v>)) ` <a,b> = <b,a> : SUM x:B. A"
--- a/src/CTT/ex/Typechecking.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/CTT/ex/Typechecking.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -9,7 +9,7 @@
 imports "../CTT"
 begin
 
-subsection {* Single-step proofs: verifying that a type is well-formed *}
+subsection \<open>Single-step proofs: verifying that a type is well-formed\<close>
 
 schematic_lemma "?A type"
 apply (rule form_rls)
@@ -31,7 +31,7 @@
 done
 
 
-subsection {* Multi-step proofs: Type inference *}
+subsection \<open>Multi-step proofs: Type inference\<close>
 
 lemma "PROD w:N. N + N type"
 apply form
@@ -67,7 +67,7 @@
 (*Proofs involving arbitrary types.
   For concreteness, every type variable left over is forced to be N*)
 method_setup N =
-  {* Scan.succeed (fn ctxt => SIMPLE_METHOD (TRYALL (resolve_tac ctxt @{thms NF}))) *}
+  \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD (TRYALL (resolve_tac ctxt @{thms NF})))\<close>
 
 schematic_lemma "lam w. <w,w> : ?A"
 apply typechk
--- a/src/FOL/FOL.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/FOL.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,7 +2,7 @@
     Author:     Lawrence C Paulson and Markus Wenzel
 *)
 
-section {* Classical first-order logic *}
+section \<open>Classical first-order logic\<close>
 
 theory FOL
 imports IFOL
@@ -14,13 +14,13 @@
 ML_file "~~/src/Provers/clasimp.ML"
 
 
-subsection {* The classical axiom *}
+subsection \<open>The classical axiom\<close>
 
 axiomatization where
   classical: "(~P ==> P) ==> P"
 
 
-subsection {* Lemmas and proof tools *}
+subsection \<open>Lemmas and proof tools\<close>
 
 lemma ccontr: "(\<not> P \<Longrightarrow> False) \<Longrightarrow> P"
   by (erule FalseE [THEN classical])
@@ -65,15 +65,15 @@
   apply (erule r1)
   done
 
-ML {*
+ML \<open>
   fun case_tac ctxt a fixes =
     Rule_Insts.res_inst_tac ctxt [((("P", 0), Position.none), a)] fixes @{thm case_split}
-*}
+\<close>
 
-method_setup case_tac = {*
+method_setup case_tac = \<open>
   Args.goal_spec -- Scan.lift (Args.name_inner_syntax -- Parse.for_fixes) >>
     (fn (quant, (s, fixes)) => fn ctxt => SIMPLE_METHOD'' quant (case_tac ctxt s fixes))
-*} "case_tac emulation (dynamic instantiation!)"
+\<close> "case_tac emulation (dynamic instantiation!)"
 
 
 (*** Special elimination rules *)
@@ -163,9 +163,9 @@
   by (rule classical) iprover
 
 
-section {* Classical Reasoner *}
+section \<open>Classical Reasoner\<close>
 
-ML {*
+ML \<open>
 structure Cla = Classical
 (
   val imp_elim = @{thm imp_elim}
@@ -178,21 +178,21 @@
 
 structure Basic_Classical: BASIC_CLASSICAL = Cla;
 open Basic_Classical;
-*}
+\<close>
 
 (*Propositional rules*)
 lemmas [intro!] = refl TrueI conjI disjCI impI notI iffI
   and [elim!] = conjE disjE impCE FalseE iffCE
-ML {* val prop_cs = claset_of @{context} *}
+ML \<open>val prop_cs = claset_of @{context}\<close>
 
 (*Quantifier rules*)
 lemmas [intro!] = allI ex_ex1I
   and [intro] = exI
   and [elim!] = exE alt_ex1E
   and [elim] = allE
-ML {* val FOL_cs = claset_of @{context} *}
+ML \<open>val FOL_cs = claset_of @{context}\<close>
 
-ML {*
+ML \<open>
   structure Blast = Blast
   (
     structure Classical = Cla
@@ -204,7 +204,7 @@
     val hyp_subst_tac = Hypsubst.blast_hyp_subst_tac
   );
   val blast_tac = Blast.blast_tac;
-*}
+\<close>
 
 
 lemma ex1_functional: "[| EX! z. P(a,z);  P(a,b);  P(a,c) |] ==> b = c"
@@ -320,10 +320,10 @@
 
 ML_file "simpdata.ML"
 
-simproc_setup defined_Ex ("EX x. P(x)") = {* fn _ => Quantifier1.rearrange_ex *}
-simproc_setup defined_All ("ALL x. P(x)") = {* fn _ => Quantifier1.rearrange_all *}
+simproc_setup defined_Ex ("EX x. P(x)") = \<open>fn _ => Quantifier1.rearrange_ex\<close>
+simproc_setup defined_All ("ALL x. P(x)") = \<open>fn _ => Quantifier1.rearrange_all\<close>
 
-ML {*
+ML \<open>
 (*intuitionistic simprules only*)
 val IFOL_ss =
   put_simpset FOL_basic_ss @{context}
@@ -337,17 +337,17 @@
   put_simpset IFOL_ss @{context}
   addsimps @{thms cla_simps cla_ex_simps cla_all_simps}
   |> simpset_of;
-*}
+\<close>
 
-setup {*
+setup \<open>
   map_theory_simpset (put_simpset FOL_ss) #>
   Simplifier.method_setup Splitter.split_modifiers
-*}
+\<close>
 
 ML_file "~~/src/Tools/eqsubst.ML"
 
 
-subsection {* Other simple lemmas *}
+subsection \<open>Other simple lemmas\<close>
 
 lemma [simp]: "((P-->R) <-> (Q-->R)) <-> ((P<->Q) | R)"
 by blast
@@ -380,9 +380,9 @@
 by blast
 
 
-subsection {* Proof by cases and induction *}
+subsection \<open>Proof by cases and induction\<close>
 
-text {* Proper handling of non-atomic rule statements. *}
+text \<open>Proper handling of non-atomic rule statements.\<close>
 
 context
 begin
@@ -409,10 +409,10 @@
 lemmas induct_rulify_fallback =
   induct_forall_def induct_implies_def induct_equal_def induct_conj_def
 
-text {* Method setup. *}
+text \<open>Method setup.\<close>
 
 ML_file "~~/src/Tools/induct.ML"
-ML {*
+ML \<open>
   structure Induct = Induct
   (
     val cases_default = @{thm case_split}
@@ -423,7 +423,7 @@
     fun dest_def _ = NONE
     fun trivial_tac _ _ = no_tac
   );
-*}
+\<close>
 
 declare case_split [cases type: o]
 
--- a/src/FOL/IFOL.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/IFOL.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,7 +2,7 @@
     Author:     Lawrence C Paulson and Markus Wenzel
 *)
 
-section {* Intuitionistic first-order logic *}
+section \<open>Intuitionistic first-order logic\<close>
 
 theory IFOL
 imports Pure
@@ -20,7 +20,7 @@
 ML_file "~~/src/Tools/atomize_elim.ML"
 
 
-subsection {* Syntax and axiomatic basis *}
+subsection \<open>Syntax and axiomatic basis\<close>
 
 setup Pure_Thy.old_appl_syntax_setup
 
@@ -33,7 +33,7 @@
   Trueprop      :: "o => prop"                  ("(_)" 5)
 
 
-subsubsection {* Equality *}
+subsubsection \<open>Equality\<close>
 
 axiomatization
   eq :: "['a, 'a] => o"  (infixl "=" 50)
@@ -42,7 +42,7 @@
   subst:        "a=b \<Longrightarrow> P(a) \<Longrightarrow> P(b)"
 
 
-subsubsection {* Propositional logic *}
+subsubsection \<open>Propositional logic\<close>
 
 axiomatization
   False :: o and
@@ -64,7 +64,7 @@
   FalseE: "False ==> P"
 
 
-subsubsection {* Quantifiers *}
+subsubsection \<open>Quantifiers\<close>
 
 axiomatization
   All :: "('a => o) => o"  (binder "ALL " 10) and
@@ -76,7 +76,7 @@
   exE: "[| EX x. P(x);  !!x. P(x) ==> R |] ==> R"
 
 
-subsubsection {* Definitions *}
+subsubsection \<open>Definitions\<close>
 
 definition "True == False-->False"
 definition Not ("~ _" [40] 40) where not_def: "~P == P-->False"
@@ -85,12 +85,12 @@
 definition Ex1 :: "('a => o) => o"  (binder "EX! " 10)
   where ex1_def: "EX! x. P(x) == EX x. P(x) & (ALL y. P(y) --> y=x)"
 
-axiomatization where  -- {* Reflection, admissible *}
+axiomatization where  -- \<open>Reflection, admissible\<close>
   eq_reflection: "(x=y) ==> (x==y)" and
   iff_reflection: "(P<->Q) ==> (P==Q)"
 
 
-subsubsection {* Additional notation *}
+subsubsection \<open>Additional notation\<close>
 
 abbreviation not_equal :: "['a, 'a] => o"  (infixl "~=" 50)
   where "x ~= y == ~ (x = y)"
@@ -120,7 +120,7 @@
   Ex1  (binder "\<exists>!" 10)
 
 
-subsection {* Lemmas and proof tools *}
+subsection \<open>Lemmas and proof tools\<close>
 
 lemmas strip = impI allI
 
@@ -146,7 +146,7 @@
   shows R
   apply (rule r)
   apply (rule major [THEN mp])
-  apply (rule `P`)
+  apply (rule \<open>P\<close>)
   done
 
 lemma allE:
@@ -186,7 +186,7 @@
   shows Q
   apply (rule r)
   apply (rule impI)
-  apply (erule notE [OF `~P`])
+  apply (erule notE [OF \<open>~P\<close>])
   done
 
 (* For substitution into an assumption P, reduce Q to P-->Q, substitute into
@@ -207,10 +207,10 @@
 (*** Modus Ponens Tactics ***)
 
 (*Finds P-->Q and P in the assumptions, replaces implication by Q *)
-ML {*
+ML \<open>
   fun mp_tac ctxt i = eresolve_tac ctxt @{thms notE impE} i THEN assume_tac ctxt i
   fun eq_mp_tac ctxt i = eresolve_tac ctxt @{thms notE impE} i THEN eq_assume_tac i
-*}
+\<close>
 
 
 (*** If-and-only-if ***)
@@ -303,11 +303,11 @@
 (*** <-> congruence rules for simplification ***)
 
 (*Use iffE on a premise.  For conj_cong, imp_cong, all_cong, ex_cong*)
-ML {*
+ML \<open>
   fun iff_tac ctxt prems i =
     resolve_tac ctxt (prems RL @{thms iffE}) i THEN
     REPEAT1 (eresolve_tac ctxt @{thms asm_rl mp} i)
-*}
+\<close>
 
 method_setup iff =
   \<open>Attrib.thms >> (fn prems => fn ctxt => SIMPLE_METHOD' (iff_tac ctxt prems))\<close>
@@ -558,20 +558,20 @@
   apply (rule disjI2) apply assumption
   done
 
-ML {*
+ML \<open>
 structure Project_Rule = Project_Rule
 (
   val conjunct1 = @{thm conjunct1}
   val conjunct2 = @{thm conjunct2}
   val mp = @{thm mp}
 )
-*}
+\<close>
 
 ML_file "fologic.ML"
 
 lemma thin_refl: "[|x=x; PROP W|] ==> PROP W" .
 
-ML {*
+ML \<open>
 structure Hypsubst = Hypsubst
 (
   val dest_eq = FOLogic.dest_eq
@@ -586,14 +586,14 @@
   val thin_refl = @{thm thin_refl}
 );
 open Hypsubst;
-*}
+\<close>
 
 ML_file "intprover.ML"
 
 
-subsection {* Intuitionistic Reasoning *}
+subsection \<open>Intuitionistic Reasoning\<close>
 
-setup {* Intuitionistic.method_setup @{binding iprover} *}
+setup \<open>Intuitionistic.method_setup @{binding iprover}\<close>
 
 lemma impE':
   assumes 1: "P --> Q"
@@ -629,7 +629,7 @@
   and [Pure.elim 2] = allE notE' impE'
   and [Pure.intro] = exI disjI2 disjI1
 
-setup {* Context_Rules.addSWrapper (fn ctxt => fn tac => hyp_subst_tac ctxt ORELSE' tac) *}
+setup \<open>Context_Rules.addSWrapper (fn ctxt => fn tac => hyp_subst_tac ctxt ORELSE' tac)\<close>
 
 
 lemma iff_not_sym: "~ (Q <-> P) ==> ~ (P <-> Q)"
@@ -645,7 +645,7 @@
 done
 
 
-subsection {* Atomizing meta-level rules *}
+subsection \<open>Atomizing meta-level rules\<close>
 
 lemma atomize_all [atomize]: "(!!x. P(x)) == Trueprop (ALL x. P(x))"
 proof
@@ -668,7 +668,7 @@
 lemma atomize_eq [atomize]: "(x == y) == Trueprop (x = y)"
 proof
   assume "x == y"
-  show "x = y" unfolding `x == y` by (rule refl)
+  show "x = y" unfolding \<open>x == y\<close> by (rule refl)
 next
   assume "x = y"
   then show "x == y" by (rule eq_reflection)
@@ -677,7 +677,7 @@
 lemma atomize_iff [atomize]: "(A == B) == Trueprop (A <-> B)"
 proof
   assume "A == B"
-  show "A <-> B" unfolding `A == B` by (rule iff_refl)
+  show "A <-> B" unfolding \<open>A == B\<close> by (rule iff_refl)
 next
   assume "A <-> B"
   then show "A == B" by (rule iff_reflection)
@@ -704,7 +704,7 @@
   and [symmetric, defn] = atomize_all atomize_imp atomize_eq atomize_iff
 
 
-subsection {* Atomizing elimination rules *}
+subsection \<open>Atomizing elimination rules\<close>
 
 lemma atomize_exL[atomize_elim]: "(!!x. P(x) ==> Q) == ((EX x. P(x)) ==> Q)"
   by rule iprover+
@@ -718,7 +718,7 @@
 lemma atomize_elimL[atomize_elim]: "(!!B. (A ==> B) ==> B) == Trueprop(A)" ..
 
 
-subsection {* Calculational rules *}
+subsection \<open>Calculational rules\<close>
 
 lemma forw_subst: "a = b ==> P(b) ==> P(a)"
   by (rule ssubst)
@@ -726,9 +726,9 @@
 lemma back_subst: "P(a) ==> a = b ==> P(b)"
   by (rule subst)
 
-text {*
+text \<open>
   Note that this list of rules is in reverse order of priorities.
-*}
+\<close>
 
 lemmas basic_trans_rules [trans] =
   forw_subst
@@ -737,7 +737,7 @@
   mp
   trans
 
-subsection {* ``Let'' declarations *}
+subsection \<open>``Let'' declarations\<close>
 
 nonterminal letbinds and letbind
 
@@ -763,7 +763,7 @@
   done
 
 
-subsection {* Intuitionistic simplification rules *}
+subsection \<open>Intuitionistic simplification rules\<close>
 
 lemma conj_simps:
   "P & True <-> P"
@@ -830,7 +830,7 @@
   by iprover+
 
 
-text {* Conversion into rewrite rules *}
+text \<open>Conversion into rewrite rules\<close>
 
 lemma P_iff_F: "~P ==> (P <-> False)" by iprover
 lemma iff_reflection_F: "~P ==> (P == False)" by (rule P_iff_F [THEN iff_reflection])
@@ -839,7 +839,7 @@
 lemma iff_reflection_T: "P ==> (P == True)" by (rule P_iff_T [THEN iff_reflection])
 
 
-text {* More rewrite rules *}
+text \<open>More rewrite rules\<close>
 
 lemma conj_commute: "P&Q <-> Q&P" by iprover
 lemma conj_left_commute: "P&(Q&R) <-> Q&(P&R)" by iprover
--- a/src/FOL/ex/Classical.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Classical.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,14 +3,14 @@
     Copyright   1994  University of Cambridge
 *)
 
-section{*Classical Predicate Calculus Problems*}
+section\<open>Classical Predicate Calculus Problems\<close>
 
 theory Classical imports FOL begin
 
 lemma "(P --> Q | R) --> (P-->Q) | (P-->R)"
 by blast
 
-text{*If and only if*}
+text\<open>If and only if\<close>
 
 lemma "(P<->Q) <-> (Q<->P)"
 by blast
@@ -19,7 +19,7 @@
 by blast
 
 
-text{*Sample problems from 
+text\<open>Sample problems from 
   F. J. Pelletier, 
   Seventy-Five Problems for Testing Automatic Theorem Provers,
   J. Automated Reasoning 2 (1986), 191-216.
@@ -27,79 +27,79 @@
 
 The hardest problems -- judging by experience with several theorem provers,
 including matrix ones -- are 34 and 43.
-*}
+\<close>
 
-subsection{*Pelletier's examples*}
+subsection\<open>Pelletier's examples\<close>
 
-text{*1*}
+text\<open>1\<close>
 lemma "(P-->Q)  <->  (~Q --> ~P)"
 by blast
 
-text{*2*}
+text\<open>2\<close>
 lemma "~ ~ P  <->  P"
 by blast
 
-text{*3*}
+text\<open>3\<close>
 lemma "~(P-->Q) --> (Q-->P)"
 by blast
 
-text{*4*}
+text\<open>4\<close>
 lemma "(~P-->Q)  <->  (~Q --> P)"
 by blast
 
-text{*5*}
+text\<open>5\<close>
 lemma "((P|Q)-->(P|R)) --> (P|(Q-->R))"
 by blast
 
-text{*6*}
+text\<open>6\<close>
 lemma "P | ~ P"
 by blast
 
-text{*7*}
+text\<open>7\<close>
 lemma "P | ~ ~ ~ P"
 by blast
 
-text{*8.  Peirce's law*}
+text\<open>8.  Peirce's law\<close>
 lemma "((P-->Q) --> P)  -->  P"
 by blast
 
-text{*9*}
+text\<open>9\<close>
 lemma "((P|Q) & (~P|Q) & (P| ~Q)) --> ~ (~P | ~Q)"
 by blast
 
-text{*10*}
+text\<open>10\<close>
 lemma "(Q-->R) & (R-->P&Q) & (P-->Q|R) --> (P<->Q)"
 by blast
 
-text{*11.  Proved in each direction (incorrectly, says Pelletier!!)  *}
+text\<open>11.  Proved in each direction (incorrectly, says Pelletier!!)\<close>
 lemma "P<->P"
 by blast
 
-text{*12.  "Dijkstra's law"*}
+text\<open>12.  "Dijkstra's law"\<close>
 lemma "((P <-> Q) <-> R)  <->  (P <-> (Q <-> R))"
 by blast
 
-text{*13.  Distributive law*}
+text\<open>13.  Distributive law\<close>
 lemma "P | (Q & R)  <-> (P | Q) & (P | R)"
 by blast
 
-text{*14*}
+text\<open>14\<close>
 lemma "(P <-> Q) <-> ((Q | ~P) & (~Q|P))"
 by blast
 
-text{*15*}
+text\<open>15\<close>
 lemma "(P --> Q) <-> (~P | Q)"
 by blast
 
-text{*16*}
+text\<open>16\<close>
 lemma "(P-->Q) | (Q-->P)"
 by blast
 
-text{*17*}
+text\<open>17\<close>
 lemma "((P & (Q-->R))-->S) <-> ((~P | Q | S) & (~P | ~R | S))"
 by blast
 
-subsection{*Classical Logic: examples with quantifiers*}
+subsection\<open>Classical Logic: examples with quantifiers\<close>
 
 lemma "(\<forall>x. P(x) & Q(x)) <-> (\<forall>x. P(x))  &  (\<forall>x. Q(x))"
 by blast
@@ -113,23 +113,23 @@
 lemma "(\<forall>x. P(x)) | Q  <->  (\<forall>x. P(x) | Q)"
 by blast
 
-text{*Discussed in Avron, Gentzen-Type Systems, Resolution and Tableaux,
-  JAR 10 (265-281), 1993.  Proof is trivial!*}
+text\<open>Discussed in Avron, Gentzen-Type Systems, Resolution and Tableaux,
+  JAR 10 (265-281), 1993.  Proof is trivial!\<close>
 lemma "~((\<exists>x.~P(x)) & ((\<exists>x. P(x)) | (\<exists>x. P(x) & Q(x))) & ~ (\<exists>x. P(x)))"
 by blast
 
-subsection{*Problems requiring quantifier duplication*}
+subsection\<open>Problems requiring quantifier duplication\<close>
 
-text{*Theorem B of Peter Andrews, Theorem Proving via General Matings, 
-  JACM 28 (1981).*}
+text\<open>Theorem B of Peter Andrews, Theorem Proving via General Matings, 
+  JACM 28 (1981).\<close>
 lemma "(\<exists>x. \<forall>y. P(x) <-> P(y)) --> ((\<exists>x. P(x)) <-> (\<forall>y. P(y)))"
 by blast
 
-text{*Needs multiple instantiation of ALL.*}
+text\<open>Needs multiple instantiation of ALL.\<close>
 lemma "(\<forall>x. P(x)-->P(f(x)))  &  P(d)-->P(f(f(f(d))))"
 by blast
 
-text{*Needs double instantiation of the quantifier*}
+text\<open>Needs double instantiation of the quantifier\<close>
 lemma "\<exists>x. P(x) --> P(a) & P(b)"
 by blast
 
@@ -139,7 +139,7 @@
 lemma "\<exists>x. (\<exists>y. P(y)) --> P(x)"
 by blast
 
-text{*V. Lifschitz, What Is the Inverse Method?, JAR 5 (1989), 1--23.  NOT PROVED*}
+text\<open>V. Lifschitz, What Is the Inverse Method?, JAR 5 (1989), 1--23.  NOT PROVED\<close>
 lemma "\<exists>x x'. \<forall>y. \<exists>z z'.  
                 (~P(y,y) | P(x,x) | ~S(z,x)) &  
                 (S(x,y) | ~S(y,z) | Q(z',z'))  &  
@@ -148,40 +148,40 @@
 
 
 
-subsection{*Hard examples with quantifiers*}
+subsection\<open>Hard examples with quantifiers\<close>
 
-text{*18*}
+text\<open>18\<close>
 lemma "\<exists>y. \<forall>x. P(y)-->P(x)"
 by blast
 
-text{*19*}
+text\<open>19\<close>
 lemma "\<exists>x. \<forall>y z. (P(y)-->Q(z)) --> (P(x)-->Q(x))"
 by blast
 
-text{*20*}
+text\<open>20\<close>
 lemma "(\<forall>x y. \<exists>z. \<forall>w. (P(x)&Q(y)-->R(z)&S(w)))      
     --> (\<exists>x y. P(x) & Q(y)) --> (\<exists>z. R(z))"
 by blast
 
-text{*21*}
+text\<open>21\<close>
 lemma "(\<exists>x. P-->Q(x)) & (\<exists>x. Q(x)-->P) --> (\<exists>x. P<->Q(x))"
 by blast
 
-text{*22*}
+text\<open>22\<close>
 lemma "(\<forall>x. P <-> Q(x))  -->  (P <-> (\<forall>x. Q(x)))"
 by blast
 
-text{*23*}
+text\<open>23\<close>
 lemma "(\<forall>x. P | Q(x))  <->  (P | (\<forall>x. Q(x)))"
 by blast
 
-text{*24*}
+text\<open>24\<close>
 lemma "~(\<exists>x. S(x)&Q(x)) & (\<forall>x. P(x) --> Q(x)|R(x)) &   
       (~(\<exists>x. P(x)) --> (\<exists>x. Q(x))) & (\<forall>x. Q(x)|R(x) --> S(x))   
     --> (\<exists>x. P(x)&R(x))"
 by blast
 
-text{*25*}
+text\<open>25\<close>
 lemma "(\<exists>x. P(x)) &   
       (\<forall>x. L(x) --> ~ (M(x) & R(x))) &   
       (\<forall>x. P(x) --> (M(x) & L(x))) &    
@@ -189,13 +189,13 @@
     --> (\<exists>x. Q(x)&P(x))"
 by blast
 
-text{*26*}
+text\<open>26\<close>
 lemma "((\<exists>x. p(x)) <-> (\<exists>x. q(x))) &  
       (\<forall>x. \<forall>y. p(x) & q(y) --> (r(x) <-> s(y)))    
   --> ((\<forall>x. p(x)-->r(x)) <-> (\<forall>x. q(x)-->s(x)))"
 by blast
 
-text{*27*}
+text\<open>27\<close>
 lemma "(\<exists>x. P(x) & ~Q(x)) &    
       (\<forall>x. P(x) --> R(x)) &    
       (\<forall>x. M(x) & L(x) --> P(x)) &    
@@ -203,63 +203,63 @@
   --> (\<forall>x. M(x) --> ~L(x))"
 by blast
 
-text{*28.  AMENDED*}
+text\<open>28.  AMENDED\<close>
 lemma "(\<forall>x. P(x) --> (\<forall>x. Q(x))) &    
         ((\<forall>x. Q(x)|R(x)) --> (\<exists>x. Q(x)&S(x))) &   
         ((\<exists>x. S(x)) --> (\<forall>x. L(x) --> M(x)))   
     --> (\<forall>x. P(x) & L(x) --> M(x))"
 by blast
 
-text{*29.  Essentially the same as Principia Mathematica *11.71*}
+text\<open>29.  Essentially the same as Principia Mathematica *11.71\<close>
 lemma "(\<exists>x. P(x)) & (\<exists>y. Q(y))   
     --> ((\<forall>x. P(x)-->R(x)) & (\<forall>y. Q(y)-->S(y))   <->      
          (\<forall>x y. P(x) & Q(y) --> R(x) & S(y)))"
 by blast
 
-text{*30*}
+text\<open>30\<close>
 lemma "(\<forall>x. P(x) | Q(x) --> ~ R(x)) &  
       (\<forall>x. (Q(x) --> ~ S(x)) --> P(x) & R(x))   
     --> (\<forall>x. S(x))"
 by blast
 
-text{*31*}
+text\<open>31\<close>
 lemma "~(\<exists>x. P(x) & (Q(x) | R(x))) &  
         (\<exists>x. L(x) & P(x)) &  
         (\<forall>x. ~ R(x) --> M(x))   
     --> (\<exists>x. L(x) & M(x))"
 by blast
 
-text{*32*}
+text\<open>32\<close>
 lemma "(\<forall>x. P(x) & (Q(x)|R(x))-->S(x)) &  
       (\<forall>x. S(x) & R(x) --> L(x)) &  
       (\<forall>x. M(x) --> R(x))   
       --> (\<forall>x. P(x) & M(x) --> L(x))"
 by blast
 
-text{*33*}
+text\<open>33\<close>
 lemma "(\<forall>x. P(a) & (P(x)-->P(b))-->P(c))  <->     
       (\<forall>x. (~P(a) | P(x) | P(c)) & (~P(a) | ~P(b) | P(c)))"
 by blast
 
-text{*34  AMENDED (TWICE!!).  Andrews's challenge*}
+text\<open>34  AMENDED (TWICE!!).  Andrews's challenge\<close>
 lemma "((\<exists>x. \<forall>y. p(x) <-> p(y))  <->                 
        ((\<exists>x. q(x)) <-> (\<forall>y. p(y))))     <->         
       ((\<exists>x. \<forall>y. q(x) <-> q(y))  <->                 
        ((\<exists>x. p(x)) <-> (\<forall>y. q(y))))"
 by blast
 
-text{*35*}
+text\<open>35\<close>
 lemma "\<exists>x y. P(x,y) -->  (\<forall>u v. P(u,v))"
 by blast
 
-text{*36*}
+text\<open>36\<close>
 lemma "(\<forall>x. \<exists>y. J(x,y)) &  
       (\<forall>x. \<exists>y. G(x,y)) &  
       (\<forall>x y. J(x,y) | G(x,y) --> (\<forall>z. J(y,z) | G(y,z) --> H(x,z)))    
   --> (\<forall>x. \<exists>y. H(x,y))"
 by blast
 
-text{*37*}
+text\<open>37\<close>
 lemma "(\<forall>z. \<exists>w. \<forall>x. \<exists>y.  
            (P(x,z)-->P(y,w)) & P(y,z) & (P(y,w) --> (\<exists>u. Q(u,w)))) &  
       (\<forall>x z. ~P(x,z) --> (\<exists>y. Q(y,z))) &  
@@ -267,7 +267,7 @@
       --> (\<forall>x. \<exists>y. R(x,y))"
 by blast
 
-text{*38*}
+text\<open>38\<close>
 lemma "(\<forall>x. p(a) & (p(x) --> (\<exists>y. p(y) & r(x,y))) -->         
              (\<exists>z. \<exists>w. p(z) & r(x,w) & r(w,z)))  <->          
       (\<forall>x. (~p(a) | p(x) | (\<exists>z. \<exists>w. p(z) & r(x,w) & r(w,z))) &     
@@ -275,25 +275,25 @@
               (\<exists>z. \<exists>w. p(z) & r(x,w) & r(w,z))))"
 by blast
 
-text{*39*}
+text\<open>39\<close>
 lemma "~ (\<exists>x. \<forall>y. F(y,x) <-> ~F(y,y))"
 by blast
 
-text{*40.  AMENDED*}
+text\<open>40.  AMENDED\<close>
 lemma "(\<exists>y. \<forall>x. F(x,y) <-> F(x,x)) -->   
               ~(\<forall>x. \<exists>y. \<forall>z. F(z,y) <-> ~ F(z,x))"
 by blast
 
-text{*41*}
+text\<open>41\<close>
 lemma "(\<forall>z. \<exists>y. \<forall>x. f(x,y) <-> f(x,z) & ~ f(x,x))         
           --> ~ (\<exists>z. \<forall>x. f(x,z))"
 by blast
 
-text{*42*}
+text\<open>42\<close>
 lemma "~ (\<exists>y. \<forall>x. p(x,y) <-> ~ (\<exists>z. p(x,z) & p(z,x)))"
 by blast
 
-text{*43*}
+text\<open>43\<close>
 lemma "(\<forall>x. \<forall>y. q(x,y) <-> (\<forall>z. p(z,x) <-> p(z,y)))      
           --> (\<forall>x. \<forall>y. q(x,y) <-> q(y,x))"
 by blast
@@ -302,13 +302,13 @@
   Deepen_tac alone requires 253 secs.  Or
   by (mini_tac @{context} 1 THEN Deepen_tac 5 1) *)
 
-text{*44*}
+text\<open>44\<close>
 lemma "(\<forall>x. f(x) --> (\<exists>y. g(y) & h(x,y) & (\<exists>y. g(y) & ~ h(x,y)))) &  
       (\<exists>x. j(x) & (\<forall>y. g(y) --> h(x,y)))                    
       --> (\<exists>x. j(x) & ~f(x))"
 by blast
 
-text{*45*}
+text\<open>45\<close>
 lemma "(\<forall>x. f(x) & (\<forall>y. g(y) & h(x,y) --> j(x,y))   
                       --> (\<forall>y. g(y) & h(x,y) --> k(y))) &     
       ~ (\<exists>y. l(y) & k(y)) &                                    
@@ -318,7 +318,7 @@
 by blast
 
 
-text{*46*}
+text\<open>46\<close>
 lemma "(\<forall>x. f(x) & (\<forall>y. f(y) & h(y,x) --> g(y)) --> g(x)) &       
       ((\<exists>x. f(x) & ~g(x)) -->                                     
        (\<exists>x. f(x) & ~g(x) & (\<forall>y. f(y) & ~g(y) --> j(x,y)))) &     
@@ -327,42 +327,42 @@
 by blast
 
 
-subsection{*Problems (mainly) involving equality or functions*}
+subsection\<open>Problems (mainly) involving equality or functions\<close>
 
-text{*48*}
+text\<open>48\<close>
 lemma "(a=b | c=d) & (a=c | b=d) --> a=d | b=c"
 by blast
 
-text{*49  NOT PROVED AUTOMATICALLY.  Hard because it involves substitution
+text\<open>49  NOT PROVED AUTOMATICALLY.  Hard because it involves substitution
   for Vars
-  the type constraint ensures that x,y,z have the same type as a,b,u. *}
+  the type constraint ensures that x,y,z have the same type as a,b,u.\<close>
 lemma "(\<exists>x y::'a. \<forall>z. z=x | z=y) & P(a) & P(b) & a~=b  
                 --> (\<forall>u::'a. P(u))"
 apply safe
 apply (rule_tac x = a in allE, assumption)
 apply (rule_tac x = b in allE, assumption, fast)
-       --{*blast's treatment of equality can't do it*}
+       --\<open>blast's treatment of equality can't do it\<close>
 done
 
-text{*50.  (What has this to do with equality?) *}
+text\<open>50.  (What has this to do with equality?)\<close>
 lemma "(\<forall>x. P(a,x) | (\<forall>y. P(x,y))) --> (\<exists>x. \<forall>y. P(x,y))"
 by blast
 
-text{*51*}
+text\<open>51\<close>
 lemma "(\<exists>z w. \<forall>x y. P(x,y) <->  (x=z & y=w)) -->   
       (\<exists>z. \<forall>x. \<exists>w. (\<forall>y. P(x,y) <-> y=w) <-> x=z)"
 by blast
 
-text{*52*}
-text{*Almost the same as 51. *}
+text\<open>52\<close>
+text\<open>Almost the same as 51.\<close>
 lemma "(\<exists>z w. \<forall>x y. P(x,y) <->  (x=z & y=w)) -->   
       (\<exists>w. \<forall>y. \<exists>z. (\<forall>x. P(x,y) <-> x=z) <-> y=w)"
 by blast
 
-text{*55*}
+text\<open>55\<close>
 
-text{*Non-equational version, from Manthey and Bry, CADE-9 (Springer, 1988).
-  fast DISCOVERS who killed Agatha. *}
+text\<open>Non-equational version, from Manthey and Bry, CADE-9 (Springer, 1988).
+  fast DISCOVERS who killed Agatha.\<close>
 schematic_lemma "lives(agatha) & lives(butler) & lives(charles) &  
    (killed(agatha,agatha) | killed(butler,agatha) | killed(charles,agatha)) &  
    (\<forall>x y. killed(x,y) --> hates(x,y) & ~richer(x,y)) &  
@@ -372,53 +372,53 @@
    (\<forall>x. hates(agatha,x) --> hates(butler,x)) &  
    (\<forall>x. ~hates(x,agatha) | ~hates(x,butler) | ~hates(x,charles)) -->  
     killed(?who,agatha)"
-by fast --{*MUCH faster than blast*}
+by fast --\<open>MUCH faster than blast\<close>
 
 
-text{*56*}
+text\<open>56\<close>
 lemma "(\<forall>x. (\<exists>y. P(y) & x=f(y)) --> P(x)) <-> (\<forall>x. P(x) --> P(f(x)))"
 by blast
 
-text{*57*}
+text\<open>57\<close>
 lemma "P(f(a,b), f(b,c)) & P(f(b,c), f(a,c)) &  
      (\<forall>x y z. P(x,y) & P(y,z) --> P(x,z))    -->   P(f(a,b), f(a,c))"
 by blast
 
-text{*58  NOT PROVED AUTOMATICALLY*}
+text\<open>58  NOT PROVED AUTOMATICALLY\<close>
 lemma "(\<forall>x y. f(x)=g(y)) --> (\<forall>x y. f(f(x))=f(g(y)))"
 by (slow elim: subst_context)
 
 
-text{*59*}
+text\<open>59\<close>
 lemma "(\<forall>x. P(x) <-> ~P(f(x))) --> (\<exists>x. P(x) & ~P(f(x)))"
 by blast
 
-text{*60*}
+text\<open>60\<close>
 lemma "\<forall>x. P(x,f(x)) <-> (\<exists>y. (\<forall>z. P(z,y) --> P(z,f(x))) & P(x,y))"
 by blast
 
-text{*62 as corrected in JAR 18 (1997), page 135*}
+text\<open>62 as corrected in JAR 18 (1997), page 135\<close>
 lemma "(\<forall>x. p(a) & (p(x) --> p(f(x))) --> p(f(f(x))))  <->      
       (\<forall>x. (~p(a) | p(x) | p(f(f(x)))) &                       
               (~p(a) | ~p(f(x)) | p(f(f(x)))))"
 by blast
 
-text{*From Davis, Obvious Logical Inferences, IJCAI-81, 530-531
-  fast indeed copes!*}
+text\<open>From Davis, Obvious Logical Inferences, IJCAI-81, 530-531
+  fast indeed copes!\<close>
 lemma "(\<forall>x. F(x) & ~G(x) --> (\<exists>y. H(x,y) & J(y))) &  
               (\<exists>x. K(x) & F(x) & (\<forall>y. H(x,y) --> K(y))) &    
               (\<forall>x. K(x) --> ~G(x))  -->  (\<exists>x. K(x) & J(x))"
 by fast
 
-text{*From Rudnicki, Obvious Inferences, JAR 3 (1987), 383-393.  
-  It does seem obvious!*}
+text\<open>From Rudnicki, Obvious Inferences, JAR 3 (1987), 383-393.  
+  It does seem obvious!\<close>
 lemma "(\<forall>x. F(x) & ~G(x) --> (\<exists>y. H(x,y) & J(y))) &         
       (\<exists>x. K(x) & F(x) & (\<forall>y. H(x,y) --> K(y)))  &         
       (\<forall>x. K(x) --> ~G(x))   -->   (\<exists>x. K(x) --> ~G(x))"
 by fast
 
-text{*Halting problem: Formulation of Li Dafa (AAR Newsletter 27, Oct 1994.)
-  author U. Egly*}
+text\<open>Halting problem: Formulation of Li Dafa (AAR Newsletter 27, Oct 1994.)
+  author U. Egly\<close>
 lemma "((\<exists>x. A(x) & (\<forall>y. C(y) --> (\<forall>z. D(x,y,z)))) -->                
    (\<exists>w. C(w) & (\<forall>y. C(y) --> (\<forall>z. D(w,y,z)))))                   
   &                                                                      
@@ -437,10 +437,10 @@
    -->                   
    ~ (\<exists>x. A(x) & (\<forall>y. C(y) --> (\<forall>z. D(x,y,z))))"
 by (blast 12)
-   --{*Needed because the search for depths below 12 is very slow*}
+   --\<open>Needed because the search for depths below 12 is very slow\<close>
 
 
-text{*Halting problem II: credited to M. Bruschi by Li Dafa in JAR 18(1), p.105*}
+text\<open>Halting problem II: credited to M. Bruschi by Li Dafa in JAR 18(1), p.105\<close>
 lemma "((\<exists>x. A(x) & (\<forall>y. C(y) --> (\<forall>z. D(x,y,z)))) -->        
    (\<exists>w. C(w) & (\<forall>y. C(y) --> (\<forall>z. D(w,y,z)))))           
   &                                                              
@@ -464,12 +464,12 @@
    ~ (\<exists>x. A(x) & (\<forall>y. C(y) --> (\<forall>z. D(x,y,z))))"
 by blast
 
-text{* Challenge found on info-hol *}
+text\<open>Challenge found on info-hol\<close>
 lemma "\<forall>x. \<exists>v w. \<forall>y z. P(x) & Q(y) --> (P(v) | R(w)) & (R(z) --> Q(v))"
 by blast
 
-text{*Attributed to Lewis Carroll by S. G. Pulman.  The first or last assumption
-can be deleted.*}
+text\<open>Attributed to Lewis Carroll by S. G. Pulman.  The first or last assumption
+can be deleted.\<close>
 lemma "(\<forall>x. honest(x) & industrious(x) --> healthy(x)) &  
       ~ (\<exists>x. grocer(x) & healthy(x)) &  
       (\<forall>x. industrious(x) & grocer(x) --> honest(x)) &  
--- a/src/FOL/ex/First_Order_Logic.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/First_Order_Logic.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,18 +2,18 @@
     Author:     Markus Wenzel, TU Munich
 *)
 
-section {* A simple formulation of First-Order Logic *}
+section \<open>A simple formulation of First-Order Logic\<close>
 
 theory First_Order_Logic imports Pure begin
 
-text {*
+text \<open>
   The subsequent theory development illustrates single-sorted
   intuitionistic first-order logic with equality, formulated within
   the Pure framework.  Actually this is not an example of
   Isabelle/FOL, but of Isabelle/Pure.
-*}
+\<close>
 
-subsection {* Syntax *}
+subsection \<open>Syntax\<close>
 
 typedecl i
 typedecl o
@@ -22,7 +22,7 @@
   Trueprop :: "o \<Rightarrow> prop"    ("_" 5)
 
 
-subsection {* Propositional logic *}
+subsection \<open>Propositional logic\<close>
 
 axiomatization
   false :: o  ("\<bottom>") and
@@ -47,8 +47,8 @@
   assumes "A \<and> B"
   obtains A and B
 proof
-  from `A \<and> B` show A by (rule conjD1)
-  from `A \<and> B` show B by (rule conjD2)
+  from \<open>A \<and> B\<close> show A by (rule conjD1)
+  from \<open>A \<and> B\<close> show B by (rule conjD2)
 qed
 
 definition true :: o  ("\<top>")
@@ -101,7 +101,7 @@
 qed
 
 
-subsection {* Equality *}
+subsection \<open>Equality\<close>
 
 axiomatization
   equal :: "i \<Rightarrow> i \<Rightarrow> o"  (infixl "=" 50)
@@ -119,7 +119,7 @@
 qed
 
 
-subsection {* Quantifiers *}
+subsection \<open>Quantifiers\<close>
 
 axiomatization
   All :: "(i \<Rightarrow> o) \<Rightarrow> o"  (binder "\<forall>" 10) and
--- a/src/FOL/ex/Foundation.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Foundation.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -18,7 +18,7 @@
 apply assumption
 done
 
-text {*A form of conj-elimination*}
+text \<open>A form of conj-elimination\<close>
 lemma
   assumes "A & B"
     and "A ==> B ==> C"
@@ -99,7 +99,7 @@
 apply (rule refl)?
 oops
 
-text {* Parallel lifting example. *}
+text \<open>Parallel lifting example.\<close>
 lemma "EX u. ALL x. EX v. ALL y. EX w. P(u,x,v,y,w)"
 apply (rule exI allI)
 apply (rule exI allI)
@@ -121,7 +121,7 @@
 apply assumption
 done
 
-text {* A bigger demonstration of quantifiers -- not in the paper. *}
+text \<open>A bigger demonstration of quantifiers -- not in the paper.\<close>
 lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
 apply (rule impI)
 apply (rule allI)
--- a/src/FOL/ex/If.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/If.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1991  University of Cambridge
 *)
 
-section {* First-Order Logic: the 'if' example *}
+section \<open>First-Order Logic: the 'if' example\<close>
 
 theory If imports FOL begin
 
@@ -28,7 +28,7 @@
 apply (rule ifI)
 oops
 
-text{*Trying again from the beginning in order to use @{text blast}*}
+text\<open>Trying again from the beginning in order to use @{text blast}\<close>
 declare ifI [intro!]
 declare ifE [elim!]
 
@@ -39,22 +39,22 @@
 lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))"
 by blast
 
-text{*Trying again from the beginning in order to prove from the definitions*}
+text\<open>Trying again from the beginning in order to prove from the definitions\<close>
 lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))"
 by (simp add: if_def, blast)
 
 
-text{*An invalid formula.  High-level rules permit a simpler diagnosis*}
+text\<open>An invalid formula.  High-level rules permit a simpler diagnosis\<close>
 lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))"
 apply auto
-  -- {*The next step will fail unless subgoals remain*}
+  -- \<open>The next step will fail unless subgoals remain\<close>
 apply (tactic all_tac)
 oops
 
-text{*Trying again from the beginning in order to prove from the definitions*}
+text\<open>Trying again from the beginning in order to prove from the definitions\<close>
 lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))"
 apply (simp add: if_def, auto) 
-  -- {*The next step will fail unless subgoals remain*}
+  -- \<open>The next step will fail unless subgoals remain\<close>
 apply (tactic all_tac)
 oops
 
--- a/src/FOL/ex/Intro.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Intro.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -5,13 +5,13 @@
 Derives some inference rules, illustrating the use of definitions.
 *)
 
-section {* Examples for the manual ``Introduction to Isabelle'' *}
+section \<open>Examples for the manual ``Introduction to Isabelle''\<close>
 
 theory Intro
 imports FOL
 begin
 
-subsubsection {* Some simple backward proofs *}
+subsubsection \<open>Some simple backward proofs\<close>
 
 lemma mythm: "P|P --> P"
 apply (rule impI)
@@ -41,7 +41,7 @@
 done
 
 
-subsubsection {* Demonstration of @{text "fast"} *}
+subsubsection \<open>Demonstration of @{text "fast"}\<close>
 
 lemma "(EX y. ALL x. J(y,x) <-> ~J(x,x))
         -->  ~ (ALL x. EX y. ALL z. J(z,y) <-> ~ J(z,x))"
@@ -55,7 +55,7 @@
 done
 
 
-subsubsection {* Derivation of conjunction elimination rule *}
+subsubsection \<open>Derivation of conjunction elimination rule\<close>
 
 lemma
   assumes major: "P&Q"
@@ -67,9 +67,9 @@
 done
 
 
-subsection {* Derived rules involving definitions *}
+subsection \<open>Derived rules involving definitions\<close>
 
-text {* Derivation of negation introduction *}
+text \<open>Derivation of negation introduction\<close>
 
 lemma
   assumes "P ==> False"
@@ -90,7 +90,7 @@
 apply (rule minor)
 done
 
-text {* Alternative proof of the result above *}
+text \<open>Alternative proof of the result above\<close>
 lemma
   assumes major: "~P"
     and minor: P
--- a/src/FOL/ex/Intuitionistic.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Intuitionistic.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1991  University of Cambridge
 *)
 
-section {* Intuitionistic First-Order Logic *}
+section \<open>Intuitionistic First-Order Logic\<close>
 
 theory Intuitionistic
 imports IFOL
@@ -21,7 +21,7 @@
 *)
 
 
-text{*Metatheorem (for \emph{propositional} formulae):
+text\<open>Metatheorem (for \emph{propositional} formulae):
   $P$ is classically provable iff $\neg\neg P$ is intuitionistically provable.
   Therefore $\neg P$ is classically provable iff it is intuitionistically
   provable.
@@ -34,78 +34,78 @@
 intuitionistically.  The latter is intuitionistically equivalent to $\neg\neg
 Q\rightarrow\neg\neg P$, hence to $\neg\neg P$, since $\neg\neg Q$ is
 intuitionistically provable.  Finally, if $P$ is a negation then $\neg\neg P$
-is intuitionstically equivalent to $P$.  [Andy Pitts] *}
+is intuitionstically equivalent to $P$.  [Andy Pitts]\<close>
 
 lemma "~~(P&Q) <-> ~~P & ~~Q"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 lemma "~~ ((~P --> Q) --> (~P --> ~Q) --> P)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*Double-negation does NOT distribute over disjunction*}
+text\<open>Double-negation does NOT distribute over disjunction\<close>
 
 lemma "~~(P-->Q)  <-> (~~P --> ~~Q)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 lemma "~~~P <-> ~P"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 lemma "~~((P --> Q | R)  -->  (P-->Q) | (P-->R))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 lemma "(P<->Q) <-> (Q<->P)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 lemma "((P --> (Q | (Q-->R))) --> R) --> R"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 lemma "(((G-->A) --> J) --> D --> E) --> (((H-->B)-->I)-->C-->J)
       --> (A-->H) --> F --> G --> (((C-->B)-->I)-->D)-->(A-->C)
       --> (((F-->A)-->B) --> I) --> E"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 
-text{*Lemmas for the propositional double-negation translation*}
+text\<open>Lemmas for the propositional double-negation translation\<close>
 
 lemma "P --> ~~P"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 lemma "~~(~~P --> P)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 lemma "~~P & ~~(P --> Q) --> ~~Q"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 
-text{*The following are classically but not constructively valid.
-      The attempt to prove them terminates quickly!*}
+text\<open>The following are classically but not constructively valid.
+      The attempt to prove them terminates quickly!\<close>
 lemma "((P-->Q) --> P)  -->  P"
-apply (tactic{*IntPr.fast_tac @{context} 1*} | -)
-apply (rule asm_rl) --{*Checks that subgoals remain: proof failed.*}
+apply (tactic\<open>IntPr.fast_tac @{context} 1\<close> | -)
+apply (rule asm_rl) --\<open>Checks that subgoals remain: proof failed.\<close>
 oops
 
 lemma "(P&Q-->R)  -->  (P-->R) | (Q-->R)"
-apply (tactic{*IntPr.fast_tac @{context} 1*} | -)
-apply (rule asm_rl) --{*Checks that subgoals remain: proof failed.*}
+apply (tactic\<open>IntPr.fast_tac @{context} 1\<close> | -)
+apply (rule asm_rl) --\<open>Checks that subgoals remain: proof failed.\<close>
 oops
 
 
-subsection{*de Bruijn formulae*}
+subsection\<open>de Bruijn formulae\<close>
 
-text{*de Bruijn formula with three predicates*}
+text\<open>de Bruijn formula with three predicates\<close>
 lemma "((P<->Q) --> P&Q&R) &
                ((Q<->R) --> P&Q&R) &
                ((R<->P) --> P&Q&R) --> P&Q&R"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 
-text{*de Bruijn formula with five predicates*}
+text\<open>de Bruijn formula with five predicates\<close>
 lemma "((P<->Q) --> P&Q&R&S&T) &
                ((Q<->R) --> P&Q&R&S&T) &
                ((R<->S) --> P&Q&R&S&T) &
                ((S<->T) --> P&Q&R&S&T) &
                ((T<->P) --> P&Q&R&S&T) --> P&Q&R&S&T"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 (*** Problems from of Sahlin, Franzen and Haridi,
@@ -113,313 +113,313 @@
      J. Logic and Comp. 2 (5), October 1992, 619-656.
 ***)
 
-text{*Problem 1.1*}
+text\<open>Problem 1.1\<close>
 lemma "(ALL x. EX y. ALL z. p(x) & q(y) & r(z)) <->
       (ALL z. EX y. ALL x. p(x) & q(y) & r(z))"
-by (tactic{*IntPr.best_dup_tac @{context} 1*})  --{*SLOW*}
+by (tactic\<open>IntPr.best_dup_tac @{context} 1\<close>)  --\<open>SLOW\<close>
 
-text{*Problem 3.1*}
+text\<open>Problem 3.1\<close>
 lemma "~ (EX x. ALL y. mem(y,x) <-> ~ mem(x,x))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*Problem 4.1: hopeless!*}
+text\<open>Problem 4.1: hopeless!\<close>
 lemma "(ALL x. p(x) --> p(h(x)) | p(g(x))) & (EX x. p(x)) & (ALL x. ~p(h(x)))
       --> (EX x. p(g(g(g(g(g(x)))))))"
 oops
 
 
-subsection{*Intuitionistic FOL: propositional problems based on Pelletier.*}
+subsection\<open>Intuitionistic FOL: propositional problems based on Pelletier.\<close>
 
-text{*~~1*}
+text\<open>~~1\<close>
 lemma "~~((P-->Q)  <->  (~Q --> ~P))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~2*}
+text\<open>~~2\<close>
 lemma "~~(~~P  <->  P)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*3*}
+text\<open>3\<close>
 lemma "~(P-->Q) --> (Q-->P)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~4*}
+text\<open>~~4\<close>
 lemma "~~((~P-->Q)  <->  (~Q --> P))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~5*}
+text\<open>~~5\<close>
 lemma "~~((P|Q-->P|R) --> P|(Q-->R))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~6*}
+text\<open>~~6\<close>
 lemma "~~(P | ~P)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~7*}
+text\<open>~~7\<close>
 lemma "~~(P | ~~~P)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~8.  Peirce's law*}
+text\<open>~~8.  Peirce's law\<close>
 lemma "~~(((P-->Q) --> P)  -->  P)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*9*}
+text\<open>9\<close>
 lemma "((P|Q) & (~P|Q) & (P| ~Q)) --> ~ (~P | ~Q)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*10*}
+text\<open>10\<close>
 lemma "(Q-->R) --> (R-->P&Q) --> (P-->(Q|R)) --> (P<->Q)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-subsection{*11.  Proved in each direction (incorrectly, says Pelletier!!) *}
+subsection\<open>11.  Proved in each direction (incorrectly, says Pelletier!!)\<close>
 lemma "P<->P"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~12.  Dijkstra's law  *}
+text\<open>~~12.  Dijkstra's law\<close>
 lemma "~~(((P <-> Q) <-> R)  <->  (P <-> (Q <-> R)))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 lemma "((P <-> Q) <-> R)  -->  ~~(P <-> (Q <-> R))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*13.  Distributive law*}
+text\<open>13.  Distributive law\<close>
 lemma "P | (Q & R)  <-> (P | Q) & (P | R)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~14*}
+text\<open>~~14\<close>
 lemma "~~((P <-> Q) <-> ((Q | ~P) & (~Q|P)))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~15*}
+text\<open>~~15\<close>
 lemma "~~((P --> Q) <-> (~P | Q))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~16*}
+text\<open>~~16\<close>
 lemma "~~((P-->Q) | (Q-->P))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~17*}
+text\<open>~~17\<close>
 lemma "~~(((P & (Q-->R))-->S) <-> ((~P | Q | S) & (~P | ~R | S)))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*Dijkstra's "Golden Rule"*}
+text\<open>Dijkstra's "Golden Rule"\<close>
 lemma "(P&Q) <-> P <-> Q <-> (P|Q)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 
-subsection{*****Examples with quantifiers*****}
+subsection\<open>****Examples with quantifiers****\<close>
 
 
-subsection{*The converse is classical in the following implications...*}
+subsection\<open>The converse is classical in the following implications...\<close>
 
 lemma "(EX x. P(x)-->Q)  -->  (ALL x. P(x)) --> Q"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 lemma "((ALL x. P(x))-->Q) --> ~ (ALL x. P(x) & ~Q)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 lemma "((ALL x. ~P(x))-->Q)  -->  ~ (ALL x. ~ (P(x)|Q))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 lemma "(ALL x. P(x)) | Q  -->  (ALL x. P(x) | Q)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 lemma "(EX x. P --> Q(x)) --> (P --> (EX x. Q(x)))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 
 
-subsection{*The following are not constructively valid!*}
-text{*The attempt to prove them terminates quickly!*}
+subsection\<open>The following are not constructively valid!\<close>
+text\<open>The attempt to prove them terminates quickly!\<close>
 
 lemma "((ALL x. P(x))-->Q) --> (EX x. P(x)-->Q)"
-apply (tactic{*IntPr.fast_tac @{context} 1*} | -)
-apply (rule asm_rl) --{*Checks that subgoals remain: proof failed.*}
+apply (tactic\<open>IntPr.fast_tac @{context} 1\<close> | -)
+apply (rule asm_rl) --\<open>Checks that subgoals remain: proof failed.\<close>
 oops
 
 lemma "(P --> (EX x. Q(x))) --> (EX x. P-->Q(x))"
-apply (tactic{*IntPr.fast_tac @{context} 1*} | -)
-apply (rule asm_rl) --{*Checks that subgoals remain: proof failed.*}
+apply (tactic\<open>IntPr.fast_tac @{context} 1\<close> | -)
+apply (rule asm_rl) --\<open>Checks that subgoals remain: proof failed.\<close>
 oops
 
 lemma "(ALL x. P(x) | Q) --> ((ALL x. P(x)) | Q)"
-apply (tactic{*IntPr.fast_tac @{context} 1*} | -)
-apply (rule asm_rl) --{*Checks that subgoals remain: proof failed.*}
+apply (tactic\<open>IntPr.fast_tac @{context} 1\<close> | -)
+apply (rule asm_rl) --\<open>Checks that subgoals remain: proof failed.\<close>
 oops
 
 lemma "(ALL x. ~~P(x)) --> ~~(ALL x. P(x))"
-apply (tactic{*IntPr.fast_tac @{context} 1*} | -)
-apply (rule asm_rl) --{*Checks that subgoals remain: proof failed.*}
+apply (tactic\<open>IntPr.fast_tac @{context} 1\<close> | -)
+apply (rule asm_rl) --\<open>Checks that subgoals remain: proof failed.\<close>
 oops
 
-text{*Classically but not intuitionistically valid.  Proved by a bug in 1986!*}
+text\<open>Classically but not intuitionistically valid.  Proved by a bug in 1986!\<close>
 lemma "EX x. Q(x) --> (ALL x. Q(x))"
-apply (tactic{*IntPr.fast_tac @{context} 1*} | -)
-apply (rule asm_rl) --{*Checks that subgoals remain: proof failed.*}
+apply (tactic\<open>IntPr.fast_tac @{context} 1\<close> | -)
+apply (rule asm_rl) --\<open>Checks that subgoals remain: proof failed.\<close>
 oops
 
 
-subsection{*Hard examples with quantifiers*}
+subsection\<open>Hard examples with quantifiers\<close>
 
-text{*The ones that have not been proved are not known to be valid!
-  Some will require quantifier duplication -- not currently available*}
+text\<open>The ones that have not been proved are not known to be valid!
+  Some will require quantifier duplication -- not currently available\<close>
 
-text{*~~18*}
+text\<open>~~18\<close>
 lemma "~~(EX y. ALL x. P(y)-->P(x))"
-oops  --{*NOT PROVED*}
+oops  --\<open>NOT PROVED\<close>
 
-text{*~~19*}
+text\<open>~~19\<close>
 lemma "~~(EX x. ALL y z. (P(y)-->Q(z)) --> (P(x)-->Q(x)))"
-oops  --{*NOT PROVED*}
+oops  --\<open>NOT PROVED\<close>
 
-text{*20*}
+text\<open>20\<close>
 lemma "(ALL x y. EX z. ALL w. (P(x)&Q(y)-->R(z)&S(w)))
     --> (EX x y. P(x) & Q(y)) --> (EX z. R(z))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*21*}
+text\<open>21\<close>
 lemma "(EX x. P-->Q(x)) & (EX x. Q(x)-->P) --> ~~(EX x. P<->Q(x))"
-oops --{*NOT PROVED; needs quantifier duplication*}
+oops --\<open>NOT PROVED; needs quantifier duplication\<close>
 
-text{*22*}
+text\<open>22\<close>
 lemma "(ALL x. P <-> Q(x))  -->  (P <-> (ALL x. Q(x)))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~23*}
+text\<open>~~23\<close>
 lemma "~~ ((ALL x. P | Q(x))  <->  (P | (ALL x. Q(x))))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*24*}
+text\<open>24\<close>
 lemma "~(EX x. S(x)&Q(x)) & (ALL x. P(x) --> Q(x)|R(x)) &
      (~(EX x. P(x)) --> (EX x. Q(x))) & (ALL x. Q(x)|R(x) --> S(x))
     --> ~~(EX x. P(x)&R(x))"
-txt{*Not clear why @{text fast_tac}, @{text best_tac}, @{text ASTAR} and 
-    @{text ITER_DEEPEN} all take forever*}
-apply (tactic{* IntPr.safe_tac @{context}*})
+txt\<open>Not clear why @{text fast_tac}, @{text best_tac}, @{text ASTAR} and 
+    @{text ITER_DEEPEN} all take forever\<close>
+apply (tactic\<open>IntPr.safe_tac @{context}\<close>)
 apply (erule impE)
-apply (tactic{*IntPr.fast_tac @{context} 1*})
-by (tactic{*IntPr.fast_tac @{context} 1*})
+apply (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*25*}
+text\<open>25\<close>
 lemma "(EX x. P(x)) &
         (ALL x. L(x) --> ~ (M(x) & R(x))) &
         (ALL x. P(x) --> (M(x) & L(x))) &
         ((ALL x. P(x)-->Q(x)) | (EX x. P(x)&R(x)))
     --> (EX x. Q(x)&P(x))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~26*}
+text\<open>~~26\<close>
 lemma "(~~(EX x. p(x)) <-> ~~(EX x. q(x))) &
       (ALL x. ALL y. p(x) & q(y) --> (r(x) <-> s(y)))
   --> ((ALL x. p(x)-->r(x)) <-> (ALL x. q(x)-->s(x)))"
-oops  --{*NOT PROVED*}
+oops  --\<open>NOT PROVED\<close>
 
-text{*27*}
+text\<open>27\<close>
 lemma "(EX x. P(x) & ~Q(x)) &
               (ALL x. P(x) --> R(x)) &
               (ALL x. M(x) & L(x) --> P(x)) &
               ((EX x. R(x) & ~ Q(x)) --> (ALL x. L(x) --> ~ R(x)))
           --> (ALL x. M(x) --> ~L(x))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~28.  AMENDED*}
+text\<open>~~28.  AMENDED\<close>
 lemma "(ALL x. P(x) --> (ALL x. Q(x))) &
         (~~(ALL x. Q(x)|R(x)) --> (EX x. Q(x)&S(x))) &
         (~~(EX x. S(x)) --> (ALL x. L(x) --> M(x)))
     --> (ALL x. P(x) & L(x) --> M(x))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*29.  Essentially the same as Principia Mathematica *11.71*}
+text\<open>29.  Essentially the same as Principia Mathematica *11.71\<close>
 lemma "(EX x. P(x)) & (EX y. Q(y))
     --> ((ALL x. P(x)-->R(x)) & (ALL y. Q(y)-->S(y))   <->
          (ALL x y. P(x) & Q(y) --> R(x) & S(y)))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~30*}
+text\<open>~~30\<close>
 lemma "(ALL x. (P(x) | Q(x)) --> ~ R(x)) &
         (ALL x. (Q(x) --> ~ S(x)) --> P(x) & R(x))
     --> (ALL x. ~~S(x))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*31*}
+text\<open>31\<close>
 lemma "~(EX x. P(x) & (Q(x) | R(x))) &
         (EX x. L(x) & P(x)) &
         (ALL x. ~ R(x) --> M(x))
     --> (EX x. L(x) & M(x))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*32*}
+text\<open>32\<close>
 lemma "(ALL x. P(x) & (Q(x)|R(x))-->S(x)) &
         (ALL x. S(x) & R(x) --> L(x)) &
         (ALL x. M(x) --> R(x))
     --> (ALL x. P(x) & M(x) --> L(x))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*~~33*}
+text\<open>~~33\<close>
 lemma "(ALL x. ~~(P(a) & (P(x)-->P(b))-->P(c)))  <->
       (ALL x. ~~((~P(a) | P(x) | P(c)) & (~P(a) | ~P(b) | P(c))))"
-apply (tactic{*IntPr.best_tac @{context} 1*})
+apply (tactic\<open>IntPr.best_tac @{context} 1\<close>)
 done
 
 
-text{*36*}
+text\<open>36\<close>
 lemma "(ALL x. EX y. J(x,y)) &
       (ALL x. EX y. G(x,y)) &
       (ALL x y. J(x,y) | G(x,y) --> (ALL z. J(y,z) | G(y,z) --> H(x,z)))
   --> (ALL x. EX y. H(x,y))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*37*}
+text\<open>37\<close>
 lemma "(ALL z. EX w. ALL x. EX y.
            ~~(P(x,z)-->P(y,w)) & P(y,z) & (P(y,w) --> (EX u. Q(u,w)))) &
         (ALL x z. ~P(x,z) --> (EX y. Q(y,z))) &
         (~~(EX x y. Q(x,y)) --> (ALL x. R(x,x)))
     --> ~~(ALL x. EX y. R(x,y))"
-oops  --{*NOT PROVED*}
+oops  --\<open>NOT PROVED\<close>
 
-text{*39*}
+text\<open>39\<close>
 lemma "~ (EX x. ALL y. F(y,x) <-> ~F(y,y))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*40.  AMENDED*}
+text\<open>40.  AMENDED\<close>
 lemma "(EX y. ALL x. F(x,y) <-> F(x,x)) -->
               ~(ALL x. EX y. ALL z. F(z,y) <-> ~ F(z,x))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*44*}
+text\<open>44\<close>
 lemma "(ALL x. f(x) -->
               (EX y. g(y) & h(x,y) & (EX y. g(y) & ~ h(x,y))))  &
               (EX x. j(x) & (ALL y. g(y) --> h(x,y)))
               --> (EX x. j(x) & ~f(x))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*48*}
+text\<open>48\<close>
 lemma "(a=b | c=d) & (a=c | b=d) --> a=d | b=c"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*51*}
+text\<open>51\<close>
 lemma "(EX z w. ALL x y. P(x,y) <->  (x=z & y=w)) -->
      (EX z. ALL x. EX w. (ALL y. P(x,y) <-> y=w) <-> x=z)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*52*}
-text{*Almost the same as 51. *}
+text\<open>52\<close>
+text\<open>Almost the same as 51.\<close>
 lemma "(EX z w. ALL x y. P(x,y) <->  (x=z & y=w)) -->
      (EX w. ALL y. EX z. (ALL x. P(x,y) <-> x=z) <-> y=w)"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*56*}
+text\<open>56\<close>
 lemma "(ALL x. (EX y. P(y) & x=f(y)) --> P(x)) <-> (ALL x. P(x) --> P(f(x)))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*57*}
+text\<open>57\<close>
 lemma "P(f(a,b), f(b,c)) & P(f(b,c), f(a,c)) &
      (ALL x y z. P(x,y) & P(y,z) --> P(x,z))    -->   P(f(a,b), f(a,c))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
-text{*60*}
+text\<open>60\<close>
 lemma "ALL x. P(x,f(x)) <-> (EX y. (ALL z. P(z,y) --> P(z,f(x))) & P(x,y))"
-by (tactic{*IntPr.fast_tac @{context} 1*})
+by (tactic\<open>IntPr.fast_tac @{context} 1\<close>)
 
 end
 
--- a/src/FOL/ex/Locale_Test/Locale_Test.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Locale_Test/Locale_Test.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -8,7 +8,7 @@
 imports Locale_Test1 Locale_Test2 Locale_Test3
 begin
 
-text {* Result of theory merge with distinct but identical interpretations *}
+text \<open>Result of theory merge with distinct but identical interpretations\<close>
 
 context mixin_thy_merge
 begin
--- a/src/FOL/ex/Locale_Test/Locale_Test1.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Locale_Test/Locale_Test1.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -21,7 +21,7 @@
   int_minus: "(-x) + x = 0" and
   int_minus2: "-(-x) = x"
 
-section {* Inference of parameter types *}
+section \<open>Inference of parameter types\<close>
 
 locale param1 = fixes p
 print_locale! param1
@@ -41,7 +41,7 @@
 print_locale! param4
 
 
-subsection {* Incremental type constraints *}
+subsection \<open>Incremental type constraints\<close>
 
 locale constraint1 =
   fixes  prod (infixl "**" 65)
@@ -55,7 +55,7 @@
 print_locale! constraint2
 
 
-section {* Inheritance *}
+section \<open>Inheritance\<close>
 
 locale semi =
   fixes prod (infixl "**" 65)
@@ -86,12 +86,12 @@
 locale pert_hom = d1: perturbation f d1 + d2: perturbation f d2 for f d1 d2
 print_locale! pert_hom thm pert_hom_def
 
-text {* Alternative expression, obtaining nicer names in @{text "semi f"}. *}
+text \<open>Alternative expression, obtaining nicer names in @{text "semi f"}.\<close>
 locale pert_hom' = semi f + d1: perturbation f d1 + d2: perturbation f d2 for f d1 d2
 print_locale! pert_hom' thm pert_hom'_def
 
 
-section {* Syntax declarations *}
+section \<open>Syntax declarations\<close>
 
 locale logic =
   fixes land (infixl "&&" 55)
@@ -126,7 +126,7 @@
 thm var.test_def
 
 
-text {* Under which circumstances term syntax remains active. *}
+text \<open>Under which circumstances term syntax remains active.\<close>
 
 locale "syntax" =
   fixes p1 :: "'a => 'b"
@@ -147,7 +147,7 @@
 
 thm d1_def d2_def  (* should print as "d1(?x) <-> ..." and "d2(?x) <-> ..." *)
 
-ML {*
+ML \<open>
   fun check_syntax ctxt thm expected =
     let
       val obtained =
@@ -157,14 +157,14 @@
       then error ("Theorem syntax '" ^ obtained ^ "' obtained, but '" ^ expected ^ "' expected.")
       else ()
     end;
-*}
+\<close>
 
 declare [[show_hyps]]
 
-ML {*
+ML \<open>
   check_syntax @{context} @{thm d1_def} "d1(?x) <-> ~ p2(p1(?x))";
   check_syntax @{context} @{thm d2_def} "d2(?x) <-> ~ p2(?x)";
-*}
+\<close>
 
 end
 
@@ -174,21 +174,21 @@
 thm d1_def d2_def
   (* should print as "syntax.d1(p3, p2, ?x) <-> ..." and "d2(?x) <-> ..." *)
 
-ML {*
+ML \<open>
   check_syntax @{context} @{thm d1_def} "syntax.d1(p3, p2, ?x) <-> ~ p2(p3(?x))";
   check_syntax @{context} @{thm d2_def} "d2(?x) <-> ~ p2(?x)";
-*}
+\<close>
 
 end
 
 
-section {* Foundational versions of theorems *}
+section \<open>Foundational versions of theorems\<close>
 
 thm logic.assoc
 thm logic.lor_def
 
 
-section {* Defines *}
+section \<open>Defines\<close>
 
 locale logic_def =
   fixes land (infixl "&&" 55)
@@ -217,7 +217,7 @@
 end
 
 
-section {* Notes *}
+section \<open>Notes\<close>
 
 (* A somewhat arcane homomorphism example *)
 
@@ -247,7 +247,7 @@
   shows True ..
 
 
-section {* Theorem statements *}
+section \<open>Theorem statements\<close>
 
 lemma (in lgrp) lcancel:
   "x ** y = x ** z <-> y = z"
@@ -278,13 +278,13 @@
 print_locale! rgrp
 
 
-subsection {* Patterns *}
+subsection \<open>Patterns\<close>
 
 lemma (in rgrp)
   assumes "y ** x = z ** x" (is ?a)
   shows "y = z" (is ?t)
 proof -
-  txt {* Weird proof involving patterns from context element and conclusion. *}
+  txt \<open>Weird proof involving patterns from context element and conclusion.\<close>
   {
     assume ?a
     then have "y ** (x ** inv(x)) = z ** (x ** inv(x))"
@@ -292,11 +292,11 @@
     then have ?t by (simp add: rone rinv)
   }
   note x = this
-  show ?t by (rule x [OF `?a`])
+  show ?t by (rule x [OF \<open>?a\<close>])
 qed
 
 
-section {* Interpretation between locales: sublocales *}
+section \<open>Interpretation between locales: sublocales\<close>
 
 sublocale lgrp < right: rgrp
 print_facts
@@ -436,7 +436,7 @@
 print_locale! trivial  (* No instance for y created (subsumed). *)
 
 
-subsection {* Sublocale, then interpretation in theory *}
+subsection \<open>Sublocale, then interpretation in theory\<close>
 
 interpretation int?: lgrp "op +" "0" "minus"
 proof unfold_locales
@@ -447,15 +447,15 @@
 interpretation int2?: semi "op +"
   by unfold_locales  (* subsumed, thm int2.assoc not generated *)
 
-ML {* (Global_Theory.get_thms @{theory} "int2.assoc";
+ML \<open>(Global_Theory.get_thms @{theory} "int2.assoc";
     raise Fail "thm int2.assoc was generated")
-  handle ERROR _ => ([]:thm list); *}
+  handle ERROR _ => ([]:thm list);\<close>
 
 thm int.lone int.right.rone
   (* the latter comes through the sublocale relation *)
 
 
-subsection {* Interpretation in theory, then sublocale *}
+subsection \<open>Interpretation in theory, then sublocale\<close>
 
 interpretation fol: logic "op +" "minus"
   by unfold_locales (rule int_assoc int_minus2)+
@@ -478,7 +478,7 @@
 thm fol.two.assoc
 
 
-subsection {* Declarations and sublocale *}
+subsection \<open>Declarations and sublocale\<close>
 
 locale logic_a = logic
 locale logic_b = logic
@@ -487,9 +487,9 @@
   by unfold_locales
 
 
-subsection {* Interpretation *}
+subsection \<open>Interpretation\<close>
 
-subsection {* Rewrite morphism *}
+subsection \<open>Rewrite morphism\<close>
 
 locale logic_o =
   fixes land (infixl "&&" 55)
@@ -521,7 +521,7 @@
   x.lor_triv
 
 
-subsection {* Inheritance of rewrite morphisms *}
+subsection \<open>Inheritance of rewrite morphisms\<close>
 
 locale reflexive =
   fixes le :: "'a => 'a => o" (infix "\<sqsubseteq>" 50)
@@ -539,7 +539,7 @@
   grefl: "gle(x, x)" and gless_def: "gless(x, y) <-> gle(x, y) & x ~= y" and
   grefl': "gle'(x, x)" and gless'_def: "gless'(x, y) <-> gle'(x, y) & x ~= y"
 
-text {* Setup *}
+text \<open>Setup\<close>
 
 locale mixin = reflexive
 begin
@@ -554,7 +554,7 @@
     by (simp add: reflexive.less_def[OF reflexive] gless_def)
 qed
 
-text {* Rewrite morphism propagated along the locale hierarchy *}
+text \<open>Rewrite morphism propagated along the locale hierarchy\<close>
 
 locale mixin2 = mixin
 begin
@@ -568,7 +568,7 @@
 lemma "gless(x, y) <-> gle(x, y) & x ~= y"
   by (rule le.less_thm2)
 
-text {* Rewrite morphism does not leak to a side branch. *}
+text \<open>Rewrite morphism does not leak to a side branch.\<close>
 
 locale mixin3 = reflexive
 begin
@@ -581,7 +581,7 @@
 thm le.less_thm3  (* rewrite morphism not applied *)
 lemma "reflexive.less(gle, x, y) <-> gle(x, y) & x ~= y" by (rule le.less_thm3)
 
-text {* Rewrite morphism only available in original context *}
+text \<open>Rewrite morphism only available in original context\<close>
 
 locale mixin4_base = reflexive
 
@@ -613,7 +613,7 @@
 lemma "reflexive.less(gle, x, y) <-> gle(x, y) & x ~= y"
   by (rule le4.less_thm4')
 
-text {* Inherited rewrite morphism applied to new theorem *}
+text \<open>Inherited rewrite morphism applied to new theorem\<close>
 
 locale mixin5_base = reflexive
 
@@ -637,7 +637,7 @@
 lemma "gless(x, y) <-> gle(x, y) & x ~= y"
   by (rule le5.less_thm5)
 
-text {* Rewrite morphism pushed down to existing inherited locale *}
+text \<open>Rewrite morphism pushed down to existing inherited locale\<close>
 
 locale mixin6_base = reflexive
 
@@ -662,7 +662,7 @@
 lemma "gless(x, y) <-> gle(x, y) & x ~= y"
   by (rule le6.less_thm6)
 
-text {* Existing rewrite morphism inherited through sublocale relation *}
+text \<open>Existing rewrite morphism inherited through sublocale relation\<close>
 
 locale mixin7_base = reflexive
 
@@ -696,16 +696,16 @@
   by (rule le7.less_thm7b)
 
 
-text {* This locale will be interpreted in later theories. *}
+text \<open>This locale will be interpreted in later theories.\<close>
 
 locale mixin_thy_merge = le: reflexive le + le': reflexive le' for le le'
 
 
-subsection {* Rewrite morphisms in sublocale *}
+subsection \<open>Rewrite morphisms in sublocale\<close>
 
-text {* Simulate a specification of left groups where unit and inverse are defined
+text \<open>Simulate a specification of left groups where unit and inverse are defined
   rather than specified.  This is possible, but not in FOL, due to the lack of a
-  selection operator. *}
+  selection operator.\<close>
 
 axiomatization glob_one and glob_inv
   where glob_lone: "prod(glob_one(prod), x) = x"
@@ -745,24 +745,24 @@
 
 context lgrp begin
 
-text {* Equations stored in target *}
+text \<open>Equations stored in target\<close>
 
 lemma "dgrp.one(prod) = one" by (rule one_equation)
 lemma "dgrp.inv(prod, x) = inv(x)" by (rule inv_equation)
 
-text {* Rewrite morphisms applied *}
+text \<open>Rewrite morphisms applied\<close>
 
 lemma "one = glob_one(prod)" by (rule one_def)
 lemma "inv(x) = glob_inv(prod, x)" by (rule inv_def)
 
 end
 
-text {* Interpreted versions *}
+text \<open>Interpreted versions\<close>
 
 lemma "0 = glob_one (op +)" by (rule int.def.one_def)
 lemma "- x = glob_inv(op +, x)" by (rule int.def.inv_def)
 
-text {* Roundup applies rewrite morphisms at declaration level in DFS tree *}
+text \<open>Roundup applies rewrite morphisms at declaration level in DFS tree\<close>
 
 locale roundup = fixes x assumes true: "x <-> True"
 
@@ -771,7 +771,7 @@
 lemma (in roundup) "True & True <-> True" by (rule sub.true)
 
 
-section {* Interpretation in named contexts *}
+section \<open>Interpretation in named contexts\<close>
 
 locale container
 begin
@@ -780,15 +780,15 @@
 end
 
 context container begin
-ML {* (Context.>> (fn generic => let val context = Context.proof_of generic
+ML \<open>(Context.>> (fn generic => let val context = Context.proof_of generic
   val _ = Proof_Context.get_thms context "private.true" in generic end);
   raise Fail "thm private.true was persisted")
-  handle ERROR _ => ([]:thm list); *}
+  handle ERROR _ => ([]:thm list);\<close>
 thm true_copy
 end
 
 
-section {* Interpretation in proofs *}
+section \<open>Interpretation in proofs\<close>
 
 lemma True
 proof
--- a/src/FOL/ex/Miniscope.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Miniscope.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -14,9 +14,9 @@
 
 lemmas ccontr = FalseE [THEN classical]
 
-subsection {* Negation Normal Form *}
+subsection \<open>Negation Normal Form\<close>
 
-subsubsection {* de Morgan laws *}
+subsubsection \<open>de Morgan laws\<close>
 
 lemma demorgans:
   "~(P&Q) <-> ~P | ~Q"
@@ -38,7 +38,7 @@
 
 (* BEWARE: rewrite rules for <-> can confuse the simplifier!! *)
 
-subsubsection {* Pushing in the existential quantifiers *}
+subsubsection \<open>Pushing in the existential quantifiers\<close>
 
 lemma ex_simps:
   "(EX x. P) <-> P"
@@ -50,7 +50,7 @@
   by blast+
 
 
-subsubsection {* Pushing in the universal quantifiers *}
+subsubsection \<open>Pushing in the universal quantifiers\<close>
 
 lemma all_simps:
   "(ALL x. P) <-> P"
@@ -63,10 +63,10 @@
 
 lemmas mini_simps = demorgans nnf_simps ex_simps all_simps
 
-ML {*
+ML \<open>
 val mini_ss = simpset_of (@{context} addsimps @{thms mini_simps});
 fun mini_tac ctxt =
   resolve_tac ctxt @{thms ccontr} THEN' asm_full_simp_tac (put_simpset mini_ss ctxt);
-*}
+\<close>
 
 end
--- a/src/FOL/ex/Nat.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Nat.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1992  University of Cambridge
 *)
 
-section {* Theory of the natural numbers: Peano's axioms, primitive recursion *}
+section \<open>Theory of the natural numbers: Peano's axioms, primitive recursion\<close>
 
 theory Nat
 imports FOL
@@ -27,7 +27,7 @@
   where "m + n == rec(m, n, %x y. Suc(y))"
 
 
-subsection {* Proofs about the natural numbers *}
+subsection \<open>Proofs about the natural numbers\<close>
 
 lemma Suc_n_not_n: "Suc(k) ~= k"
 apply (rule_tac n = k in induct)
--- a/src/FOL/ex/Nat_Class.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Nat_Class.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -6,14 +6,14 @@
 imports FOL
 begin
 
-text {*
+text \<open>
   This is an abstract version of theory @{text Nat}. Instead of
   axiomatizing a single type @{text nat} we define the class of all
   these types (up to isomorphism).
 
   Note: The @{text rec} operator had to be made \emph{monomorphic},
   because class axioms may not contain more than one type variable.
-*}
+\<close>
 
 class nat =
   fixes Zero :: 'a  ("0")
--- a/src/FOL/ex/Natural_Numbers.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Natural_Numbers.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,16 +2,16 @@
     Author:     Markus Wenzel, TU Munich
 *)
 
-section {* Natural numbers *}
+section \<open>Natural numbers\<close>
 
 theory Natural_Numbers
 imports FOL
 begin
 
-text {*
+text \<open>
   Theory of the natural numbers: Peano's axioms, primitive recursion.
   (Modernized version of Larry Paulson's theory "Nat".)  \medskip
-*}
+\<close>
 
 typedecl nat
 instance nat :: "term" ..
--- a/src/FOL/ex/Prolog.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Prolog.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1992  University of Cambridge
 *)
 
-section {* First-Order Logic: PROLOG examples *}
+section \<open>First-Order Logic: PROLOG examples\<close>
 
 theory Prolog
 imports FOL
@@ -56,16 +56,16 @@
 done
 
 schematic_lemma "rev(?x, a:b:c:Nil)"
-apply (rule rules)+  -- {* does not solve it directly! *}
+apply (rule rules)+  -- \<open>does not solve it directly!\<close>
 back
 back
 done
 
 (*backtracking version*)
-ML {*
+ML \<open>
 fun prolog_tac ctxt =
   DEPTH_FIRST (has_fewer_prems 1) (resolve_tac ctxt @{thms rules} 1)
-*}
+\<close>
 
 schematic_lemma "rev(?x, a:b:c:Nil)"
 apply (tactic \<open>prolog_tac @{context}\<close>)
@@ -77,15 +77,15 @@
 
 (*rev([a..p], ?w) requires 153 inferences *)
 schematic_lemma "rev(a:b:c:d:e:f:g:h:i:j:k:l:m:n:o:p:Nil, ?w)"
-apply (tactic {*
-  DEPTH_SOLVE (resolve_tac @{context} ([@{thm refl}, @{thm conjI}] @ @{thms rules}) 1) *})
+apply (tactic \<open>
+  DEPTH_SOLVE (resolve_tac @{context} ([@{thm refl}, @{thm conjI}] @ @{thms rules}) 1)\<close>)
 done
 
 (*?x has 16, ?y has 32;  rev(?y,?w) requires 561 (rather large) inferences
   total inferences = 2 + 1 + 17 + 561 = 581*)
 schematic_lemma "a:b:c:d:e:f:g:h:i:j:k:l:m:n:o:p:Nil = ?x & app(?x,?x,?y) & rev(?y,?w)"
-apply (tactic {*
-  DEPTH_SOLVE (resolve_tac @{context} ([@{thm refl}, @{thm conjI}] @ @{thms rules}) 1) *})
+apply (tactic \<open>
+  DEPTH_SOLVE (resolve_tac @{context} ([@{thm refl}, @{thm conjI}] @ @{thms rules}) 1)\<close>)
 done
 
 end
--- a/src/FOL/ex/Propositional_Cla.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Propositional_Cla.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,13 +3,13 @@
     Copyright   1991  University of Cambridge
 *)
 
-section {* First-Order Logic: propositional examples (classical version) *}
+section \<open>First-Order Logic: propositional examples (classical version)\<close>
 
 theory Propositional_Cla
 imports FOL
 begin
 
-text {* commutative laws of @{text "&"} and @{text "|"} *}
+text \<open>commutative laws of @{text "&"} and @{text "|"}\<close>
 
 lemma "P & Q  -->  Q & P"
   by (tactic "IntPr.fast_tac @{context} 1")
@@ -18,7 +18,7 @@
   by fast
 
 
-text {* associative laws of @{text "&"} and @{text "|"} *}
+text \<open>associative laws of @{text "&"} and @{text "|"}\<close>
 lemma "(P & Q) & R  -->  P & (Q & R)"
   by fast
 
@@ -26,7 +26,7 @@
   by fast
 
 
-text {* distributive laws of @{text "&"} and @{text "|"} *}
+text \<open>distributive laws of @{text "&"} and @{text "|"}\<close>
 lemma "(P & Q) | R  --> (P | R) & (Q | R)"
   by fast
 
@@ -40,7 +40,7 @@
   by fast
 
 
-text {* Laws involving implication *}
+text \<open>Laws involving implication\<close>
 
 lemma "(P-->R) & (Q-->R) <-> (P|Q --> R)"
   by fast
@@ -58,18 +58,18 @@
   by fast
 
 
-text {* Propositions-as-types *}
+text \<open>Propositions-as-types\<close>
 
--- {* The combinator K *}
+-- \<open>The combinator K\<close>
 lemma "P --> (Q --> P)"
   by fast
 
--- {* The combinator S *}
+-- \<open>The combinator S\<close>
 lemma "(P-->Q-->R)  --> (P-->Q) --> (P-->R)"
   by fast
 
 
--- {* Converse is classical *}
+-- \<open>Converse is classical\<close>
 lemma "(P-->Q) | (P-->R)  -->  (P --> Q | R)"
   by fast
 
@@ -77,7 +77,7 @@
   by fast
 
 
-text {* Schwichtenberg's examples (via T. Nipkow) *}
+text \<open>Schwichtenberg's examples (via T. Nipkow)\<close>
 
 lemma stab_imp: "(((Q-->R)-->R)-->Q) --> (((P-->Q)-->R)-->R)-->P-->Q"
   by fast
--- a/src/FOL/ex/Propositional_Int.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Propositional_Int.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,13 +3,13 @@
     Copyright   1991  University of Cambridge
 *)
 
-section {* First-Order Logic: propositional examples (intuitionistic version) *}
+section \<open>First-Order Logic: propositional examples (intuitionistic version)\<close>
 
 theory Propositional_Int
 imports IFOL
 begin
 
-text {* commutative laws of @{text "&"} and @{text "|"} *}
+text \<open>commutative laws of @{text "&"} and @{text "|"}\<close>
 
 lemma "P & Q  -->  Q & P"
   by (tactic "IntPr.fast_tac @{context} 1")
@@ -18,7 +18,7 @@
   by (tactic "IntPr.fast_tac @{context} 1")
 
 
-text {* associative laws of @{text "&"} and @{text "|"} *}
+text \<open>associative laws of @{text "&"} and @{text "|"}\<close>
 lemma "(P & Q) & R  -->  P & (Q & R)"
   by (tactic "IntPr.fast_tac @{context} 1")
 
@@ -26,7 +26,7 @@
   by (tactic "IntPr.fast_tac @{context} 1")
 
 
-text {* distributive laws of @{text "&"} and @{text "|"} *}
+text \<open>distributive laws of @{text "&"} and @{text "|"}\<close>
 lemma "(P & Q) | R  --> (P | R) & (Q | R)"
   by (tactic "IntPr.fast_tac @{context} 1")
 
@@ -40,7 +40,7 @@
   by (tactic "IntPr.fast_tac @{context} 1")
 
 
-text {* Laws involving implication *}
+text \<open>Laws involving implication\<close>
 
 lemma "(P-->R) & (Q-->R) <-> (P|Q --> R)"
   by (tactic "IntPr.fast_tac @{context} 1")
@@ -58,18 +58,18 @@
   by (tactic "IntPr.fast_tac @{context} 1")
 
 
-text {* Propositions-as-types *}
+text \<open>Propositions-as-types\<close>
 
--- {* The combinator K *}
+-- \<open>The combinator K\<close>
 lemma "P --> (Q --> P)"
   by (tactic "IntPr.fast_tac @{context} 1")
 
--- {* The combinator S *}
+-- \<open>The combinator S\<close>
 lemma "(P-->Q-->R)  --> (P-->Q) --> (P-->R)"
   by (tactic "IntPr.fast_tac @{context} 1")
 
 
--- {* Converse is classical *}
+-- \<open>Converse is classical\<close>
 lemma "(P-->Q) | (P-->R)  -->  (P --> Q | R)"
   by (tactic "IntPr.fast_tac @{context} 1")
 
@@ -77,7 +77,7 @@
   by (tactic "IntPr.fast_tac @{context} 1")
 
 
-text {* Schwichtenberg's examples (via T. Nipkow) *}
+text \<open>Schwichtenberg's examples (via T. Nipkow)\<close>
 
 lemma stab_imp: "(((Q-->R)-->R)-->Q) --> (((P-->Q)-->R)-->R)-->P-->Q"
   by (tactic "IntPr.fast_tac @{context} 1")
--- a/src/FOL/ex/Quantifiers_Cla.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Quantifiers_Cla.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1991  University of Cambridge
 *)
 
-section {* First-Order Logic: quantifier examples (classical version) *}
+section \<open>First-Order Logic: quantifier examples (classical version)\<close>
 
 theory Quantifiers_Cla
 imports FOL
@@ -16,7 +16,7 @@
   by fast
 
 
--- {* Converse is false *}
+-- \<open>Converse is false\<close>
 lemma "(ALL x. P(x)) | (ALL x. Q(x)) --> (ALL x. P(x) | Q(x))"
   by fast
 
@@ -28,19 +28,19 @@
   by fast
 
 
-text {* Some harder ones *}
+text \<open>Some harder ones\<close>
 
 lemma "(EX x. P(x) | Q(x)) <-> (EX x. P(x)) | (EX x. Q(x))"
   by fast
 
--- {* Converse is false *}
+-- \<open>Converse is false\<close>
 lemma "(EX x. P(x)&Q(x)) --> (EX x. P(x))  &  (EX x. Q(x))"
   by fast
 
 
-text {* Basic test of quantifier reasoning *}
+text \<open>Basic test of quantifier reasoning\<close>
 
--- {* TRUE *}
+-- \<open>TRUE\<close>
 lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
   by fast
 
@@ -48,7 +48,7 @@
   by fast
 
 
-text {* The following should fail, as they are false! *}
+text \<open>The following should fail, as they are false!\<close>
 
 lemma "(ALL x. EX y. Q(x,y))  -->  (EX y. ALL x. Q(x,y))"
   apply fast?
@@ -67,12 +67,12 @@
   oops
 
 
-text {* Back to things that are provable \dots *}
+text \<open>Back to things that are provable \dots\<close>
 
 lemma "(ALL x. P(x)-->Q(x)) & (EX x. P(x)) --> (EX x. Q(x))"
   by fast
 
--- {* An example of why exI should be delayed as long as possible *}
+-- \<open>An example of why exI should be delayed as long as possible\<close>
 lemma "(P --> (EX x. Q(x))) & P --> (EX x. Q(x))"
   by fast
 
@@ -83,9 +83,9 @@
   by fast
 
 
-text {* Some slow ones *}
+text \<open>Some slow ones\<close>
 
--- {* Principia Mathematica *11.53 *}
+-- \<open>Principia Mathematica *11.53\<close>
 lemma "(ALL x y. P(x) --> Q(y)) <-> ((EX x. P(x)) --> (ALL y. Q(y)))"
   by fast
 
--- a/src/FOL/ex/Quantifiers_Int.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOL/ex/Quantifiers_Int.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1991  University of Cambridge
 *)
 
-section {* First-Order Logic: quantifier examples (intuitionistic version) *}
+section \<open>First-Order Logic: quantifier examples (intuitionistic version)\<close>
 
 theory Quantifiers_Int
 imports IFOL
@@ -16,7 +16,7 @@
   by (tactic "IntPr.fast_tac @{context} 1")
 
 
--- {* Converse is false *}
+-- \<open>Converse is false\<close>
 lemma "(ALL x. P(x)) | (ALL x. Q(x)) --> (ALL x. P(x) | Q(x))"
   by (tactic "IntPr.fast_tac @{context} 1")
 
@@ -28,19 +28,19 @@
   by (tactic "IntPr.fast_tac @{context} 1")
 
 
-text {* Some harder ones *}
+text \<open>Some harder ones\<close>
 
 lemma "(EX x. P(x) | Q(x)) <-> (EX x. P(x)) | (EX x. Q(x))"
   by (tactic "IntPr.fast_tac @{context} 1")
 
--- {* Converse is false *}
+-- \<open>Converse is false\<close>
 lemma "(EX x. P(x)&Q(x)) --> (EX x. P(x))  &  (EX x. Q(x))"
   by (tactic "IntPr.fast_tac @{context} 1")
 
 
-text {* Basic test of quantifier reasoning *}
+text \<open>Basic test of quantifier reasoning\<close>
 
--- {* TRUE *}
+-- \<open>TRUE\<close>
 lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
   by (tactic "IntPr.fast_tac @{context} 1")
 
@@ -48,7 +48,7 @@
   by (tactic "IntPr.fast_tac @{context} 1")
 
 
-text {* The following should fail, as they are false! *}
+text \<open>The following should fail, as they are false!\<close>
 
 lemma "(ALL x. EX y. Q(x,y))  -->  (EX y. ALL x. Q(x,y))"
   apply (tactic "IntPr.fast_tac @{context} 1")?
@@ -67,12 +67,12 @@
   oops
 
 
-text {* Back to things that are provable \dots *}
+text \<open>Back to things that are provable \dots\<close>
 
 lemma "(ALL x. P(x)-->Q(x)) & (EX x. P(x)) --> (EX x. Q(x))"
   by (tactic "IntPr.fast_tac @{context} 1")
 
--- {* An example of why exI should be delayed as long as possible *}
+-- \<open>An example of why exI should be delayed as long as possible\<close>
 lemma "(P --> (EX x. Q(x))) & P --> (EX x. Q(x))"
   by (tactic "IntPr.fast_tac @{context} 1")
 
@@ -83,9 +83,9 @@
   by (tactic "IntPr.fast_tac @{context} 1")
 
 
-text {* Some slow ones *}
+text \<open>Some slow ones\<close>
 
--- {* Principia Mathematica *11.53 *}
+-- \<open>Principia Mathematica *11.53\<close>
 lemma "(ALL x y. P(x) --> Q(y)) <-> ((EX x. P(x)) --> (ALL y. Q(y)))"
   by (tactic "IntPr.fast_tac @{context} 1")
 
--- a/src/FOLP/FOLP.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOLP/FOLP.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1992  University of Cambridge
 *)
 
-section {* Classical First-Order Logic with Proofs *}
+section \<open>Classical First-Order Logic with Proofs\<close>
 
 theory FOLP
 imports IFOLP
@@ -56,8 +56,8 @@
     and r2: "!!y. y:Q ==> g(y):R"
   shows "?p : R"
   apply (rule excluded_middle [THEN disjE])
-   apply (tactic {* DEPTH_SOLVE (assume_tac @{context} 1 ORELSE
-       resolve_tac @{context} [@{thm r1}, @{thm r2}, @{thm major} RS @{thm mp}] 1) *})
+   apply (tactic \<open>DEPTH_SOLVE (assume_tac @{context} 1 ORELSE
+       resolve_tac @{context} [@{thm r1}, @{thm r2}, @{thm major} RS @{thm mp}] 1)\<close>)
   done
 
 (*Double negation law*)
@@ -80,10 +80,10 @@
   apply (insert major)
   apply (unfold iff_def)
   apply (rule conjE)
-  apply (tactic {* DEPTH_SOLVE_1 (eresolve_tac @{context} @{thms impCE} 1 ORELSE
+  apply (tactic \<open>DEPTH_SOLVE_1 (eresolve_tac @{context} @{thms impCE} 1 ORELSE
       eresolve_tac @{context} [@{thm notE}, @{thm impE}] 1 THEN assume_tac @{context} 1 ORELSE
       assume_tac @{context} 1 ORELSE
-      resolve_tac @{context} [@{thm r1}, @{thm r2}] 1) *})+
+      resolve_tac @{context} [@{thm r1}, @{thm r2}] 1)\<close>)+
   done
 
 
@@ -101,7 +101,7 @@
 ML_file "classical.ML"      (* Patched because matching won't instantiate proof *)
 ML_file "simp.ML"           (* Patched because matching won't instantiate proof *)
 
-ML {*
+ML \<open>
 structure Cla = Classical
 (
   val sizef = size_of_thm
@@ -128,14 +128,14 @@
 val FOLP_dup_cs =
   prop_cs addSIs [@{thm allI}] addIs [@{thm exCI}, @{thm ex1I}]
     addSEs [@{thm exE}, @{thm ex1E}] addEs [@{thm all_dupE}];
-*}
+\<close>
 
 schematic_lemma cla_rews:
   "?p1 : P | ~P"
   "?p2 : ~P | P"
   "?p3 : ~ ~ P <-> P"
   "?p4 : (~P --> P) <-> P"
-  apply (tactic {* ALLGOALS (Cla.fast_tac @{context} FOLP_cs) *})
+  apply (tactic \<open>ALLGOALS (Cla.fast_tac @{context} FOLP_cs)\<close>)
   done
 
 ML_file "simpdata.ML"
--- a/src/FOLP/IFOLP.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOLP/IFOLP.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1992  University of Cambridge
 *)
 
-section {* Intuitionistic First-Order Logic with Proofs *}
+section \<open>Intuitionistic First-Order Logic with Proofs\<close>
 
 theory IFOLP
 imports Pure
@@ -64,21 +64,21 @@
 
 syntax "_Proof" :: "[p,o]=>prop"    ("(_ /: _)" [51, 10] 5)
 
-parse_translation {*
+parse_translation \<open>
   let fun proof_tr [p, P] = Const (@{const_syntax Proof}, dummyT) $ P $ p
   in [(@{syntax_const "_Proof"}, K proof_tr)] end
-*}
+\<close>
 
 (*show_proofs = true displays the proof terms -- they are ENORMOUS*)
-ML {* val show_proofs = Attrib.setup_config_bool @{binding show_proofs} (K false) *}
+ML \<open>val show_proofs = Attrib.setup_config_bool @{binding show_proofs} (K false)\<close>
 
-print_translation {*
+print_translation \<open>
   let
     fun proof_tr' ctxt [P, p] =
       if Config.get ctxt show_proofs then Const (@{syntax_const "_Proof"}, dummyT) $ p $ P
       else P
   in [(@{const_syntax Proof}, proof_tr')] end
-*}
+\<close>
 
 
 (**** Propositional logic ****)
@@ -248,7 +248,7 @@
     Fails unless one assumption is equal and exactly one is unifiable
 **)
 
-ML {*
+ML \<open>
 local
   fun discard_proof (Const (@{const_name Proof}, _) $ P $ _) = P;
 in
@@ -265,23 +265,23 @@
           else no_tac
       end);
 end;
-*}
+\<close>
 
 
 (*** Modus Ponens Tactics ***)
 
 (*Finds P-->Q and P in the assumptions, replaces implication by Q *)
-ML {*
+ML \<open>
   fun mp_tac ctxt i =
     eresolve_tac ctxt [@{thm notE}, make_elim @{thm mp}] i  THEN  assume_tac ctxt i
-*}
+\<close>
 method_setup mp = \<open>Scan.succeed (SIMPLE_METHOD' o mp_tac)\<close>
 
 (*Like mp_tac but instantiates no variables*)
-ML {*
+ML \<open>
   fun int_uniq_mp_tac ctxt i =
     eresolve_tac ctxt [@{thm notE}, @{thm impE}] i  THEN  uniq_assume_tac ctxt i
-*}
+\<close>
 
 
 (*** If-and-only-if ***)
@@ -360,11 +360,11 @@
 (*** <-> congruence rules for simplification ***)
 
 (*Use iffE on a premise.  For conj_cong, imp_cong, all_cong, ex_cong*)
-ML {*
+ML \<open>
 fun iff_tac ctxt prems i =
     resolve_tac ctxt (prems RL [@{thm iffE}]) i THEN
     REPEAT1 (eresolve_tac ctxt [asm_rl, @{thm mp}] i)
-*}
+\<close>
 
 method_setup iff =
   \<open>Attrib.thms >> (fn prems => fn ctxt => SIMPLE_METHOD' (iff_tac ctxt prems))\<close>
@@ -503,20 +503,20 @@
 
 schematic_lemma pred1_cong: "p:a=a' ==> ?p:P(a) <-> P(a')"
   apply (rule iffI)
-   apply (tactic {*
-     DEPTH_SOLVE (assume_tac @{context} 1 ORELSE eresolve_tac @{context} [@{thm subst}, @{thm ssubst}] 1) *})
+   apply (tactic \<open>
+     DEPTH_SOLVE (assume_tac @{context} 1 ORELSE eresolve_tac @{context} [@{thm subst}, @{thm ssubst}] 1)\<close>)
   done
 
 schematic_lemma pred2_cong: "[| p:a=a';  q:b=b' |] ==> ?p:P(a,b) <-> P(a',b')"
   apply (rule iffI)
-   apply (tactic {*
-     DEPTH_SOLVE (assume_tac @{context} 1 ORELSE eresolve_tac @{context} [@{thm subst}, @{thm ssubst}] 1) *})
+   apply (tactic \<open>
+     DEPTH_SOLVE (assume_tac @{context} 1 ORELSE eresolve_tac @{context} [@{thm subst}, @{thm ssubst}] 1)\<close>)
   done
 
 schematic_lemma pred3_cong: "[| p:a=a';  q:b=b';  r:c=c' |] ==> ?p:P(a,b,c) <-> P(a',b',c')"
   apply (rule iffI)
-   apply (tactic {*
-     DEPTH_SOLVE (assume_tac @{context} 1 ORELSE eresolve_tac @{context} [@{thm subst}, @{thm ssubst}] 1) *})
+   apply (tactic \<open>
+     DEPTH_SOLVE (assume_tac @{context} 1 ORELSE eresolve_tac @{context} [@{thm subst}, @{thm ssubst}] 1)\<close>)
   done
 
 lemmas pred_congs = pred1_cong pred2_cong pred3_cong
@@ -543,9 +543,9 @@
   assumes major: "p:(P|Q)-->S"
     and minor: "!!x y.[| x:P-->S; y:Q-->S |] ==> q(x,y):R"
   shows "?p:R"
-  apply (tactic {* DEPTH_SOLVE (assume_tac @{context} 1 ORELSE
+  apply (tactic \<open>DEPTH_SOLVE (assume_tac @{context} 1 ORELSE
       resolve_tac @{context} [@{thm disjI1}, @{thm disjI2}, @{thm impI},
-        @{thm major} RS @{thm mp}, @{thm minor}] 1) *})
+        @{thm major} RS @{thm mp}, @{thm minor}] 1)\<close>)
   done
 
 (*Simplifies the implication.  Classical version is stronger.
@@ -607,7 +607,7 @@
 
 ML_file "hypsubst.ML"
 
-ML {*
+ML \<open>
 structure Hypsubst = Hypsubst
 (
   (*Take apart an equality judgement; otherwise raise Match!*)
@@ -625,7 +625,7 @@
   val thin_refl = @{thm thin_refl}
 );
 open Hypsubst;
-*}
+\<close>
 
 ML_file "intprover.ML"
 
@@ -641,7 +641,7 @@
   "?p6 : P & ~P <-> False"
   "?p7 : ~P & P <-> False"
   "?p8 : (P & Q) & R <-> P & (Q & R)"
-  apply (tactic {* fn st => IntPr.fast_tac @{context} 1 st *})+
+  apply (tactic \<open>fn st => IntPr.fast_tac @{context} 1 st\<close>)+
   done
 
 schematic_lemma disj_rews:
@@ -651,13 +651,13 @@
   "?p4 : False | P <-> P"
   "?p5 : P | P <-> P"
   "?p6 : (P | Q) | R <-> P | (Q | R)"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})+
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)+
   done
 
 schematic_lemma not_rews:
   "?p1 : ~ False <-> True"
   "?p2 : ~ True <-> False"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})+
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)+
   done
 
 schematic_lemma imp_rews:
@@ -667,7 +667,7 @@
   "?p4 : (True --> P) <-> P"
   "?p5 : (P --> P) <-> True"
   "?p6 : (P --> ~P) <-> ~P"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})+
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)+
   done
 
 schematic_lemma iff_rews:
@@ -676,13 +676,13 @@
   "?p3 : (P <-> P) <-> True"
   "?p4 : (False <-> P) <-> ~P"
   "?p5 : (P <-> False) <-> ~P"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})+
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)+
   done
 
 schematic_lemma quant_rews:
   "?p1 : (ALL x. P) <-> P"
   "?p2 : (EX x. P) <-> P"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})+
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)+
   done
 
 (*These are NOT supplied by default!*)
@@ -691,7 +691,7 @@
   "?p2 : P & (Q | R) <-> P&Q | P&R"
   "?p3 : (Q | R) & P <-> Q&P | R&P"
   "?p4 : (P | Q --> R) <-> (P --> R) & (Q --> R)"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})+
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)+
   done
 
 schematic_lemma distrib_rews2:
@@ -699,17 +699,17 @@
   "?p2 : ((EX x. NORM(P(x))) --> Q) <-> (ALL x. NORM(P(x)) --> Q)"
   "?p3 : (EX x. NORM(P(x))) & NORM(Q) <-> (EX x. NORM(P(x)) & NORM(Q))"
   "?p4 : NORM(Q) & (EX x. NORM(P(x))) <-> (EX x. NORM(Q) & NORM(P(x)))"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})+
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)+
   done
 
 lemmas distrib_rews = distrib_rews1 distrib_rews2
 
 schematic_lemma P_Imp_P_iff_T: "p:P ==> ?p:(P <-> True)"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
   done
 
 schematic_lemma not_P_imp_P_iff_F: "p:~P ==> ?p:(P <-> False)"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
   done
 
 end
--- a/src/FOLP/ex/Classical.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOLP/ex/Classical.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -288,7 +288,7 @@
 text "Problem 58  NOT PROVED AUTOMATICALLY"
 schematic_lemma "?p : (ALL x y. f(x)=g(y)) --> (ALL x y. f(f(x))=f(g(y)))"
   supply f_cong = subst_context [where t = f]
-  by (tactic {* fast_tac @{context} (FOLP_cs addSIs [@{thm f_cong}]) 1 *})
+  by (tactic \<open>fast_tac @{context} (FOLP_cs addSIs [@{thm f_cong}]) 1\<close>)
 
 text "Problem 59"
 schematic_lemma "?p : (ALL x. P(x) <-> ~P(f(x))) --> (EX x. P(x) & ~P(f(x)))"
--- a/src/FOLP/ex/Foundation.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOLP/ex/Foundation.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -18,7 +18,7 @@
 apply assumption
 done
 
-text {*A form of conj-elimination*}
+text \<open>A form of conj-elimination\<close>
 schematic_lemma
   assumes "p : A & B"
     and "!!x y. x : A ==> y : B ==> f(x, y) : C"
@@ -99,7 +99,7 @@
 apply (rule refl)?
 oops
 
-text {* Parallel lifting example. *}
+text \<open>Parallel lifting example.\<close>
 schematic_lemma "?p : EX u. ALL x. EX v. ALL y. EX w. P(u,x,v,y,w)"
 apply (rule exI allI)
 apply (rule exI allI)
@@ -121,7 +121,7 @@
 apply assumption
 done
 
-text {* A bigger demonstration of quantifiers -- not in the paper. *}
+text \<open>A bigger demonstration of quantifiers -- not in the paper.\<close>
 schematic_lemma "?p : (EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
 apply (rule impI)
 apply (rule allI)
--- a/src/FOLP/ex/If.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOLP/ex/If.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -9,7 +9,7 @@
   assumes "!!x. x : P ==> f(x) : Q"  "!!x. x : ~P ==> g(x) : R"
   shows "?p : if(P,Q,R)"
 apply (unfold if_def)
-apply (tactic {* fast_tac @{context} (FOLP_cs addIs @{thms assms}) 1 *})
+apply (tactic \<open>fast_tac @{context} (FOLP_cs addIs @{thms assms}) 1\<close>)
 done
 
 schematic_lemma ifE:
@@ -19,7 +19,7 @@
   shows "?p : S"
 apply (insert 1)
 apply (unfold if_def)
-apply (tactic {* fast_tac @{context} (FOLP_cs addIs [@{thm 2}, @{thm 3}]) 1 *})
+apply (tactic \<open>fast_tac @{context} (FOLP_cs addIs [@{thm 2}, @{thm 3}]) 1\<close>)
 done
 
 schematic_lemma if_commute: "?p : if(P, if(Q,A,B), if(Q,C,D)) <-> if(Q, if(P,A,C), if(P,B,D))"
@@ -30,14 +30,14 @@
 apply (rule ifI)
 oops
 
-ML {* val if_cs = FOLP_cs addSIs [@{thm ifI}] addSEs [@{thm ifE}] *}
+ML \<open>val if_cs = FOLP_cs addSIs [@{thm ifI}] addSEs [@{thm ifE}]\<close>
 
 schematic_lemma if_commute: "?p : if(P, if(Q,A,B), if(Q,C,D)) <-> if(Q, if(P,A,C), if(P,B,D))"
-apply (tactic {* fast_tac @{context} if_cs 1 *})
+apply (tactic \<open>fast_tac @{context} if_cs 1\<close>)
 done
 
 schematic_lemma nested_ifs: "?p : if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))"
-apply (tactic {* fast_tac @{context} if_cs 1 *})
+apply (tactic \<open>fast_tac @{context} if_cs 1\<close>)
 done
 
 end
--- a/src/FOLP/ex/Intro.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOLP/ex/Intro.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -5,13 +5,13 @@
 Derives some inference rules, illustrating the use of definitions.
 *)
 
-section {* Examples for the manual ``Introduction to Isabelle'' *}
+section \<open>Examples for the manual ``Introduction to Isabelle''\<close>
 
 theory Intro
 imports FOLP
 begin
 
-subsubsection {* Some simple backward proofs *}
+subsubsection \<open>Some simple backward proofs\<close>
 
 schematic_lemma mythm: "?p : P|P --> P"
 apply (rule impI)
@@ -41,21 +41,21 @@
 done
 
 
-subsubsection {* Demonstration of @{text "fast"} *}
+subsubsection \<open>Demonstration of @{text "fast"}\<close>
 
 schematic_lemma "?p : (EX y. ALL x. J(y,x) <-> ~J(x,x))
         -->  ~ (ALL x. EX y. ALL z. J(z,y) <-> ~ J(z,x))"
-apply (tactic {* fast_tac @{context} FOLP_cs 1 *})
+apply (tactic \<open>fast_tac @{context} FOLP_cs 1\<close>)
 done
 
 
 schematic_lemma "?p : ALL x. P(x,f(x)) <->
         (EX y. (ALL z. P(z,y) --> P(z,f(x))) & P(x,y))"
-apply (tactic {* fast_tac @{context} FOLP_cs 1 *})
+apply (tactic \<open>fast_tac @{context} FOLP_cs 1\<close>)
 done
 
 
-subsubsection {* Derivation of conjunction elimination rule *}
+subsubsection \<open>Derivation of conjunction elimination rule\<close>
 
 schematic_lemma
   assumes major: "p : P&Q"
@@ -67,9 +67,9 @@
 done
 
 
-subsection {* Derived rules involving definitions *}
+subsection \<open>Derived rules involving definitions\<close>
 
-text {* Derivation of negation introduction *}
+text \<open>Derivation of negation introduction\<close>
 
 schematic_lemma
   assumes "!!x. x : P ==> f(x) : False"
@@ -90,7 +90,7 @@
 apply (rule minor)
 done
 
-text {* Alternative proof of the result above *}
+text \<open>Alternative proof of the result above\<close>
 schematic_lemma
   assumes major: "p : ~P"
     and minor: "q : P"
--- a/src/FOLP/ex/Intuitionistic.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOLP/ex/Intuitionistic.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -31,167 +31,167 @@
 begin
 
 schematic_lemma "?p : ~~(P&Q) <-> ~~P & ~~Q"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : ~~~P <-> ~P"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : ~~((P --> Q | R)  -->  (P-->Q) | (P-->R))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : (P<->Q) <-> (Q<->P)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
-subsection {* Lemmas for the propositional double-negation translation *}
+subsection \<open>Lemmas for the propositional double-negation translation\<close>
 
 schematic_lemma "?p : P --> ~~P"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : ~~(~~P --> P)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : ~~P & ~~(P --> Q) --> ~~Q"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
-subsection {* The following are classically but not constructively valid *}
+subsection \<open>The following are classically but not constructively valid\<close>
 
 (*The attempt to prove them terminates quickly!*)
 schematic_lemma "?p : ((P-->Q) --> P)  -->  P"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})?
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)?
   oops
 
 schematic_lemma "?p : (P&Q-->R)  -->  (P-->R) | (Q-->R)"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})?
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)?
   oops
 
 
-subsection {* Intuitionistic FOL: propositional problems based on Pelletier *}
+subsection \<open>Intuitionistic FOL: propositional problems based on Pelletier\<close>
 
 text "Problem ~~1"
 schematic_lemma "?p : ~~((P-->Q)  <->  (~Q --> ~P))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem ~~2"
 schematic_lemma "?p : ~~(~~P  <->  P)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem 3"
 schematic_lemma "?p : ~(P-->Q) --> (Q-->P)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem ~~4"
 schematic_lemma "?p : ~~((~P-->Q)  <->  (~Q --> P))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem ~~5"
 schematic_lemma "?p : ~~((P|Q-->P|R) --> P|(Q-->R))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem ~~6"
 schematic_lemma "?p : ~~(P | ~P)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem ~~7"
 schematic_lemma "?p : ~~(P | ~~~P)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem ~~8.  Peirce's law"
 schematic_lemma "?p : ~~(((P-->Q) --> P)  -->  P)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem 9"
 schematic_lemma "?p : ((P|Q) & (~P|Q) & (P| ~Q)) --> ~ (~P | ~Q)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem 10"
 schematic_lemma "?p : (Q-->R) --> (R-->P&Q) --> (P-->(Q|R)) --> (P<->Q)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "11.  Proved in each direction (incorrectly, says Pelletier!!) "
 schematic_lemma "?p : P<->P"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem ~~12.  Dijkstra's law  "
 schematic_lemma "?p : ~~(((P <-> Q) <-> R)  <->  (P <-> (Q <-> R)))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : ((P <-> Q) <-> R)  -->  ~~(P <-> (Q <-> R))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem 13.  Distributive law"
 schematic_lemma "?p : P | (Q & R)  <-> (P | Q) & (P | R)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem ~~14"
 schematic_lemma "?p : ~~((P <-> Q) <-> ((Q | ~P) & (~Q|P)))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem ~~15"
 schematic_lemma "?p : ~~((P --> Q) <-> (~P | Q))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem ~~16"
 schematic_lemma "?p : ~~((P-->Q) | (Q-->P))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem ~~17"
 schematic_lemma "?p : ~~(((P & (Q-->R))-->S) <-> ((~P | Q | S) & (~P | ~R | S)))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})  -- slow
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)  -- slow
 
 
-subsection {* Examples with quantifiers *}
+subsection \<open>Examples with quantifiers\<close>
 
 text "The converse is classical in the following implications..."
 
 schematic_lemma "?p : (EX x. P(x)-->Q)  -->  (ALL x. P(x)) --> Q"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : ((ALL x. P(x))-->Q) --> ~ (ALL x. P(x) & ~Q)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : ((ALL x. ~P(x))-->Q)  -->  ~ (ALL x. ~ (P(x)|Q))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : (ALL x. P(x)) | Q  -->  (ALL x. P(x) | Q)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : (EX x. P --> Q(x)) --> (P --> (EX x. Q(x)))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 text "The following are not constructively valid!"
 text "The attempt to prove them terminates quickly!"
 
 schematic_lemma "?p : ((ALL x. P(x))-->Q) --> (EX x. P(x)-->Q)"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})?
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)?
   oops
 
 schematic_lemma "?p : (P --> (EX x. Q(x))) --> (EX x. P-->Q(x))"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})?
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)?
   oops
 
 schematic_lemma "?p : (ALL x. P(x) | Q) --> ((ALL x. P(x)) | Q)"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})?
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)?
   oops
 
 schematic_lemma "?p : (ALL x. ~~P(x)) --> ~~(ALL x. P(x))"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})?
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)?
   oops
 
 (*Classically but not intuitionistically valid.  Proved by a bug in 1986!*)
 schematic_lemma "?p : EX x. Q(x) --> (ALL x. Q(x))"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})?
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)?
   oops
 
 
 subsection "Hard examples with quantifiers"
 
-text {*
+text \<open>
   The ones that have not been proved are not known to be valid!
   Some will require quantifier duplication -- not currently available.
-*}
+\<close>
 
 text "Problem ~~18"
 schematic_lemma "?p : ~~(EX y. ALL x. P(y)-->P(x))" oops
@@ -204,7 +204,7 @@
 text "Problem 20"
 schematic_lemma "?p : (ALL x y. EX z. ALL w. (P(x)&Q(y)-->R(z)&S(w)))      
     --> (EX x y. P(x) & Q(y)) --> (EX z. R(z))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem 21"
 schematic_lemma "?p : (EX x. P-->Q(x)) & (EX x. Q(x)-->P) --> ~~(EX x. P<->Q(x))" oops
@@ -212,11 +212,11 @@
 
 text "Problem 22"
 schematic_lemma "?p : (ALL x. P <-> Q(x))  -->  (P <-> (ALL x. Q(x)))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem ~~23"
 schematic_lemma "?p : ~~ ((ALL x. P | Q(x))  <->  (P | (ALL x. Q(x))))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 text "Problem 24"
 schematic_lemma "?p : ~(EX x. S(x)&Q(x)) & (ALL x. P(x) --> Q(x)|R(x)) &   
@@ -287,7 +287,7 @@
 schematic_lemma
     "?p : (EX z w. ALL x y. P(x,y) <->  (x=z & y=w)) -->   
      (EX z. ALL x. EX w. (ALL y. P(x,y) <-> y=w) <-> x=z)"
-  by (tactic "IntPr.best_tac @{context} 1") -- {*60 seconds*}
+  by (tactic "IntPr.best_tac @{context} 1") -- \<open>60 seconds\<close>
 
 text "Problem 56"
 schematic_lemma "?p : (ALL x. (EX y. P(y) & x=f(y)) --> P(x)) <-> (ALL x. P(x) --> P(f(x)))"
--- a/src/FOLP/ex/Nat.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOLP/ex/Nat.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1992  University of Cambridge
 *)
 
-section {* Theory of the natural numbers: Peano's axioms, primitive recursion *}
+section \<open>Theory of the natural numbers: Peano's axioms, primitive recursion\<close>
 
 theory Nat
 imports FOLP
@@ -38,7 +38,7 @@
   where "m + n == rec(m, n, %x y. Suc(y))"
 
 
-subsection {* Proofs about the natural numbers *}
+subsection \<open>Proofs about the natural numbers\<close>
 
 schematic_lemma Suc_n_not_n: "?p : ~ (Suc(k) = k)"
 apply (rule_tac n = k in induct)
@@ -81,27 +81,27 @@
 
 lemmas nat_congs = Suc_cong Plus_cong
 
-ML {*
+ML \<open>
   val add_ss =
     FOLP_ss addcongs @{thms nat_congs}
     |> fold (addrew @{context}) @{thms add_0 add_Suc}
-*}
+\<close>
 
 schematic_lemma add_assoc: "?p : (k+m)+n = k+(m+n)"
 apply (rule_tac n = k in induct)
-apply (tactic {* SIMP_TAC @{context} add_ss 1 *})
-apply (tactic {* ASM_SIMP_TAC @{context} add_ss 1 *})
+apply (tactic \<open>SIMP_TAC @{context} add_ss 1\<close>)
+apply (tactic \<open>ASM_SIMP_TAC @{context} add_ss 1\<close>)
 done
 
 schematic_lemma add_0_right: "?p : m+0 = m"
 apply (rule_tac n = m in induct)
-apply (tactic {* SIMP_TAC @{context} add_ss 1 *})
-apply (tactic {* ASM_SIMP_TAC @{context} add_ss 1 *})
+apply (tactic \<open>SIMP_TAC @{context} add_ss 1\<close>)
+apply (tactic \<open>ASM_SIMP_TAC @{context} add_ss 1\<close>)
 done
 
 schematic_lemma add_Suc_right: "?p : m+Suc(n) = Suc(m+n)"
 apply (rule_tac n = m in induct)
-apply (tactic {* ALLGOALS (ASM_SIMP_TAC @{context} add_ss) *})
+apply (tactic \<open>ALLGOALS (ASM_SIMP_TAC @{context} add_ss)\<close>)
 done
 
 (*mk_typed_congs appears not to work with FOLP's version of subst*)
--- a/src/FOLP/ex/Propositional_Cla.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOLP/ex/Propositional_Cla.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1991  University of Cambridge
 *)
 
-section {* First-Order Logic: propositional examples *}
+section \<open>First-Order Logic: propositional examples\<close>
 
 theory Propositional_Cla
 imports FOLP
@@ -12,106 +12,106 @@
 
 text "commutative laws of & and | "
 schematic_lemma "?p : P & Q  -->  Q & P"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma "?p : P | Q  -->  Q | P"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 
 text "associative laws of & and | "
 schematic_lemma "?p : (P & Q) & R  -->  P & (Q & R)"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma "?p : (P | Q) | R  -->  P | (Q | R)"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 
 text "distributive laws of & and | "
 schematic_lemma "?p : (P & Q) | R  --> (P | R) & (Q | R)"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma "?p : (P | R) & (Q | R)  --> (P & Q) | R"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma "?p : (P | Q) & R  --> (P & R) | (Q & R)"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 
 schematic_lemma "?p : (P & R) | (Q & R)  --> (P | Q) & R"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 
 text "Laws involving implication"
 
 schematic_lemma "?p : (P-->R) & (Q-->R) <-> (P|Q --> R)"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma "?p : (P & Q --> R) <-> (P--> (Q-->R))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma "?p : ((P-->R)-->R) --> ((Q-->R)-->R) --> (P&Q-->R) --> R"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma "?p : ~(P-->R) --> ~(Q-->R) --> ~(P&Q-->R)"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma "?p : (P --> Q & R) <-> (P-->Q)  &  (P-->R)"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 
 text "Propositions-as-types"
 
 (*The combinator K*)
 schematic_lemma "?p : P --> (Q --> P)"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 (*The combinator S*)
 schematic_lemma "?p : (P-->Q-->R)  --> (P-->Q) --> (P-->R)"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 
 (*Converse is classical*)
 schematic_lemma "?p : (P-->Q) | (P-->R)  -->  (P --> Q | R)"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma "?p : (P-->Q)  -->  (~Q --> ~P)"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 
 text "Schwichtenberg's examples (via T. Nipkow)"
 
 schematic_lemma stab_imp: "?p : (((Q-->R)-->R)-->Q) --> (((P-->Q)-->R)-->R)-->P-->Q"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma stab_to_peirce: "?p : (((P --> R) --> R) --> P) --> (((Q --> R) --> R) --> Q)  
               --> ((P --> Q) --> P) --> P"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma peirce_imp1: "?p : (((Q --> R) --> Q) --> Q)  
                --> (((P --> Q) --> R) --> P --> Q) --> P --> Q"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
   
 schematic_lemma peirce_imp2: "?p : (((P --> R) --> P) --> P) --> ((P --> Q --> R) --> P) --> P"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma mints: "?p : ((((P --> Q) --> P) --> P) --> Q) --> Q"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma mints_solovev: "?p : (P --> (Q --> R) --> Q) --> ((P --> Q) --> R) --> R"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma tatsuta: "?p : (((P7 --> P1) --> P10) --> P4 --> P5)  
           --> (((P8 --> P2) --> P9) --> P3 --> P10)  
           --> (P1 --> P8) --> P6 --> P7  
           --> (((P3 --> P2) --> P9) --> P4)  
           --> (P1 --> P3) --> (((P6 --> P1) --> P2) --> P9) --> P5"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma tatsuta1: "?p : (((P8 --> P2) --> P9) --> P3 --> P10)  
      --> (((P3 --> P2) --> P9) --> P4)  
      --> (((P6 --> P1) --> P2) --> P9)  
      --> (((P7 --> P1) --> P10) --> P4 --> P5)  
      --> (P1 --> P3) --> (P1 --> P8) --> P6 --> P7 --> P5"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 end
--- a/src/FOLP/ex/Propositional_Int.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOLP/ex/Propositional_Int.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1991  University of Cambridge
 *)
 
-section {* First-Order Logic: propositional examples *}
+section \<open>First-Order Logic: propositional examples\<close>
 
 theory Propositional_Int
 imports IFOLP
@@ -12,106 +12,106 @@
 
 text "commutative laws of & and | "
 schematic_lemma "?p : P & Q  -->  Q & P"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : P | Q  -->  Q | P"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 text "associative laws of & and | "
 schematic_lemma "?p : (P & Q) & R  -->  P & (Q & R)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : (P | Q) | R  -->  P | (Q | R)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 text "distributive laws of & and | "
 schematic_lemma "?p : (P & Q) | R  --> (P | R) & (Q | R)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : (P | R) & (Q | R)  --> (P & Q) | R"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : (P | Q) & R  --> (P & R) | (Q & R)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 schematic_lemma "?p : (P & R) | (Q & R)  --> (P | Q) & R"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 text "Laws involving implication"
 
 schematic_lemma "?p : (P-->R) & (Q-->R) <-> (P|Q --> R)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : (P & Q --> R) <-> (P--> (Q-->R))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : ((P-->R)-->R) --> ((Q-->R)-->R) --> (P&Q-->R) --> R"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : ~(P-->R) --> ~(Q-->R) --> ~(P&Q-->R)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : (P --> Q & R) <-> (P-->Q)  &  (P-->R)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 text "Propositions-as-types"
 
 (*The combinator K*)
 schematic_lemma "?p : P --> (Q --> P)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 (*The combinator S*)
 schematic_lemma "?p : (P-->Q-->R)  --> (P-->Q) --> (P-->R)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 (*Converse is classical*)
 schematic_lemma "?p : (P-->Q) | (P-->R)  -->  (P --> Q | R)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : (P-->Q)  -->  (~Q --> ~P)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 text "Schwichtenberg's examples (via T. Nipkow)"
 
 schematic_lemma stab_imp: "?p : (((Q-->R)-->R)-->Q) --> (((P-->Q)-->R)-->R)-->P-->Q"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma stab_to_peirce: "?p : (((P --> R) --> R) --> P) --> (((Q --> R) --> R) --> Q)  
               --> ((P --> Q) --> P) --> P"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma peirce_imp1: "?p : (((Q --> R) --> Q) --> Q)  
                --> (((P --> Q) --> R) --> P --> Q) --> P --> Q"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
   
 schematic_lemma peirce_imp2: "?p : (((P --> R) --> P) --> P) --> ((P --> Q --> R) --> P) --> P"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma mints: "?p : ((((P --> Q) --> P) --> P) --> Q) --> Q"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma mints_solovev: "?p : (P --> (Q --> R) --> Q) --> ((P --> Q) --> R) --> R"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma tatsuta: "?p : (((P7 --> P1) --> P10) --> P4 --> P5)  
           --> (((P8 --> P2) --> P9) --> P3 --> P10)  
           --> (P1 --> P8) --> P6 --> P7  
           --> (((P3 --> P2) --> P9) --> P4)  
           --> (P1 --> P3) --> (((P6 --> P1) --> P2) --> P9) --> P5"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma tatsuta1: "?p : (((P8 --> P2) --> P9) --> P3 --> P10)  
      --> (((P3 --> P2) --> P9) --> P4)  
      --> (((P6 --> P1) --> P2) --> P9)  
      --> (((P7 --> P1) --> P10) --> P4 --> P5)  
      --> (P1 --> P3) --> (P1 --> P8) --> P6 --> P7 --> P5"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 end
--- a/src/FOLP/ex/Quantifiers_Cla.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOLP/ex/Quantifiers_Cla.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -11,91 +11,91 @@
 begin
 
 schematic_lemma "?p : (ALL x y. P(x,y))  -->  (ALL y x. P(x,y))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma "?p : (EX x y. P(x,y)) --> (EX y x. P(x,y))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 
 (*Converse is false*)
 schematic_lemma "?p : (ALL x. P(x)) | (ALL x. Q(x)) --> (ALL x. P(x) | Q(x))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma "?p : (ALL x. P-->Q(x))  <->  (P--> (ALL x. Q(x)))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 
 schematic_lemma "?p : (ALL x. P(x)-->Q)  <->  ((EX x. P(x)) --> Q)"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 
 text "Some harder ones"
 
 schematic_lemma "?p : (EX x. P(x) | Q(x)) <-> (EX x. P(x)) | (EX x. Q(x))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 (*Converse is false*)
 schematic_lemma "?p : (EX x. P(x)&Q(x)) --> (EX x. P(x))  &  (EX x. Q(x))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 
 text "Basic test of quantifier reasoning"
 (*TRUE*)
 schematic_lemma "?p : (EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma "?p : (ALL x. Q(x))  -->  (EX x. Q(x))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 
 text "The following should fail, as they are false!"
 
 schematic_lemma "?p : (ALL x. EX y. Q(x,y))  -->  (EX y. ALL x. Q(x,y))"
-  apply (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})?
+  apply (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)?
   oops
 
 schematic_lemma "?p : (EX x. Q(x))  -->  (ALL x. Q(x))"
-  apply (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})?
+  apply (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)?
   oops
 
 schematic_lemma "?p : P(?a) --> (ALL x. P(x))"
-  apply (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})?
+  apply (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)?
   oops
 
 schematic_lemma "?p : (P(?a) --> (ALL x. Q(x))) --> (ALL x. P(x) --> Q(x))"
-  apply (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})?
+  apply (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)?
   oops
 
 
 text "Back to things that are provable..."
 
 schematic_lemma "?p : (ALL x. P(x)-->Q(x)) & (EX x. P(x)) --> (EX x. Q(x))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 
 (*An example of why exI should be delayed as long as possible*)
 schematic_lemma "?p : (P --> (EX x. Q(x))) & P --> (EX x. Q(x))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma "?p : (ALL x. P(x)-->Q(f(x))) & (ALL x. Q(x)-->R(g(x))) & P(d) --> R(?a)"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 schematic_lemma "?p : (ALL x. Q(x))  -->  (EX x. Q(x))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 
 text "Some slow ones"
 
 (*Principia Mathematica *11.53  *)
 schematic_lemma "?p : (ALL x y. P(x) --> Q(y)) <-> ((EX x. P(x)) --> (ALL y. Q(y)))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 (*Principia Mathematica *11.55  *)
 schematic_lemma "?p : (EX x y. P(x) & Q(x,y)) <-> (EX x. P(x) & (EX y. Q(x,y)))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 (*Principia Mathematica *11.61  *)
 schematic_lemma "?p : (EX y. ALL x. P(x) --> Q(x,y)) --> (ALL x. P(x) --> (EX y. Q(x,y)))"
-  by (tactic {* Cla.fast_tac @{context} FOLP_cs 1 *})
+  by (tactic \<open>Cla.fast_tac @{context} FOLP_cs 1\<close>)
 
 end
--- a/src/FOLP/ex/Quantifiers_Int.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/FOLP/ex/Quantifiers_Int.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -11,91 +11,91 @@
 begin
 
 schematic_lemma "?p : (ALL x y. P(x,y))  -->  (ALL y x. P(x,y))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : (EX x y. P(x,y)) --> (EX y x. P(x,y))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 (*Converse is false*)
 schematic_lemma "?p : (ALL x. P(x)) | (ALL x. Q(x)) --> (ALL x. P(x) | Q(x))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : (ALL x. P-->Q(x))  <->  (P--> (ALL x. Q(x)))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 schematic_lemma "?p : (ALL x. P(x)-->Q)  <->  ((EX x. P(x)) --> Q)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 text "Some harder ones"
 
 schematic_lemma "?p : (EX x. P(x) | Q(x)) <-> (EX x. P(x)) | (EX x. Q(x))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 (*Converse is false*)
 schematic_lemma "?p : (EX x. P(x)&Q(x)) --> (EX x. P(x))  &  (EX x. Q(x))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 text "Basic test of quantifier reasoning"
 (*TRUE*)
 schematic_lemma "?p : (EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : (ALL x. Q(x))  -->  (EX x. Q(x))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 text "The following should fail, as they are false!"
 
 schematic_lemma "?p : (ALL x. EX y. Q(x,y))  -->  (EX y. ALL x. Q(x,y))"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})?
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)?
   oops
 
 schematic_lemma "?p : (EX x. Q(x))  -->  (ALL x. Q(x))"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})?
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)?
   oops
 
 schematic_lemma "?p : P(?a) --> (ALL x. P(x))"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})?
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)?
   oops
 
 schematic_lemma "?p : (P(?a) --> (ALL x. Q(x))) --> (ALL x. P(x) --> Q(x))"
-  apply (tactic {* IntPr.fast_tac @{context} 1 *})?
+  apply (tactic \<open>IntPr.fast_tac @{context} 1\<close>)?
   oops
 
 
 text "Back to things that are provable..."
 
 schematic_lemma "?p : (ALL x. P(x)-->Q(x)) & (EX x. P(x)) --> (EX x. Q(x))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 (*An example of why exI should be delayed as long as possible*)
 schematic_lemma "?p : (P --> (EX x. Q(x))) & P --> (EX x. Q(x))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : (ALL x. P(x)-->Q(f(x))) & (ALL x. Q(x)-->R(g(x))) & P(d) --> R(?a)"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 schematic_lemma "?p : (ALL x. Q(x))  -->  (EX x. Q(x))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 
 text "Some slow ones"
 
 (*Principia Mathematica *11.53  *)
 schematic_lemma "?p : (ALL x y. P(x) --> Q(y)) <-> ((EX x. P(x)) --> (ALL y. Q(y)))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 (*Principia Mathematica *11.55  *)
 schematic_lemma "?p : (EX x y. P(x) & Q(x,y)) <-> (EX x. P(x) & (EX y. Q(x,y)))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 (*Principia Mathematica *11.61  *)
 schematic_lemma "?p : (EX y. ALL x. P(x) --> Q(x,y)) --> (ALL x. P(x) --> (EX y. Q(x,y)))"
-  by (tactic {* IntPr.fast_tac @{context} 1 *})
+  by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
 
 end
--- a/src/LCF/LCF.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/LCF/LCF.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,15 +3,15 @@
     Copyright   1992  University of Cambridge
 *)
 
-section {* LCF on top of First-Order Logic *}
+section \<open>LCF on top of First-Order Logic\<close>
 
 theory LCF
 imports "~~/src/FOL/FOL"
 begin
 
-text {* This theory is based on Lawrence Paulson's book Logic and Computation. *}
+text \<open>This theory is based on Lawrence Paulson's book Logic and Computation.\<close>
 
-subsection {* Natural Deduction Rules for LCF *}
+subsection \<open>Natural Deduction Rules for LCF\<close>
 
 class cpo = "term"
 default_sort cpo
@@ -245,7 +245,7 @@
   surj_pairing FST SND
 
 
-subsection {* Ordered pairs and products *}
+subsection \<open>Ordered pairs and products\<close>
 
 lemma expand_all_PROD: "(\<forall>p. P(p)) \<longleftrightarrow> (\<forall>x y. P(<x,y>))"
   apply (rule iffI)
@@ -293,7 +293,7 @@
   done
 
 
-subsection {* Fixedpoint theory *}
+subsection \<open>Fixedpoint theory\<close>
 
 lemma adm_eq: "adm(\<lambda>x. t(x)=(u(x)::'a::cpo))"
   apply (unfold eq_def)
@@ -318,12 +318,12 @@
   adm_not_free adm_eq adm_less adm_not_less
   adm_not_eq_tr adm_conj adm_disj adm_imp adm_all
 
-method_setup induct = {*
+method_setup induct = \<open>
   Scan.lift Args.name_inner_syntax >> (fn v => fn ctxt =>
     SIMPLE_METHOD' (fn i =>
       Rule_Insts.res_inst_tac ctxt [((("f", 0), Position.none), v)] [] @{thm induct} i THEN
       REPEAT (resolve_tac ctxt @{thms adm_lemmas} i)))
-*}
+\<close>
 
 lemma least_FIX: "f(p) = p \<Longrightarrow> FIX(f) << p"
   apply (induct f)
@@ -378,11 +378,11 @@
   apply (rule 3)
   done
 
-ML {*
+ML \<open>
 fun induct2_tac ctxt (f, g) i =
   Rule_Insts.res_inst_tac ctxt
     [((("f", 0), Position.none), f), ((("g", 0), Position.none), g)] [] @{thm induct2} i THEN
   REPEAT(resolve_tac ctxt @{thms adm_lemmas} i)
-*}
+\<close>
 
 end
--- a/src/LCF/ex/Ex1.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/LCF/ex/Ex1.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -1,4 +1,4 @@
-section {*  Section 10.4 *}
+section \<open>Section 10.4\<close>
 
 theory Ex1
 imports "../LCF"
--- a/src/LCF/ex/Ex2.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/LCF/ex/Ex2.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -1,4 +1,4 @@
-section {* Example 3.8 *}
+section \<open>Example 3.8\<close>
 
 theory Ex2
 imports "../LCF"
--- a/src/LCF/ex/Ex3.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/LCF/ex/Ex3.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -1,4 +1,4 @@
-section {* Addition with fixpoint of successor *}
+section \<open>Addition with fixpoint of successor\<close>
 
 theory Ex3
 imports "../LCF"
--- a/src/LCF/ex/Ex4.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/LCF/ex/Ex4.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -1,5 +1,5 @@
 
-section {* Prefixpoints *}
+section \<open>Prefixpoints\<close>
 
 theory Ex4
 imports "../LCF"
--- a/src/Sequents/ILL.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/Sequents/ILL.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -35,17 +35,17 @@
   "_Context"  :: "two_seqe"   ("((_)/ :=: (_))" [6,6] 5)
   "_PromAux"  :: "three_seqe" ("promaux {_||_||_}")
 
-parse_translation {*
+parse_translation \<open>
   [(@{syntax_const "_Trueprop"}, K (single_tr @{const_syntax Trueprop})),
    (@{syntax_const "_Context"}, K (two_seq_tr @{const_syntax Context})),
    (@{syntax_const "_PromAux"}, K (three_seq_tr @{const_syntax PromAux}))]
-*}
+\<close>
 
-print_translation {*
+print_translation \<open>
   [(@{const_syntax Trueprop}, K (single_tr' @{syntax_const "_Trueprop"})),
    (@{const_syntax Context}, K (two_seq_tr' @{syntax_const "_Context"})),
    (@{const_syntax PromAux}, K (three_seq_tr' @{syntax_const "_PromAux"}))]
-*}
+\<close>
 
 defs
 
@@ -271,7 +271,7 @@
   apply best
   done
 
-ML {*
+ML \<open>
   val safe_pack =
     @{context}
     |> fold_rev Cla.add_safe @{thms conj_lemma ll_mp contrad1
@@ -283,13 +283,13 @@
     Cla.put_pack safe_pack @{context}
     |> Cla.add_unsafe @{thm impr_contr_der}
     |> Cla.get_pack;
-*}
+\<close>
 
 method_setup best_safe =
-  {* Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.best_tac (Cla.put_pack safe_pack ctxt))) *}
+  \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.best_tac (Cla.put_pack safe_pack ctxt)))\<close>
 
 method_setup best_power =
-  {* Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.best_tac (Cla.put_pack power_pack ctxt))) *}
+  \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.best_tac (Cla.put_pack power_pack ctxt)))\<close>
 
 
 (* Some examples from Troelstra and van Dalen *)
--- a/src/Sequents/LK.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/Sequents/LK.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -23,7 +23,7 @@
                ==> (P, $H |- $F) == (P', $H' |- $F')"
 
 
-subsection {* Rewrite rules *}
+subsection \<open>Rewrite rules\<close>
 
 lemma conj_simps:
   "|- P & True <-> P"
@@ -79,15 +79,15 @@
   by (fast add!: subst)+
 
 
-subsection {* Miniscoping: pushing quantifiers in *}
+subsection \<open>Miniscoping: pushing quantifiers in\<close>
 
-text {*
+text \<open>
   We do NOT distribute of ALL over &, or dually that of EX over |
   Baaz and Leitsch, On Skolemization and Proof Complexity (1994)
   show that this step can increase proof length!
-*}
+\<close>
 
-text {*existential miniscoping*}
+text \<open>existential miniscoping\<close>
 lemma ex_simps:
   "!!P Q. |- (EX x. P(x) & Q) <-> (EX x. P(x)) & Q"
   "!!P Q. |- (EX x. P & Q(x)) <-> P & (EX x. Q(x))"
@@ -97,7 +97,7 @@
   "!!P Q. |- (EX x. P --> Q(x)) <-> P --> (EX x. Q(x))"
   by (fast add!: subst)+
 
-text {*universal miniscoping*}
+text \<open>universal miniscoping\<close>
 lemma all_simps:
   "!!P Q. |- (ALL x. P(x) & Q) <-> (ALL x. P(x)) & Q"
   "!!P Q. |- (ALL x. P & Q(x)) <-> P & (ALL x. Q(x))"
@@ -107,7 +107,7 @@
   "!!P Q. |- (ALL x. P | Q(x)) <-> P | (ALL x. Q(x))"
   by (fast add!: subst)+
 
-text {*These are NOT supplied by default!*}
+text \<open>These are NOT supplied by default!\<close>
 lemma distrib_simps:
   "|- P & (Q | R) <-> P&Q | P&R"
   "|- (Q | R) & P <-> Q&P | R&P"
@@ -138,7 +138,7 @@
   by (fast add!: subst)+
 
 
-subsection {* Named rewrite rules *}
+subsection \<open>Named rewrite rules\<close>
 
 lemma conj_commute: "|- P&Q <-> Q&P"
   and conj_left_commute: "|- P&(Q&R) <-> Q&(P&R)"
@@ -177,11 +177,11 @@
   shows "|- (P-->Q) <-> (P'-->Q')"
   apply (lem p1)
   apply safe
-   apply (tactic {*
+   apply (tactic \<open>
      REPEAT (resolve_tac @{context} @{thms cut} 1 THEN
        DEPTH_SOLVE_1
          (resolve_tac @{context} [@{thm thinL}, @{thm thinR}, @{thm p2} COMP @{thm monotonic}] 1) THEN
-           Cla.safe_tac @{context} 1) *})
+           Cla.safe_tac @{context} 1)\<close>)
   done
 
 lemma conj_cong:
@@ -190,22 +190,22 @@
   shows "|- (P&Q) <-> (P'&Q')"
   apply (lem p1)
   apply safe
-   apply (tactic {*
+   apply (tactic \<open>
      REPEAT (resolve_tac @{context} @{thms cut} 1 THEN
        DEPTH_SOLVE_1
          (resolve_tac @{context} [@{thm thinL}, @{thm thinR}, @{thm p2} COMP @{thm monotonic}] 1) THEN
-           Cla.safe_tac @{context} 1) *})
+           Cla.safe_tac @{context} 1)\<close>)
   done
 
 lemma eq_sym_conv: "|- (x=y) <-> (y=x)"
   by (fast add!: subst)
 
 ML_file "simpdata.ML"
-setup {* map_theory_simpset (put_simpset LK_ss) *}
-setup {* Simplifier.method_setup [] *}
+setup \<open>map_theory_simpset (put_simpset LK_ss)\<close>
+setup \<open>Simplifier.method_setup []\<close>
 
 
-text {* To create substition rules *}
+text \<open>To create substition rules\<close>
 
 lemma eq_imp_subst: "|- a=b ==> $H, A(a), $G |- $E, A(b), $F"
   by simp
--- a/src/Sequents/LK/Nat.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/Sequents/LK/Nat.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1999  University of Cambridge
 *)
 
-section {* Theory of the natural numbers: Peano's axioms, primitive recursion *}
+section \<open>Theory of the natural numbers: Peano's axioms, primitive recursion\<close>
 
 theory Nat
 imports "../LK"
--- a/src/Sequents/LK/Propositional.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/Sequents/LK/Propositional.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1992  University of Cambridge
 *)
 
-section {* Classical sequent calculus: examples with propositional connectives *}
+section \<open>Classical sequent calculus: examples with propositional connectives\<close>
 
 theory Propositional
 imports "../LK"
--- a/src/Sequents/LK0.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/Sequents/LK0.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -6,7 +6,7 @@
 (eta-expanded, beta-contracted).
 *)
 
-section {* Classical First-Order Sequent Calculus *}
+section \<open>Classical First-Order Sequent Calculus\<close>
 
 theory LK0
 imports Sequents
@@ -34,8 +34,8 @@
 syntax
  "_Trueprop"    :: "two_seqe" ("((_)/ |- (_))" [6,6] 5)
 
-parse_translation {* [(@{syntax_const "_Trueprop"}, K (two_seq_tr @{const_syntax Trueprop}))] *}
-print_translation {* [(@{const_syntax Trueprop}, K (two_seq_tr' @{syntax_const "_Trueprop"}))] *}
+parse_translation \<open>[(@{syntax_const "_Trueprop"}, K (two_seq_tr @{const_syntax Trueprop}))]\<close>
+print_translation \<open>[(@{const_syntax Trueprop}, K (two_seq_tr' @{syntax_const "_Trueprop"}))]\<close>
 
 abbreviation
   not_equal  (infixl "~=" 50) where
@@ -150,7 +150,7 @@
 lemma exchL: "$H, Q, P, $G |- $E ==> $H, P, Q, $G |- $E"
   by (rule exchLS)
 
-ML {*
+ML \<open>
 (*Cut and thin, replacing the right-side formula*)
 fun cutR_tac ctxt s i =
   Rule_Insts.res_inst_tac ctxt [((("P", 0), Position.none), s)] [] @{thm cut} i THEN
@@ -160,7 +160,7 @@
 fun cutL_tac ctxt s i =
   Rule_Insts.res_inst_tac ctxt [((("P", 0), Position.none), s)] [] @{thm cut} i THEN
   resolve_tac ctxt @{thms thinL} (i + 1)
-*}
+\<close>
 
 
 (** If-and-only-if rules **)
@@ -220,36 +220,36 @@
   conjR conjL
   FalseL TrueR
   refl basic
-ML {* val prop_pack = Cla.get_pack @{context} *}
+ML \<open>val prop_pack = Cla.get_pack @{context}\<close>
 
 lemmas [safe] = exL allR
 lemmas [unsafe] = the_equality exR_thin allL_thin
-ML {* val LK_pack = Cla.get_pack @{context} *}
+ML \<open>val LK_pack = Cla.get_pack @{context}\<close>
 
-ML {*
+ML \<open>
   val LK_dup_pack =
     Cla.put_pack prop_pack @{context}
     |> fold_rev Cla.add_safe @{thms allR exL}
     |> fold_rev Cla.add_unsafe @{thms allL exR the_equality}
     |> Cla.get_pack;
-*}
+\<close>
 
 method_setup fast_prop =
-  {* Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.fast_tac (Cla.put_pack prop_pack ctxt))) *}
+  \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.fast_tac (Cla.put_pack prop_pack ctxt)))\<close>
 
 method_setup fast_dup =
-  {* Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.fast_tac (Cla.put_pack LK_dup_pack ctxt))) *}
+  \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.fast_tac (Cla.put_pack LK_dup_pack ctxt)))\<close>
 
 method_setup best_dup =
-  {* Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.best_tac (Cla.put_pack LK_dup_pack ctxt))) *}
+  \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD' (Cla.best_tac (Cla.put_pack LK_dup_pack ctxt)))\<close>
 
-method_setup lem = {*
+method_setup lem = \<open>
   Attrib.thm >> (fn th => fn ctxt =>
     SIMPLE_METHOD' (fn i =>
       resolve_tac ctxt [@{thm thinR} RS @{thm cut}] i THEN
       REPEAT (resolve_tac ctxt @{thms thinL} i) THEN
       resolve_tac ctxt [th] i))
-*}
+\<close>
 
 
 lemma mp_R:
--- a/src/Sequents/Modal0.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/Sequents/Modal0.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -21,20 +21,20 @@
   "_Lstar"      :: "two_seqe"   ("(_)|L>(_)" [6,6] 5)
   "_Rstar"      :: "two_seqe"   ("(_)|R>(_)" [6,6] 5)
 
-ML {*
+ML \<open>
   fun star_tr c [s1, s2] = Const(c, dummyT) $ seq_tr s1 $ seq_tr s2;
   fun star_tr' c [s1, s2] = Const(c, dummyT) $ seq_tr' s1 $ seq_tr' s2;
-*}
+\<close>
 
-parse_translation {*
+parse_translation \<open>
  [(@{syntax_const "_Lstar"}, K (star_tr @{const_syntax Lstar})),
   (@{syntax_const "_Rstar"}, K (star_tr @{const_syntax Rstar}))]
-*}
+\<close>
 
-print_translation {*
+print_translation \<open>
  [(@{const_syntax Lstar}, K (star_tr' @{syntax_const "_Lstar"})),
   (@{const_syntax Rstar}, K (star_tr' @{syntax_const "_Rstar"}))]
-*}
+\<close>
 
 defs
   strimp_def:    "P --< Q == [](P --> Q)"
--- a/src/Sequents/S4.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/Sequents/S4.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -31,7 +31,7 @@
    "[| $E |L> $E';  $F |L> $F';  $G |R> $G';
            $E', P, $F' |-         $G'|] ==> $E, <>P, $F |- $G"
 
-ML {*
+ML \<open>
 structure S4_Prover = Modal_ProverFun
 (
   val rewrite_rls = @{thms rewrite_rls}
@@ -41,10 +41,10 @@
   val aside_rls = [@{thm lstar0}, @{thm lstar1}, @{thm lstar2}, @{thm rstar0},
     @{thm rstar1}, @{thm rstar2}]
 )
-*}
+\<close>
 
 method_setup S4_solve =
-  {* Scan.succeed (fn ctxt => SIMPLE_METHOD (S4_Prover.solve_tac ctxt 2)) *}
+  \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD (S4_Prover.solve_tac ctxt 2))\<close>
 
 
 (* Theorems of system T from Hughes and Cresswell and Hailpern, LNCS 129 *)
--- a/src/Sequents/S43.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/Sequents/S43.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -16,21 +16,21 @@
   "_S43pi" :: "[seq, seq, seq, seq, seq, seq] => prop"
                          ("S43pi((_);(_);(_);(_);(_);(_))" [] 5)
 
-parse_translation {*
+parse_translation \<open>
   let
     val tr  = seq_tr;
     fun s43pi_tr [s1, s2, s3, s4, s5, s6] =
       Const (@{const_syntax S43pi}, dummyT) $ tr s1 $ tr s2 $ tr s3 $ tr s4 $ tr s5 $ tr s6;
   in [(@{syntax_const "_S43pi"}, K s43pi_tr)] end
-*}
+\<close>
 
-print_translation {*
+print_translation \<open>
 let
   val tr' = seq_tr';
   fun s43pi_tr' [s1, s2, s3, s4, s5, s6] =
     Const(@{syntax_const "_S43pi"}, dummyT) $ tr' s1 $ tr' s2 $ tr' s3 $ tr' s4 $ tr' s5 $ tr' s6;
 in [(@{const_syntax S43pi}, K s43pi_tr')] end
-*}
+\<close>
 
 axiomatization where
 (* Definition of the star operation using a set of Horn clauses  *)
@@ -76,7 +76,7 @@
    $L |- $R1, []P, $R2"
 
 
-ML {*
+ML \<open>
 structure S43_Prover = Modal_ProverFun
 (
   val rewrite_rls = @{thms rewrite_rls}
@@ -86,13 +86,13 @@
   val aside_rls = [@{thm lstar0}, @{thm lstar1}, @{thm lstar2}, @{thm rstar0},
     @{thm rstar1}, @{thm rstar2}, @{thm S43pi0}, @{thm S43pi1}, @{thm S43pi2}]
 )
-*}
+\<close>
 
 
-method_setup S43_solve = {*
+method_setup S43_solve = \<open>
   Scan.succeed (fn ctxt => SIMPLE_METHOD
     (S43_Prover.solve_tac ctxt 2 ORELSE S43_Prover.solve_tac ctxt 3))
-*}
+\<close>
 
 
 (* Theorems of system T from Hughes and Cresswell and Hailpern, LNCS 129 *)
--- a/src/Sequents/Sequents.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/Sequents/Sequents.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1993  University of Cambridge
 *)
 
-section {* Parsing and pretty-printing of sequences *}
+section \<open>Parsing and pretty-printing of sequences\<close>
 
 theory Sequents
 imports Pure
@@ -17,7 +17,7 @@
 typedecl o
 
 
-subsection {* Sequences *}
+subsection \<open>Sequences\<close>
 
 typedecl
  seq'
@@ -27,7 +27,7 @@
  Seq1'         :: "o=>seq'"
 
 
-subsection {* Concrete syntax *}
+subsection \<open>Concrete syntax\<close>
 
 nonterminal seq and seqobj and seqcont
 
@@ -56,7 +56,7 @@
   (*Constant to allow definitions of SEQUENCES of formulas*)
   "_Side"        :: "seq=>(seq'=>seq')"     ("<<(_)>>")
 
-ML {*
+ML \<open>
 
 (* parse translation for sequences *)
 
@@ -139,12 +139,12 @@
 (** for the <<...>> notation **)
 
 fun side_tr [s1] = seq_tr s1;
-*}
+\<close>
 
-parse_translation {* [(@{syntax_const "_Side"}, K side_tr)] *}
+parse_translation \<open>[(@{syntax_const "_Side"}, K side_tr)]\<close>
 
 
-subsection {* Proof tools *}
+subsection \<open>Proof tools\<close>
 
 ML_file "prover.ML"
 
--- a/src/Sequents/T.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/Sequents/T.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -30,7 +30,7 @@
    "[| $E |L> $E';  $F |L> $F';  $G |R> $G';
                $E', P, $F'|-         $G'|] ==> $E, <>P, $F |-          $G"
 
-ML {*
+ML \<open>
 structure T_Prover = Modal_ProverFun
 (
   val rewrite_rls = @{thms rewrite_rls}
@@ -40,9 +40,9 @@
   val aside_rls = [@{thm lstar0}, @{thm lstar1}, @{thm lstar2}, @{thm rstar0},
     @{thm rstar1}, @{thm rstar2}]
 )
-*}
+\<close>
 
-method_setup T_solve = {* Scan.succeed (fn ctxt => SIMPLE_METHOD (T_Prover.solve_tac ctxt 2)) *}
+method_setup T_solve = \<open>Scan.succeed (fn ctxt => SIMPLE_METHOD (T_Prover.solve_tac ctxt 2))\<close>
 
 
 (* Theorems of system T from Hughes and Cresswell and Hailpern, LNCS 129 *)
--- a/src/Sequents/Washing.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/Sequents/Washing.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -32,10 +32,10 @@
 
 (* "activate" definitions for use in proof *)
 
-ML {* ML_Thms.bind_thms ("changeI", [@{thm context1}] RL ([@{thm change}] RLN (2,[@{thm cut}]))) *}
-ML {* ML_Thms.bind_thms ("load1I", [@{thm context1}] RL ([@{thm load1}] RLN (2,[@{thm cut}]))) *}
-ML {* ML_Thms.bind_thms ("washI", [@{thm context1}] RL ([@{thm wash}] RLN (2,[@{thm cut}]))) *}
-ML {* ML_Thms.bind_thms ("dryI", [@{thm context1}] RL ([@{thm dry}] RLN (2,[@{thm cut}]))) *}
+ML \<open>ML_Thms.bind_thms ("changeI", [@{thm context1}] RL ([@{thm change}] RLN (2,[@{thm cut}])))\<close>
+ML \<open>ML_Thms.bind_thms ("load1I", [@{thm context1}] RL ([@{thm load1}] RLN (2,[@{thm cut}])))\<close>
+ML \<open>ML_Thms.bind_thms ("washI", [@{thm context1}] RL ([@{thm wash}] RLN (2,[@{thm cut}])))\<close>
+ML \<open>ML_Thms.bind_thms ("dryI", [@{thm context1}] RL ([@{thm dry}] RLN (2,[@{thm cut}])))\<close>
 
 (* a load of dirty clothes and two dollars gives you clean clothes *)
 
--- a/src/ZF/AC.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/AC.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,11 +3,11 @@
     Copyright   1994  University of Cambridge
 *)
 
-section{*The Axiom of Choice*}
+section\<open>The Axiom of Choice\<close>
 
 theory AC imports Main_ZF begin
 
-text{*This definition comes from Halmos (1960), page 59.*}
+text\<open>This definition comes from Halmos (1960), page 59.\<close>
 axiomatization where
   AC: "[| a \<in> A;  !!x. x \<in> A ==> (\<exists>y. y \<in> B(x)) |] ==> \<exists>z. z \<in> Pi(A,B)"
 
--- a/src/ZF/AC/Cardinal_aux.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/AC/Cardinal_aux.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -45,7 +45,7 @@
     by (simp add: Inf_Card_is_InfCard Finite_cardinal_iff NFI i) 
   have "A \<union> B \<lesssim> A + B" by (rule Un_lepoll_sum)
   also have "... \<lesssim> A \<times> B"
-    by (rule lepoll_imp_sum_lepoll_prod [OF AB [THEN eqpoll_imp_lepoll] `2 \<lesssim> A`])
+    by (rule lepoll_imp_sum_lepoll_prod [OF AB [THEN eqpoll_imp_lepoll] \<open>2 \<lesssim> A\<close>])
   also have "... \<approx> i \<times> i"
     by (blast intro: prod_eqpoll_cong eqpoll_imp_lepoll A B) 
   also have "... \<approx> i"
@@ -172,11 +172,11 @@
 apply (drule eqpoll_imp_lepoll [THEN lepoll_trans],
        rule le_imp_lepoll, assumption)+
 apply (case_tac "Finite(x \<union> xa)")
-txt{*finite case*}
+txt\<open>finite case\<close>
  apply (drule Finite_Un [OF lepoll_Finite lepoll_Finite], assumption+)
  apply (drule subset_Un_Diff [THEN subset_imp_lepoll, THEN lepoll_Finite])
  apply (fast dest: eqpoll_sym [THEN eqpoll_imp_lepoll, THEN lepoll_Finite])
-txt{*infinite case*}
+txt\<open>infinite case\<close>
 apply (drule Un_lepoll_Inf_Ord, (assumption+))
 apply (blast intro: le_Ord2)
 apply (drule lesspoll_trans1
--- a/src/ZF/AC/DC.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/AC/DC.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -17,7 +17,7 @@
 apply (fast intro: LeastI Ord_in_Ord) 
 done
 
-text{*Trivial in the presence of AC, but here we need a wellordering of X*}
+text\<open>Trivial in the presence of AC, but here we need a wellordering of X\<close>
 lemma image_Ord_lepoll: "[| f \<in> X->Y; Ord(X) |] ==> f``X \<lesssim> X"
 apply (unfold lepoll_def)
 apply (rule_tac x = "\<lambda>x \<in> f``X. LEAST y. f`y = x" in exI)
--- a/src/ZF/AC/HH.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/AC/HH.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -16,7 +16,7 @@
     "HH(f,x,a) == transrec(a, %b r. let z = x - (\<Union>c \<in> b. r`c)
                                     in  if f`z \<in> Pow(z)-{0} then f`z else {x})"
 
-subsection{*Lemmas useful in each of the three proofs*}
+subsection\<open>Lemmas useful in each of the three proofs\<close>
 
 lemma HH_def_satisfies_eq:
      "HH(f,x,a) = (let z = x - (\<Union>b \<in> a. HH(f,x,b))   
@@ -126,7 +126,7 @@
 apply (erule_tac [2] ltI [OF _ Ord_Least], assumption)
 done
 
-subsection{*Lemmas used in the proofs of AC1 ==> WO2 and AC17 ==> AC1*}
+subsection\<open>Lemmas used in the proofs of AC1 ==> WO2 and AC17 ==> AC1\<close>
 
 lemma lam_Least_HH_inj_Pow: 
         "(\<lambda>a \<in> (LEAST i. HH(f,x,i)={x}). HH(f,x,a))   
@@ -213,7 +213,7 @@
               lam_sing_bij [THEN bij_converse_bij]]
 
 
-subsection{*The proof of AC1 ==> WO2*}
+subsection\<open>The proof of AC1 ==> WO2\<close>
 
 (*Establishing the existence of a bijection, namely
 converse
--- a/src/ZF/Arith.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Arith.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -9,11 +9,11 @@
   Also, rec(m, 0, %z w.z) is pred(m).
 *)
 
-section{*Arithmetic Operators and Their Definitions*}
+section\<open>Arithmetic Operators and Their Definitions\<close>
 
 theory Arith imports Univ begin
 
-text{*Proofs about elementary arithmetic: addition, multiplication, etc.*}
+text\<open>Proofs about elementary arithmetic: addition, multiplication, etc.\<close>
 
 definition
   pred   :: "i=>i"    (*inverse of succ*)  where
@@ -91,7 +91,7 @@
 lemmas zero_lt_natE = zero_lt_lemma [THEN bexE]
 
 
-subsection{*@{text natify}, the Coercion to @{term nat}*}
+subsection\<open>@{text natify}, the Coercion to @{term nat}\<close>
 
 lemma pred_succ_eq [simp]: "pred(succ(y)) = y"
 by (unfold pred_def, auto)
@@ -167,7 +167,7 @@
 by (simp add: div_def)
 
 
-subsection{*Typing rules*}
+subsection\<open>Typing rules\<close>
 
 (** Addition **)
 
@@ -222,7 +222,7 @@
 done
 
 
-subsection{*Addition*}
+subsection\<open>Addition\<close>
 
 (*Natify has weakened this law, compared with the older approach*)
 lemma add_0_natify [simp]: "0 #+ m = natify(m)"
@@ -315,7 +315,7 @@
 by (induct_tac "n", auto)
 
 
-subsection{*Monotonicity of Addition*}
+subsection\<open>Monotonicity of Addition\<close>
 
 (*strict, in 1st argument; proof is by rule induction on 'less than'.
   Still need j\<in>nat, for consider j = omega.  Then we can have i<omega,
@@ -326,11 +326,11 @@
 apply (simp_all add: leI)
 done
 
-text{*strict, in second argument*}
+text\<open>strict, in second argument\<close>
 lemma add_lt_mono2: "[| i<j; j\<in>nat |] ==> k#+i < k#+j"
 by (simp add: add_commute [of k] add_lt_mono1)
 
-text{*A [clumsy] way of lifting < monotonicity to @{text "\<le>"} monotonicity*}
+text\<open>A [clumsy] way of lifting < monotonicity to @{text "\<le>"} monotonicity\<close>
 lemma Ord_lt_mono_imp_le_mono:
   assumes lt_mono: "!!i j. [| i<j; j:k |] ==> f(i) < f(j)"
       and ford:    "!!i. i:k ==> Ord(f(i))"
@@ -341,19 +341,19 @@
 apply (blast intro!: leCI lt_mono ford elim!: leE)
 done
 
-text{*@{text "\<le>"} monotonicity, 1st argument*}
+text\<open>@{text "\<le>"} monotonicity, 1st argument\<close>
 lemma add_le_mono1: "[| i \<le> j; j\<in>nat |] ==> i#+k \<le> j#+k"
 apply (rule_tac f = "%j. j#+k" in Ord_lt_mono_imp_le_mono, typecheck)
 apply (blast intro: add_lt_mono1 add_type [THEN nat_into_Ord])+
 done
 
-text{*@{text "\<le>"} monotonicity, both arguments*}
+text\<open>@{text "\<le>"} monotonicity, both arguments\<close>
 lemma add_le_mono: "[| i \<le> j; k \<le> l; j\<in>nat; l\<in>nat |] ==> i#+k \<le> j#+l"
 apply (rule add_le_mono1 [THEN le_trans], assumption+)
 apply (subst add_commute, subst add_commute, rule add_le_mono1, assumption+)
 done
 
-text{*Combinations of less-than and less-than-or-equals*}
+text\<open>Combinations of less-than and less-than-or-equals\<close>
 
 lemma add_lt_le_mono: "[| i<j; k\<le>l; j\<in>nat; l\<in>nat |] ==> i#+k < j#+l"
 apply (rule add_lt_mono1 [THEN lt_trans2], assumption+)
@@ -363,7 +363,7 @@
 lemma add_le_lt_mono: "[| i\<le>j; k<l; j\<in>nat; l\<in>nat |] ==> i#+k < j#+l"
 by (subst add_commute, subst add_commute, erule add_lt_le_mono, assumption+)
 
-text{*Less-than: in other words, strict in both arguments*}
+text\<open>Less-than: in other words, strict in both arguments\<close>
 lemma add_lt_mono: "[| i<j; k<l; j\<in>nat; l\<in>nat |] ==> i#+k < j#+l"
 apply (rule add_lt_le_mono)
 apply (auto intro: leI)
@@ -433,7 +433,7 @@
     "[|i\<in>nat; j\<in>nat|] ==> (i \<union> j) #- k = (i#-k) \<union> (j#-k)"
 by (insert nat_diff_Un_distrib [of i j "natify(k)"], simp)
 
-text{*We actually prove @{term "i #- j #- k = i #- (j #+ k)"}*}
+text\<open>We actually prove @{term "i #- j #- k = i #- (j #+ k)"}\<close>
 lemma diff_diff_left [simplified]:
      "natify(i)#-natify(j)#-k = natify(i) #- (natify(j)#+k)"
 by (rule_tac m="natify(i)" and n="natify(j)" in diff_induct, auto)
@@ -464,7 +464,7 @@
 by auto
 
 
-subsection{*Multiplication*}
+subsection\<open>Multiplication\<close>
 
 lemma mult_0 [simp]: "0 #* m = 0"
 by (simp add: mult_def)
--- a/src/ZF/ArithSimp.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/ArithSimp.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   2000  University of Cambridge
 *)
 
-section{*Arithmetic with simplification*}
+section\<open>Arithmetic with simplification\<close>
 
 theory ArithSimp
 imports Arith
@@ -14,7 +14,7 @@
 ML_file "arith_data.ML"
 
 
-subsection{*Difference*}
+subsection\<open>Difference\<close>
 
 lemma diff_self_eq_0 [simp]: "m #- m = 0"
 apply (subgoal_tac "natify (m) #- natify (m) = 0")
@@ -64,7 +64,7 @@
 done
 
 
-subsection{*Remainder*}
+subsection\<open>Remainder\<close>
 
 (*We need m:nat even with natify*)
 lemma div_termination: "[| 0<n;  n \<le> m;  m:nat |] ==> m #- n < m"
@@ -135,7 +135,7 @@
 done
 
 
-subsection{*Division*}
+subsection\<open>Division\<close>
 
 lemma raw_div_type: "[| m:nat;  n:nat |] ==> raw_div (m, n) \<in> nat"
 apply (unfold raw_div_def)
@@ -183,9 +183,9 @@
 apply (simp add: nat_into_Ord [THEN Ord_0_lt_iff])
 apply (erule complete_induct)
 apply (case_tac "x<n")
-txt{*case x<n*}
+txt\<open>case x<n\<close>
 apply (simp (no_asm_simp))
-txt{*case @{term"n \<le> x"}*}
+txt\<open>case @{term"n \<le> x"}\<close>
 apply (simp add: not_lt_iff_le add_assoc mod_geq div_termination [THEN ltD] add_diff_inverse)
 done
 
@@ -200,23 +200,23 @@
 done
 
 
-subsection{*Further Facts about Remainder*}
+subsection\<open>Further Facts about Remainder\<close>
 
-text{*(mainly for mutilated chess board)*}
+text\<open>(mainly for mutilated chess board)\<close>
 
 lemma mod_succ_lemma:
      "[| 0<n;  m:nat;  n:nat |]
       ==> succ(m) mod n = (if succ(m mod n) = n then 0 else succ(m mod n))"
 apply (erule complete_induct)
 apply (case_tac "succ (x) <n")
-txt{* case succ(x) < n *}
+txt\<open>case succ(x) < n\<close>
  apply (simp (no_asm_simp) add: nat_le_refl [THEN lt_trans] succ_neq_self)
  apply (simp add: ltD [THEN mem_imp_not_eq])
-txt{* case @{term"n \<le> succ(x)"} *}
+txt\<open>case @{term"n \<le> succ(x)"}\<close>
 apply (simp add: mod_geq not_lt_iff_le)
 apply (erule leE)
  apply (simp (no_asm_simp) add: mod_geq div_termination [THEN ltD] diff_succ)
-txt{*equality case*}
+txt\<open>equality case\<close>
 apply (simp add: diff_self_eq_0)
 done
 
@@ -235,7 +235,7 @@
 apply (subgoal_tac "natify (m) mod n < n")
 apply (rule_tac [2] i = "natify (m) " in complete_induct)
 apply (case_tac [3] "x<n", auto)
-txt{* case @{term"n \<le> x"}*}
+txt\<open>case @{term"n \<le> x"}\<close>
 apply (simp add: mod_geq not_lt_iff_le div_termination [THEN ltD])
 done
 
@@ -264,7 +264,7 @@
 by (cut_tac n = 0 in mod2_add_more, auto)
 
 
-subsection{*Additional theorems about @{text "\<le>"}*}
+subsection\<open>Additional theorems about @{text "\<le>"}\<close>
 
 lemma add_le_self: "m:nat ==> m \<le> (m #+ n)"
 apply (simp (no_asm_simp))
@@ -339,7 +339,7 @@
 done
 
 
-subsection{*Cancellation Laws for Common Factors in Comparisons*}
+subsection\<open>Cancellation Laws for Common Factors in Comparisons\<close>
 
 lemma mult_less_cancel_lemma:
      "[| k: nat; m: nat; n: nat |] ==> (m#*k < n#*k) \<longleftrightarrow> (0<k & m<n)"
@@ -414,7 +414,7 @@
 done
 
 
-subsection{*More Lemmas about Remainder*}
+subsection\<open>More Lemmas about Remainder\<close>
 
 lemma mult_mod_distrib_raw:
      "[| k:nat; m:nat; n:nat |] ==> (k#*m) mod (k#*n) = k #* (m mod n)"
@@ -507,7 +507,7 @@
 by (drule less_imp_succ_add, auto)
 
 
-subsubsection{*More Lemmas About Difference*}
+subsubsection\<open>More Lemmas About Difference\<close>
 
 lemma diff_is_0_lemma:
      "[| m: nat; n: nat |] ==> m #- n = 0 \<longleftrightarrow> m \<le> n"
@@ -538,7 +538,7 @@
 apply simp_all
 done
 
-text{*Difference and less-than*}
+text\<open>Difference and less-than\<close>
 
 lemma diff_lt_imp_lt: "[|(k#-i) < (k#-j); i\<in>nat; j\<in>nat; k\<in>nat|] ==> j<i"
 apply (erule rev_mp)
--- a/src/ZF/Bin.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Bin.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -13,7 +13,7 @@
 For instance, ~5 div 2 = ~3 and ~5 mod 2 = 1; thus ~5 = (~3)*2 + 1
 *)
 
-section{*Arithmetic on Binary Integers*}
+section\<open>Arithmetic on Binary Integers\<close>
 
 theory Bin
 imports Int_ZF Datatype_ZF
@@ -176,8 +176,8 @@
 by (induct_tac "v", auto)
 
 
-subsubsection{*The Carry and Borrow Functions,
-            @{term bin_succ} and @{term bin_pred}*}
+subsubsection\<open>The Carry and Borrow Functions,
+            @{term bin_succ} and @{term bin_pred}\<close>
 
 (*NCons preserves the integer value of its argument*)
 lemma integ_of_NCons [simp]:
@@ -199,7 +199,7 @@
 done
 
 
-subsubsection{*@{term bin_minus}: Unary Negation of Binary Integers*}
+subsubsection\<open>@{term bin_minus}: Unary Negation of Binary Integers\<close>
 
 lemma integ_of_minus: "w \<in> bin ==> integ_of(bin_minus(w)) = $- integ_of(w)"
 apply (erule bin.induct)
@@ -207,7 +207,7 @@
 done
 
 
-subsubsection{*@{term bin_add}: Binary Addition*}
+subsubsection\<open>@{term bin_add}: Binary Addition\<close>
 
 lemma bin_add_Pls [simp]: "w \<in> bin ==> bin_add(Pls,w) = w"
 by (unfold bin_add_def, simp)
@@ -255,7 +255,7 @@
 done
 
 
-subsubsection{*@{term bin_mult}: Binary Multiplication*}
+subsubsection\<open>@{term bin_mult}: Binary Multiplication\<close>
 
 lemma integ_of_mult:
      "[| v \<in> bin;  w \<in> bin |]
@@ -266,7 +266,7 @@
 done
 
 
-subsection{*Computations*}
+subsection\<open>Computations\<close>
 
 (** extra rules for bin_succ, bin_pred **)
 
@@ -351,8 +351,8 @@
 done
 
 
-subsection{*Simplification Rules for Comparison of Binary Numbers*}
-text{*Thanks to Norbert Voelker*}
+subsection\<open>Simplification Rules for Comparison of Binary Numbers\<close>
+text\<open>Thanks to Norbert Voelker\<close>
 
 (** Equals (=) **)
 
@@ -695,9 +695,9 @@
 
 ML_file "int_arith.ML"
 
-subsection {* examples: *}
+subsection \<open>examples:\<close>
 
-text {* @{text combine_numerals_prod} (products of separate literals) *}
+text \<open>@{text combine_numerals_prod} (products of separate literals)\<close>
 lemma "#5 $* x $* #3 = y" apply simp oops
 
 schematic_lemma "y2 $+ ?x42 = y $+ y2" apply simp oops
@@ -741,7 +741,7 @@
 lemma "a $+ $-(b$+c) $+ b = d" apply simp oops
 lemma "a $+ $-(b$+c) $- b = d" apply simp oops
 
-text {* negative numerals *}
+text \<open>negative numerals\<close>
 lemma "(i $+ j $+ #-2 $+ k) $- (u $+ #5 $+ y) = zz" apply simp oops
 lemma "(i $+ j $+ #-3 $+ k) $< u $+ #5 $+ y" apply simp oops
 lemma "(i $+ j $+ #3 $+ k) $< u $+ #-6 $+ y" apply simp oops
@@ -749,7 +749,7 @@
 lemma "(i $+ j $+ #12 $+ k) $- #-15 = y" apply simp oops
 lemma "(i $+ j $+ #-12 $+ k) $- #-15 = y" apply simp oops
 
-text {* Multiplying separated numerals *}
+text \<open>Multiplying separated numerals\<close>
 lemma "#6 $* ($# x $* #2) =  uu" apply simp oops
 lemma "#4 $* ($# x $* $# x) $* (#2 $* $# x) =  uu" apply simp oops
 
--- a/src/ZF/Bool.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Bool.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1992  University of Cambridge
 *)
 
-section{*Booleans in Zermelo-Fraenkel Set Theory*}
+section\<open>Booleans in Zermelo-Fraenkel Set Theory\<close>
 
 theory Bool imports pair begin
 
@@ -15,7 +15,7 @@
   two  ("2") where
   "2 == succ(1)"
 
-text{*2 is equal to bool, but is used as a number rather than a type.*}
+text\<open>2 is equal to bool, but is used as a number rather than a type.\<close>
 
 definition "bool == {0,1}"
 
@@ -109,7 +109,7 @@
 lemmas bool_typechecks = bool_1I bool_0I cond_type not_type and_type
                          or_type xor_type
 
-subsection{*Laws About 'not' *}
+subsection\<open>Laws About 'not'\<close>
 
 lemma not_not [simp]: "a:bool ==> not(not(a)) = a"
 by (elim boolE, auto)
@@ -120,7 +120,7 @@
 lemma not_or [simp]: "a:bool ==> not(a or b) = not(a) and not(b)"
 by (elim boolE, auto)
 
-subsection{*Laws About 'and' *}
+subsection\<open>Laws About 'and'\<close>
 
 lemma and_absorb [simp]: "a: bool ==> a and a = a"
 by (elim boolE, auto)
@@ -135,7 +135,7 @@
        (a or b) and c  =  (a and c) or (b and c)"
 by (elim boolE, auto)
 
-subsection{*Laws About 'or' *}
+subsection\<open>Laws About 'or'\<close>
 
 lemma or_absorb [simp]: "a: bool ==> a or a = a"
 by (elim boolE, auto)
--- a/src/ZF/Cardinal.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Cardinal.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1994  University of Cambridge
 *)
 
-section{*Cardinal Numbers Without the Axiom of Choice*}
+section\<open>Cardinal Numbers Without the Axiom of Choice\<close>
 
 theory Cardinal imports OrderType Finite Nat_ZF Sum begin
 
@@ -47,8 +47,8 @@
   Least     (binder "\<mu>" 10)
 
 
-subsection{*The Schroeder-Bernstein Theorem*}
-text{*See Davey and Priestly, page 106*}
+subsection\<open>The Schroeder-Bernstein Theorem\<close>
+text\<open>See Davey and Priestly, page 106\<close>
 
 (** Lemma: Banach's Decomposition Theorem **)
 
@@ -178,7 +178,7 @@
 done
 
 
-subsection{*lesspoll: contributions by Krzysztof Grabczewski *}
+subsection\<open>lesspoll: contributions by Krzysztof Grabczewski\<close>
 
 lemma lesspoll_not_refl: "~ (i \<prec> i)"
 by (simp add: lesspoll_def)
@@ -276,7 +276,7 @@
   thus ?thesis using P .
 qed
 
-text{*The proof is almost identical to the one above!*}
+text\<open>The proof is almost identical to the one above!\<close>
 lemma Least_le: 
   assumes P: "P(i)" and i: "Ord(i)" shows "(\<mu> x. P(x)) \<le> i"
 proof -
@@ -333,7 +333,7 @@
 qed
 
 
-subsection{*Basic Properties of Cardinals*}
+subsection\<open>Basic Properties of Cardinals\<close>
 
 (*Not needed for simplification, but helpful below*)
 lemma Least_cong: "(!!y. P(y) \<longleftrightarrow> Q(y)) ==> (\<mu> x. P(x)) = (\<mu> x. Q(x))"
@@ -410,7 +410,7 @@
 apply (rule Ord_Least)
 done
 
-text{*The cardinals are the initial ordinals.*}
+text\<open>The cardinals are the initial ordinals.\<close>
 lemma Card_iff_initial: "Card(K) \<longleftrightarrow> Ord(K) & (\<forall>j. j<K \<longrightarrow> ~ j \<approx> K)"
 proof -
   { fix j
@@ -449,10 +449,10 @@
 proof (unfold cardinal_def)
   show "Card(\<mu> i. i \<approx> A)"
     proof (cases "\<exists>i. Ord (i) & i \<approx> A")
-      case False thus ?thesis           --{*degenerate case*}
+      case False thus ?thesis           --\<open>degenerate case\<close>
         by (simp add: Least_0 Card_0)
     next
-      case True                         --{*real case: @{term A} is isomorphic to some ordinal*}
+      case True                         --\<open>real case: @{term A} is isomorphic to some ordinal\<close>
       then obtain i where i: "Ord(i)" "i \<approx> A" by blast
       show ?thesis
         proof (rule CardI [OF Ord_Least], rule notI)
@@ -500,7 +500,7 @@
   thus ?thesis by simp
 qed
 
-text{*Since we have @{term"|succ(nat)| \<le> |nat|"}, the converse of @{text cardinal_mono} fails!*}
+text\<open>Since we have @{term"|succ(nat)| \<le> |nat|"}, the converse of @{text cardinal_mono} fails!\<close>
 lemma cardinal_lt_imp_lt: "[| |i| < |j|;  Ord(i);  Ord(j) |] ==> i < j"
 apply (rule Ord_linear2 [of i j], assumption+)
 apply (erule lt_trans2 [THEN lt_irrefl])
@@ -556,7 +556,7 @@
 apply (blast intro: Ord_trans)
 done
 
-subsection{*The finite cardinals *}
+subsection\<open>The finite cardinals\<close>
 
 lemma cons_lepoll_consD:
  "[| cons(u,A) \<lesssim> cons(v,B);  u\<notin>A;  v\<notin>B |] ==> A \<lesssim> B"
@@ -591,12 +591,12 @@
   case 0 thus ?case by (blast intro!: nat_0_le)
 next
   case (succ m)
-  show ?case  using `n \<in> nat`
+  show ?case  using \<open>n \<in> nat\<close>
     proof (cases rule: natE)
       case 0 thus ?thesis using succ
         by (simp add: lepoll_def inj_def)
     next
-      case (succ n') thus ?thesis using succ.hyps ` succ(m) \<lesssim> n`
+      case (succ n') thus ?thesis using succ.hyps \<open> succ(m) \<lesssim> n\<close>
         by (blast intro!: succ_leI dest!: succ_lepoll_succD)
     qed
 qed
@@ -682,7 +682,7 @@
 done
 
 
-subsection{*The first infinite cardinal: Omega, or nat *}
+subsection\<open>The first infinite cardinal: Omega, or nat\<close>
 
 (*This implies Kunen's Lemma 10.6*)
 lemma lt_not_lepoll:
@@ -697,7 +697,7 @@
   thus ?thesis by auto
 qed
 
-text{*A slightly weaker version of @{text nat_eqpoll_iff}*}
+text\<open>A slightly weaker version of @{text nat_eqpoll_iff}\<close>
 lemma Ord_nat_eqpoll_iff:
   assumes i: "Ord(i)" and n: "n \<in> nat" shows "i \<approx> n \<longleftrightarrow> i=n"
 using i nat_into_Ord [OF n]
@@ -712,7 +712,7 @@
   case gt
   hence  "~ i \<lesssim> n" using n  by (rule lt_not_lepoll)
   hence  "~ i \<approx> n" using n  by (blast intro: eqpoll_imp_lepoll)
-  moreover have "i \<noteq> n" using `n<i` by auto
+  moreover have "i \<noteq> n" using \<open>n<i\<close> by auto
   ultimately show ?thesis by blast
 qed
 
@@ -740,7 +740,7 @@
   by (blast intro: Ord_nat Card_nat ltI lt_Card_imp_lesspoll)
 
 
-subsection{*Towards Cardinal Arithmetic *}
+subsection\<open>Towards Cardinal Arithmetic\<close>
 (** Congruence laws for successor, cardinal addition and multiplication **)
 
 (*Congruence law for  cons  under equipollence*)
@@ -817,12 +817,12 @@
 done
 
 
-subsection{*Lemmas by Krzysztof Grabczewski*}
+subsection\<open>Lemmas by Krzysztof Grabczewski\<close>
 
 (*New proofs using cons_lepoll_cons. Could generalise from succ to cons.*)
 
-text{*If @{term A} has at most @{term"n+1"} elements and @{term"a \<in> A"}
-      then @{term"A-{a}"} has at most @{term n}.*}
+text\<open>If @{term A} has at most @{term"n+1"} elements and @{term"a \<in> A"}
+      then @{term"A-{a}"} has at most @{term n}.\<close>
 lemma Diff_sing_lepoll:
       "[| a \<in> A;  A \<lesssim> succ(n) |] ==> A - {a} \<lesssim> n"
 apply (unfold succ_def)
@@ -831,7 +831,7 @@
 apply (erule cons_Diff [THEN ssubst], safe)
 done
 
-text{*If @{term A} has at least @{term"n+1"} elements then @{term"A-{a}"} has at least @{term n}.*}
+text\<open>If @{term A} has at least @{term"n+1"} elements then @{term"A-{a}"} has at least @{term n}.\<close>
 lemma lepoll_Diff_sing:
   assumes A: "succ(n) \<lesssim> A" shows "n \<lesssim> A - {a}"
 proof -
@@ -877,7 +877,7 @@
 done
 
 
-subsection {*Finite and infinite sets*}
+subsection \<open>Finite and infinite sets\<close>
 
 lemma eqpoll_imp_Finite_iff: "A \<approx> B ==> Finite(A) \<longleftrightarrow> Finite(B)"
 apply (unfold Finite_def)
@@ -1027,7 +1027,7 @@
 lemma Finite_Un_iff [simp]: "Finite(A \<union> B) \<longleftrightarrow> (Finite(A) & Finite(B))"
 by (blast intro: subset_Finite Finite_Un)
 
-text{*The converse must hold too.*}
+text\<open>The converse must hold too.\<close>
 lemma Finite_Union: "[| \<forall>y\<in>X. Finite(y);  Finite(X) |] ==> Finite(\<Union>(X))"
 apply (simp add: Finite_Fin_iff)
 apply (rule Fin_UnionI)
@@ -1085,8 +1085,8 @@
 apply (blast intro: elim: equalityCE)
 done
 
-text{*I don't know why, but if the premise is expressed using meta-connectives
-then  the simplifier cannot prove it automatically in conditional rewriting.*}
+text\<open>I don't know why, but if the premise is expressed using meta-connectives
+then  the simplifier cannot prove it automatically in conditional rewriting.\<close>
 lemma Finite_RepFun_iff:
      "(\<forall>x y. f(x)=f(y) \<longrightarrow> x=y) ==> Finite(RepFun(A,f)) \<longleftrightarrow> Finite(A)"
 by (blast intro: Finite_RepFun Finite_RepFun_iff_lemma [of _ f])
@@ -1119,7 +1119,7 @@
 next
   case (succ x)
   hence wfx: "\<And>Z. Z = 0 \<or> (\<exists>z\<in>Z. \<forall>y. z \<in> y \<and> z \<in> x \<and> y \<in> x \<and> z \<in> x \<longrightarrow> y \<notin> Z)"
-    by (simp add: wf_on_def wf_def)  --{*not easy to erase the duplicate @{term"z \<in> x"}!*}
+    by (simp add: wf_on_def wf_def)  --\<open>not easy to erase the duplicate @{term"z \<in> x"}!\<close>
   show ?case
     proof (rule wf_onI)
       fix Z u
--- a/src/ZF/CardinalArith.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/CardinalArith.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1994  University of Cambridge
 *)
 
-section{*Cardinal Arithmetic Without the Axiom of Choice*}
+section\<open>Cardinal Arithmetic Without the Axiom of Choice\<close>
 
 theory CardinalArith imports Cardinal OrderArith ArithSimp Finite begin
 
@@ -28,15 +28,15 @@
 
 definition
   jump_cardinal :: "i=>i"  where
-    --{*This def is more complex than Kunen's but it more easily proved to
-        be a cardinal*}
+    --\<open>This def is more complex than Kunen's but it more easily proved to
+        be a cardinal\<close>
     "jump_cardinal(K) ==
          \<Union>X\<in>Pow(K). {z. r \<in> Pow(K*K), well_ord(X,r) & z = ordertype(X,r)}"
 
 definition
   csucc         :: "i=>i"  where
-    --{*needed because @{term "jump_cardinal(K)"} might not be the successor
-        of @{term K}*}
+    --\<open>needed because @{term "jump_cardinal(K)"} might not be the successor
+        of @{term K}\<close>
     "csucc(K) == LEAST L. Card(L) & K<L"
 
 notation (xsymbols)
@@ -87,14 +87,14 @@
 done
 
 
-subsection{*Cardinal addition*}
+subsection\<open>Cardinal addition\<close>
 
-text{*Note: Could omit proving the algebraic laws for cardinal addition and
+text\<open>Note: Could omit proving the algebraic laws for cardinal addition and
 multiplication.  On finite cardinals these operations coincide with
 addition and multiplication of natural numbers; on infinite cardinals they
-coincide with union (maximum).  Either way we get most laws for free.*}
+coincide with union (maximum).  Either way we get most laws for free.\<close>
 
-subsubsection{*Cardinal addition is commutative*}
+subsubsection\<open>Cardinal addition is commutative\<close>
 
 lemma sum_commute_eqpoll: "A+B \<approx> B+A"
 proof (unfold eqpoll_def, rule exI)
@@ -107,7 +107,7 @@
 apply (rule sum_commute_eqpoll [THEN cardinal_cong])
 done
 
-subsubsection{*Cardinal addition is associative*}
+subsubsection\<open>Cardinal addition is associative\<close>
 
 lemma sum_assoc_eqpoll: "(A+B)+C \<approx> A+(B+C)"
 apply (unfold eqpoll_def)
@@ -115,7 +115,7 @@
 apply (rule sum_assoc_bij)
 done
 
-text{*Unconditional version requires AC*}
+text\<open>Unconditional version requires AC\<close>
 lemma well_ord_cadd_assoc:
   assumes i: "well_ord(i,ri)" and j: "well_ord(j,rj)" and k: "well_ord(k,rk)"
   shows "(i \<oplus> j) \<oplus> k = i \<oplus> (j \<oplus> k)"
@@ -130,7 +130,7 @@
 qed
 
 
-subsubsection{*0 is the identity for addition*}
+subsubsection\<open>0 is the identity for addition\<close>
 
 lemma sum_0_eqpoll: "0+A \<approx> A"
 apply (unfold eqpoll_def)
@@ -143,7 +143,7 @@
 apply (simp add: sum_0_eqpoll [THEN cardinal_cong] Card_cardinal_eq)
 done
 
-subsubsection{*Addition by another cardinal*}
+subsubsection\<open>Addition by another cardinal\<close>
 
 lemma sum_lepoll_self: "A \<lesssim> A+B"
 proof (unfold lepoll_def, rule exI)
@@ -165,7 +165,7 @@
     by (blast intro: le_trans)
 qed
 
-subsubsection{*Monotonicity of addition*}
+subsubsection\<open>Monotonicity of addition\<close>
 
 lemma sum_lepoll_mono:
      "[| A \<lesssim> C;  B \<lesssim> D |] ==> A + B \<lesssim> C + D"
@@ -186,7 +186,7 @@
 apply (blast intro: sum_lepoll_mono subset_imp_lepoll)
 done
 
-subsubsection{*Addition of finite cardinals is "ordinary" addition*}
+subsubsection\<open>Addition of finite cardinals is "ordinary" addition\<close>
 
 lemma sum_succ_eqpoll: "succ(A)+B \<approx> succ(A+B)"
 apply (unfold eqpoll_def)
@@ -222,9 +222,9 @@
 qed
 
 
-subsection{*Cardinal multiplication*}
+subsection\<open>Cardinal multiplication\<close>
 
-subsubsection{*Cardinal multiplication is commutative*}
+subsubsection\<open>Cardinal multiplication is commutative\<close>
 
 lemma prod_commute_eqpoll: "A*B \<approx> B*A"
 apply (unfold eqpoll_def)
@@ -238,7 +238,7 @@
 apply (rule prod_commute_eqpoll [THEN cardinal_cong])
 done
 
-subsubsection{*Cardinal multiplication is associative*}
+subsubsection\<open>Cardinal multiplication is associative\<close>
 
 lemma prod_assoc_eqpoll: "(A*B)*C \<approx> A*(B*C)"
 apply (unfold eqpoll_def)
@@ -246,7 +246,7 @@
 apply (rule prod_assoc_bij)
 done
 
-text{*Unconditional version requires AC*}
+text\<open>Unconditional version requires AC\<close>
 lemma well_ord_cmult_assoc:
   assumes i: "well_ord(i,ri)" and j: "well_ord(j,rj)" and k: "well_ord(k,rk)"
   shows "(i \<otimes> j) \<otimes> k = i \<otimes> (j \<otimes> k)"
@@ -260,7 +260,7 @@
   finally show "|i * j| * k \<approx> i * |j * k|" .
 qed
 
-subsubsection{*Cardinal multiplication distributes over addition*}
+subsubsection\<open>Cardinal multiplication distributes over addition\<close>
 
 lemma sum_prod_distrib_eqpoll: "(A+B)*C \<approx> (A*C)+(B*C)"
 apply (unfold eqpoll_def)
@@ -281,7 +281,7 @@
   finally show "|i + j| * k \<approx> |i * k| + |j * k|" .
 qed
 
-subsubsection{*Multiplication by 0 yields 0*}
+subsubsection\<open>Multiplication by 0 yields 0\<close>
 
 lemma prod_0_eqpoll: "0*A \<approx> 0"
 apply (unfold eqpoll_def)
@@ -292,7 +292,7 @@
 lemma cmult_0 [simp]: "0 \<otimes> i = 0"
 by (simp add: cmult_def prod_0_eqpoll [THEN cardinal_cong])
 
-subsubsection{*1 is the identity for multiplication*}
+subsubsection\<open>1 is the identity for multiplication\<close>
 
 lemma prod_singleton_eqpoll: "{x}*A \<approx> A"
 apply (unfold eqpoll_def)
@@ -305,7 +305,7 @@
 apply (simp add: prod_singleton_eqpoll [THEN cardinal_cong] Card_cardinal_eq)
 done
 
-subsection{*Some inequalities for multiplication*}
+subsection\<open>Some inequalities for multiplication\<close>
 
 lemma prod_square_lepoll: "A \<lesssim> A*A"
 apply (unfold lepoll_def inj_def)
@@ -322,7 +322,7 @@
 apply (blast intro: well_ord_rmult well_ord_Memrel Card_is_Ord)
 done
 
-subsubsection{*Multiplication by a non-zero cardinal*}
+subsubsection\<open>Multiplication by a non-zero cardinal\<close>
 
 lemma prod_lepoll_self: "b \<in> B ==> A \<lesssim> A*B"
 apply (unfold lepoll_def inj_def)
@@ -339,7 +339,7 @@
 apply (blast intro: prod_lepoll_self ltD)
 done
 
-subsubsection{*Monotonicity of multiplication*}
+subsubsection\<open>Monotonicity of multiplication\<close>
 
 lemma prod_lepoll_mono:
      "[| A \<lesssim> C;  B \<lesssim> D |] ==> A * B  \<lesssim>  C * D"
@@ -360,7 +360,7 @@
 apply (blast intro: prod_lepoll_mono subset_imp_lepoll)
 done
 
-subsection{*Multiplication of finite cardinals is "ordinary" multiplication*}
+subsection\<open>Multiplication of finite cardinals is "ordinary" multiplication\<close>
 
 lemma prod_succ_eqpoll: "succ(A)*B \<approx> B + A*B"
 apply (unfold eqpoll_def)
@@ -403,7 +403,7 @@
 by (blast intro: sum_lepoll_mono sum_lepoll_prod lepoll_trans lepoll_refl)
 
 
-subsection{*Infinite Cardinals are Limit Ordinals*}
+subsection\<open>Infinite Cardinals are Limit Ordinals\<close>
 
 (*This proof is modelled upon one assuming nat<=A, with injection
   \<lambda>z\<in>cons(u,A). if z=u then 0 else if z \<in> nat then succ(z) else z
@@ -484,7 +484,7 @@
 apply (rule pred_subset)
 done
 
-subsubsection{*Establishing the well-ordering*}
+subsubsection\<open>Establishing the well-ordering\<close>
 
 lemma well_ord_csquare:
   assumes K: "Ord(K)" shows "well_ord(K*K, csquare_rel(K))"
@@ -496,7 +496,7 @@
     using K by (blast intro: well_ord_rmult well_ord_Memrel)
 qed
 
-subsubsection{*Characterising initial segments of the well-ordering*}
+subsubsection\<open>Characterising initial segments of the well-ordering\<close>
 
 lemma csquareD:
  "[| <<x,y>, <z,z>> \<in> csquare_rel(K);  x<K;  y<K;  z<K |] ==> x \<le> z & y \<le> z"
@@ -537,7 +537,7 @@
                      subset_Un_iff2 [THEN iff_sym] OrdmemD)
 done
 
-subsubsection{*The cardinality of initial segments*}
+subsubsection\<open>The cardinality of initial segments\<close>
 
 lemma ordermap_z_lt:
       "[| Limit(K);  x<K;  y<K;  z=succ(x \<union> y) |] ==>
@@ -551,7 +551,7 @@
 apply (blast intro!: Un_upper1_le Un_upper2_le Ord_ordermap elim!: ltE)+
 done
 
-text{*Kunen: "each @{term"\<langle>x,y\<rangle> \<in> K \<times> K"} has no more than @{term"z \<times> z"} predecessors..." (page 29) *}
+text\<open>Kunen: "each @{term"\<langle>x,y\<rangle> \<in> K \<times> K"} has no more than @{term"z \<times> z"} predecessors..." (page 29)\<close>
 lemma ordermap_csquare_le:
   assumes K: "Limit(K)" and x: "x<K" and y: " y<K"
   defines "z \<equiv> succ(x \<union> y)"
@@ -582,7 +582,7 @@
   finally show "ordermap(K \<times> K, csquare_rel(K)) ` \<langle>x,y\<rangle> \<lesssim> |succ(z)| \<times> |succ(z)|" .
 qed
 
-text{*Kunen: "... so the order type is @{text"\<le>"} K" *}
+text\<open>Kunen: "... so the order type is @{text"\<le>"} K"\<close>
 lemma ordertype_csquare_le:
   assumes IK: "InfCard(K)" and eq: "\<And>y. y\<in>K \<Longrightarrow> InfCard(y) \<Longrightarrow> y \<otimes> y = y"
   shows "ordertype(K*K, csquare_rel(K)) \<le> K"
@@ -685,7 +685,7 @@
 lemma Inf_Card_is_InfCard: "[| Card(i); ~ Finite(i) |] ==> InfCard(i)"
 by (simp add: InfCard_def Card_is_Ord [THEN nat_le_infinite_Ord])
 
-subsubsection{*Toward's Kunen's Corollary 10.13 (1)*}
+subsubsection\<open>Toward's Kunen's Corollary 10.13 (1)\<close>
 
 lemma InfCard_le_cmult_eq: "[| InfCard(K);  L \<le> K;  0<L |] ==> K \<otimes> L = K"
 apply (rule le_anti_sym)
@@ -734,9 +734,9 @@
   might be  InfCard(K) ==> |list(K)| = K.
 *)
 
-subsection{*For Every Cardinal Number There Exists A Greater One*}
+subsection\<open>For Every Cardinal Number There Exists A Greater One\<close>
 
-text{*This result is Kunen's Theorem 10.16, which would be trivial using AC*}
+text\<open>This result is Kunen's Theorem 10.16, which would be trivial using AC\<close>
 
 lemma Ord_jump_cardinal: "Ord(jump_cardinal(K))"
 apply (unfold jump_cardinal_def)
@@ -793,7 +793,7 @@
 apply (blast intro: Card_jump_cardinal_lemma [THEN mem_irrefl])
 done
 
-subsection{*Basic Properties of Successor Cardinals*}
+subsection\<open>Basic Properties of Successor Cardinals\<close>
 
 lemma csucc_basic: "Ord(K) ==> Card(csucc(K)) & K < csucc(K)"
 apply (unfold csucc_def)
@@ -834,7 +834,7 @@
               lt_csucc [THEN leI, THEN [2] le_trans])
 
 
-subsubsection{*Removing elements from a finite set decreases its cardinality*}
+subsubsection\<open>Removing elements from a finite set decreases its cardinality\<close>
 
 lemma Finite_imp_cardinal_cons [simp]:
   assumes FA: "Finite(A)" and a: "a\<notin>A" shows "|cons(a,A)| = succ(|A|)"
@@ -918,7 +918,7 @@
 qed
 
 
-subsubsection{*Theorems by Krzysztof Grabczewski, proofs by lcp*}
+subsubsection\<open>Theorems by Krzysztof Grabczewski, proofs by lcp\<close>
 
 lemmas nat_implies_well_ord = nat_into_Ord [THEN well_ord_Memrel]
 
--- a/src/ZF/Cardinal_AC.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Cardinal_AC.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -5,18 +5,18 @@
 These results help justify infinite-branching datatypes
 *)
 
-section{*Cardinal Arithmetic Using AC*}
+section\<open>Cardinal Arithmetic Using AC\<close>
 
 theory Cardinal_AC imports CardinalArith Zorn begin
 
-subsection{*Strengthened Forms of Existing Theorems on Cardinals*}
+subsection\<open>Strengthened Forms of Existing Theorems on Cardinals\<close>
 
 lemma cardinal_eqpoll: "|A| \<approx> A"
 apply (rule AC_well_ord [THEN exE])
 apply (erule well_ord_cardinal_eqpoll)
 done
 
-text{*The theorem @{term "||A|| = |A|"} *}
+text\<open>The theorem @{term "||A|| = |A|"}\<close>
 lemmas cardinal_idem = cardinal_eqpoll [THEN cardinal_cong, simp]
 
 lemma cardinal_eqE: "|X| = |Y| ==> X \<approx> Y"
@@ -65,7 +65,7 @@
 done
 
 
-subsection {*The relationship between cardinality and le-pollence*}
+subsection \<open>The relationship between cardinality and le-pollence\<close>
 
 lemma Card_le_imp_lepoll:
   assumes "|A| \<le> |B|" shows "A \<lesssim> B"
@@ -113,7 +113,7 @@
   by (blast intro: lt_Ord Card_le_imp_lepoll Ord_cardinal_le le_trans)
 
 
-subsection{*Other Applications of AC*}
+subsection\<open>Other Applications of AC\<close>
 
 lemma surj_implies_inj:
   assumes f: "f \<in> surj(X,Y)" shows "\<exists>g. g \<in> inj(Y,X)"
@@ -129,7 +129,7 @@
     qed
 qed
 
-text{*Kunen's Lemma 10.20*}
+text\<open>Kunen's Lemma 10.20\<close>
 lemma surj_implies_cardinal_le: 
   assumes f: "f \<in> surj(X,Y)" shows "|Y| \<le> |X|"
 proof (rule lepoll_imp_Card_le)
@@ -138,7 +138,7 @@
     by (auto simp add: lepoll_def)
 qed
 
-text{*Kunen's Lemma 10.21*}
+text\<open>Kunen's Lemma 10.21\<close>
 lemma cardinal_UN_le:
   assumes K: "InfCard(K)" 
   shows "(!!i. i\<in>K ==> |X(i)| \<le> K) ==> |\<Union>i\<in>K. X(i)| \<le> K"
@@ -171,14 +171,14 @@
   finally show "(\<Union>i\<in>K. X(i)) \<lesssim> K" .
 qed
 
-text{*The same again, using @{term csucc}*}
+text\<open>The same again, using @{term csucc}\<close>
 lemma cardinal_UN_lt_csucc:
      "[| InfCard(K);  \<And>i. i\<in>K \<Longrightarrow> |X(i)| < csucc(K) |]
       ==> |\<Union>i\<in>K. X(i)| < csucc(K)"
 by (simp add: Card_lt_csucc_iff cardinal_UN_le InfCard_is_Card Card_cardinal)
 
-text{*The same again, for a union of ordinals.  In use, j(i) is a bit like rank(i),
-  the least ordinal j such that i:Vfrom(A,j). *}
+text\<open>The same again, for a union of ordinals.  In use, j(i) is a bit like rank(i),
+  the least ordinal j such that i:Vfrom(A,j).\<close>
 lemma cardinal_UN_Ord_lt_csucc:
      "[| InfCard(K);  \<And>i. i\<in>K \<Longrightarrow> j(i) < csucc(K) |]
       ==> (\<Union>i\<in>K. j(i)) < csucc(K)"
@@ -189,11 +189,11 @@
 done
 
 
-subsection{*The Main Result for Infinite-Branching Datatypes*}
+subsection\<open>The Main Result for Infinite-Branching Datatypes\<close>
 
-text{*As above, but the index set need not be a cardinal. Work
+text\<open>As above, but the index set need not be a cardinal. Work
 backwards along the injection from @{term W} into @{term K}, given
-that @{term"W\<noteq>0"}.*}
+that @{term"W\<noteq>0"}.\<close>
 
 lemma inj_UN_subset:
   assumes f: "f \<in> inj(A,B)" and a: "a \<in> A"
@@ -222,7 +222,7 @@
   note lt_subset_trans [OF _ _ OU, trans]
   show ?thesis
     proof (cases "W=0")
-      case True  --{*solve the easy 0 case*}
+      case True  --\<open>solve the easy 0 case\<close>
       thus ?thesis by (simp add: CK Card_is_Ord Card_csucc Ord_0_lt_csucc)
     next
       case False
--- a/src/ZF/Coind/Language.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Coind/Language.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -6,7 +6,7 @@
 theory Language imports Main begin
 
 
-text{*these really can't be definitions without losing the abstraction*}
+text\<open>these really can't be definitions without losing the abstraction\<close>
 
 axiomatization
   Const :: i  and               (* Abstract type of constants *)
--- a/src/ZF/Constructible/AC_in_L.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/AC_in_L.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,13 +2,13 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section {* The Axiom of Choice Holds in L! *}
+section \<open>The Axiom of Choice Holds in L!\<close>
 
 theory AC_in_L imports Formula Separation begin
 
-subsection{*Extending a Wellordering over a List -- Lexicographic Power*}
+subsection\<open>Extending a Wellordering over a List -- Lexicographic Power\<close>
 
-text{*This could be moved into a library.*}
+text\<open>This could be moved into a library.\<close>
 
 consts
   rlist   :: "[i,i]=>i"
@@ -31,13 +31,13 @@
   type_intros list.intros
 
 
-subsubsection{*Type checking*}
+subsubsection\<open>Type checking\<close>
 
 lemmas rlist_type = rlist.dom_subset
 
 lemmas field_rlist = rlist_type [THEN field_rel_subset]
 
-subsubsection{*Linearity*}
+subsubsection\<open>Linearity\<close>
 
 lemma rlist_Nil_Cons [intro]:
     "[|a \<in> A; l \<in> list(A)|] ==> <[], Cons(a,l)> \<in> rlist(A, r)"
@@ -64,7 +64,7 @@
           done
       }
       note yConsCase = this
-      show ?case using `ys \<in> list(A)`
+      show ?case using \<open>ys \<in> list(A)\<close>
         by (cases rule: list.cases) (simp_all add: Cons rlist_Nil_Cons yConsCase) 
     qed
   }
@@ -72,9 +72,9 @@
 qed
 
 
-subsubsection{*Well-foundedness*}
+subsubsection\<open>Well-foundedness\<close>
 
-text{*Nothing preceeds Nil in this ordering.*}
+text\<open>Nothing preceeds Nil in this ordering.\<close>
 inductive_cases rlist_NilE: " <l,[]> \<in> rlist(A,r)"
 
 inductive_cases rlist_ConsE: " <l', Cons(x,l)> \<in> rlist(A,r)"
@@ -139,9 +139,9 @@
 done
 
 
-subsection{*An Injection from Formulas into the Natural Numbers*}
+subsection\<open>An Injection from Formulas into the Natural Numbers\<close>
 
-text{*There is a well-known bijection between @{term "nat*nat"} and @{term
+text\<open>There is a well-known bijection between @{term "nat*nat"} and @{term
 nat} given by the expression f(m,n) = triangle(m+n) + m, where triangle(k)
 enumerates the triangular numbers and can be defined by triangle(0)=0,
 triangle(succ(k)) = succ(k + triangle(k)).  Some small amount of effort is
@@ -151,10 +151,10 @@
 However, this result merely states that there is a bijection between the two
 sets.  It provides no means of naming a specific bijection.  Therefore, we
 conduct the proofs under the assumption that a bijection exists.  The simplest
-way to organize this is to use a locale.*}
+way to organize this is to use a locale.\<close>
 
-text{*Locale for any arbitrary injection between @{term "nat*nat"}
-      and @{term nat}*}
+text\<open>Locale for any arbitrary injection between @{term "nat*nat"}
+      and @{term nat}\<close>
 locale Nat_Times_Nat =
   fixes fn
   assumes fn_inj: "fn \<in> inj(nat*nat, nat)"
@@ -214,7 +214,7 @@
     InfCard_nat [THEN InfCard_square_eqpoll, THEN eqpoll_imp_lepoll]
 
 
-text{*Not needed--but interesting?*}
+text\<open>Not needed--but interesting?\<close>
 theorem formula_lepoll_nat: "formula \<lesssim> nat"
 apply (insert nat_times_nat_lepoll_nat)
 apply (unfold lepoll_def)
@@ -222,31 +222,31 @@
 done
 
 
-subsection{*Defining the Wellordering on @{term "DPow(A)"}*}
+subsection\<open>Defining the Wellordering on @{term "DPow(A)"}\<close>
 
-text{*The objective is to build a wellordering on @{term "DPow(A)"} from a
+text\<open>The objective is to build a wellordering on @{term "DPow(A)"} from a
 given one on @{term A}.  We first introduce wellorderings for environments,
 which are lists built over @{term "A"}.  We combine it with the enumeration of
 formulas.  The order type of the resulting wellordering gives us a map from
 (environment, formula) pairs into the ordinals.  For each member of @{term
-"DPow(A)"}, we take the minimum such ordinal.*}
+"DPow(A)"}, we take the minimum such ordinal.\<close>
 
 definition
   env_form_r :: "[i,i,i]=>i" where
-    --{*wellordering on (environment, formula) pairs*}
+    --\<open>wellordering on (environment, formula) pairs\<close>
    "env_form_r(f,r,A) ==
       rmult(list(A), rlist(A, r),
             formula, measure(formula, enum(f)))"
 
 definition
   env_form_map :: "[i,i,i,i]=>i" where
-    --{*map from (environment, formula) pairs to ordinals*}
+    --\<open>map from (environment, formula) pairs to ordinals\<close>
    "env_form_map(f,r,A,z)
       == ordermap(list(A) * formula, env_form_r(f,r,A)) ` z"
 
 definition
   DPow_ord :: "[i,i,i,i,i]=>o" where
-    --{*predicate that holds if @{term k} is a valid index for @{term X}*}
+    --\<open>predicate that holds if @{term k} is a valid index for @{term X}\<close>
    "DPow_ord(f,r,A,X,k) ==
            \<exists>env \<in> list(A). \<exists>p \<in> formula.
              arity(p) \<le> succ(length(env)) &
@@ -255,12 +255,12 @@
 
 definition
   DPow_least :: "[i,i,i,i]=>i" where
-    --{*function yielding the smallest index for @{term X}*}
+    --\<open>function yielding the smallest index for @{term X}\<close>
    "DPow_least(f,r,A,X) == \<mu> k. DPow_ord(f,r,A,X,k)"
 
 definition
   DPow_r :: "[i,i,i]=>i" where
-    --{*a wellordering on @{term "DPow(A)"}*}
+    --\<open>a wellordering on @{term "DPow(A)"}\<close>
    "DPow_r(f,r,A) == measure(DPow(A), DPow_least(f,r,A))"
 
 
@@ -324,16 +324,16 @@
 by (simp add: DPow_r_def measure_def, blast)
 
 
-subsection{*Limit Construction for Well-Orderings*}
+subsection\<open>Limit Construction for Well-Orderings\<close>
 
-text{*Now we work towards the transfinite definition of wellorderings for
+text\<open>Now we work towards the transfinite definition of wellorderings for
 @{term "Lset(i)"}.  We assume as an inductive hypothesis that there is a family
-of wellorderings for smaller ordinals.*}
+of wellorderings for smaller ordinals.\<close>
 
 definition
   rlimit :: "[i,i=>i]=>i" where
-  --{*Expresses the wellordering at limit ordinals.  The conditional
-      lets us remove the premise @{term "Limit(i)"} from some theorems.*}
+  --\<open>Expresses the wellordering at limit ordinals.  The conditional
+      lets us remove the premise @{term "Limit(i)"} from some theorems.\<close>
     "rlimit(i,r) ==
        if Limit(i) then 
          {z: Lset(i) * Lset(i).
@@ -344,8 +344,8 @@
 
 definition
   Lset_new :: "i=>i" where
-  --{*This constant denotes the set of elements introduced at level
-      @{term "succ(i)"}*}
+  --\<open>This constant denotes the set of elements introduced at level
+      @{term "succ(i)"}\<close>
     "Lset_new(i) == {x \<in> Lset(succ(i)). lrank(x) = i}"
 
 lemma Limit_Lset_eq2:
@@ -412,7 +412,7 @@
 done
 
 
-subsection{*Transfinite Definition of the Wellordering on @{term "L"}*}
+subsection\<open>Transfinite Definition of the Wellordering on @{term "L"}\<close>
 
 definition
   L_r :: "[i, i] => i" where
@@ -420,15 +420,15 @@
       transrec3(i, 0, \<lambda>x r. DPow_r(f, r, Lset(x)), 
                 \<lambda>x r. rlimit(x, \<lambda>y. r`y))"
 
-subsubsection{*The Corresponding Recursion Equations*}
+subsubsection\<open>The Corresponding Recursion Equations\<close>
 lemma [simp]: "L_r(f,0) = 0"
 by (simp add: L_r_def)
 
 lemma [simp]: "L_r(f, succ(i)) = DPow_r(f, L_r(f,i), Lset(i))"
 by (simp add: L_r_def)
 
-text{*The limit case is non-trivial because of the distinction between
-object-level and meta-level abstraction.*}
+text\<open>The limit case is non-trivial because of the distinction between
+object-level and meta-level abstraction.\<close>
 lemma [simp]: "Limit(i) ==> L_r(f,i) = rlimit(i, L_r(f))"
 by (simp cong: rlimit_cong add: transrec3_Limit L_r_def ltD)
 
@@ -454,8 +454,8 @@
 done
 
 
-text{*Every constructible set is well-ordered! Therefore the Wellordering Theorem and
-      the Axiom of Choice hold in @{term L}!!*}
+text\<open>Every constructible set is well-ordered! Therefore the Wellordering Theorem and
+      the Axiom of Choice hold in @{term L}!!\<close>
 theorem L_implies_AC: assumes x: "L(x)" shows "\<exists>r. well_ord(x,r)"
   using Transset_Lset x
 apply (simp add: Transset_def L_def)
@@ -474,8 +474,8 @@
     by (blast intro: well_ord_imp_relativized)
 qed
 
-text{*In order to prove @{term" \<exists>r[L]. wellordered(L,x,r)"}, it's necessary to know 
+text\<open>In order to prove @{term" \<exists>r[L]. wellordered(L,x,r)"}, it's necessary to know 
 that @{term r} is actually constructible. It follows from the assumption ``@{term V} equals @{term L''}, 
-but this reasoning doesn't appear to work in Isabelle.*}
+but this reasoning doesn't appear to work in Isabelle.\<close>
 
 end
--- a/src/ZF/Constructible/DPow_absolute.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/DPow_absolute.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,18 +2,18 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section {*Absoluteness for the Definable Powerset Function*}
+section \<open>Absoluteness for the Definable Powerset Function\<close>
 
 
 theory DPow_absolute imports Satisfies_absolute begin
 
 
-subsection{*Preliminary Internalizations*}
+subsection\<open>Preliminary Internalizations\<close>
 
-subsubsection{*The Operator @{term is_formula_rec}*}
+subsubsection\<open>The Operator @{term is_formula_rec}\<close>
 
-text{*The three arguments of @{term p} are always 2, 1, 0.  It is buried
-   within 11 quantifiers!!*}
+text\<open>The three arguments of @{term p} are always 2, 1, 0.  It is buried
+   within 11 quantifiers!!\<close>
 
 (* is_formula_rec :: "[i=>o, [i,i,i]=>o, i, i] => o"
    "is_formula_rec(M,MH,p,z)  ==
@@ -77,7 +77,7 @@
 done
 
 
-subsubsection{*The Operator @{term is_satisfies}*}
+subsubsection\<open>The Operator @{term is_satisfies}\<close>
 
 (* is_satisfies(M,A,p,z) == is_formula_rec (M, satisfies_MH(M,A), p, z) *)
 definition
@@ -109,7 +109,7 @@
 done
 
 
-subsection {*Relativization of the Operator @{term DPow'}*}
+subsection \<open>Relativization of the Operator @{term DPow'}\<close>
 
 lemma DPow'_eq: 
   "DPow'(A) = {z . ep \<in> list(A) * formula, 
@@ -118,8 +118,8 @@
 by (simp add: DPow'_def, blast) 
 
 
-text{*Relativize the use of @{term sats} within @{term DPow'}
-(the comprehension).*}
+text\<open>Relativize the use of @{term sats} within @{term DPow'}
+(the comprehension).\<close>
 definition
   is_DPow_sats :: "[i=>o,i,i,i,i] => o" where
    "is_DPow_sats(M,A,env,p,x) ==
@@ -142,7 +142,7 @@
 by (simp add: DPow_sats_abs transM [of _ A])
 
 
-subsubsection{*The Operator @{term is_DPow_sats}, Internalized*}
+subsubsection\<open>The Operator @{term is_DPow_sats}, Internalized\<close>
 
 (* is_DPow_sats(M,A,env,p,x) ==
       \<forall>n1[M]. \<forall>e[M]. \<forall>sp[M]. 
@@ -184,7 +184,7 @@
 done
 
 
-subsection{*A Locale for Relativizing the Operator @{term DPow'}*}
+subsection\<open>A Locale for Relativizing the Operator @{term DPow'}\<close>
 
 locale M_DPow = M_satisfies +
  assumes sep:
@@ -219,7 +219,7 @@
 apply (fast intro: rep' sep' univalent_pair_eq)  
 done
 
-text{*Relativization of the Operator @{term DPow'}*}
+text\<open>Relativization of the Operator @{term DPow'}\<close>
 definition 
   is_DPow' :: "[i=>o,i,i] => o" where
     "is_DPow'(M,A,Z) == 
@@ -238,9 +238,9 @@
 done
 
 
-subsection{*Instantiating the Locale @{text M_DPow}*}
+subsection\<open>Instantiating the Locale @{text M_DPow}\<close>
 
-subsubsection{*The Instance of Separation*}
+subsubsection\<open>The Instance of Separation\<close>
 
 lemma DPow_separation:
     "[| L(A); env \<in> list(A); p \<in> formula |]
@@ -253,7 +253,7 @@
 
 
 
-subsubsection{*The Instance of Replacement*}
+subsubsection\<open>The Instance of Replacement\<close>
 
 lemma DPow_replacement_Reflects:
  "REFLECTS [\<lambda>x. \<exists>u[L]. u \<in> B &
@@ -287,7 +287,7 @@
 done
 
 
-subsubsection{*Actually Instantiating the Locale*}
+subsubsection\<open>Actually Instantiating the Locale\<close>
 
 lemma M_DPow_axioms_L: "M_DPow_axioms(L)"
   apply (rule M_DPow_axioms.intro)
@@ -304,10 +304,10 @@
   and DPow'_abs [intro, simp] = M_DPow.DPow'_abs [OF M_DPow_L]
 
 
-subsubsection{*The Operator @{term is_Collect}*}
+subsubsection\<open>The Operator @{term is_Collect}\<close>
 
-text{*The formula @{term is_P} has one free variable, 0, and it is
-enclosed within a single quantifier.*}
+text\<open>The formula @{term is_P} has one free variable, 0, and it is
+enclosed within a single quantifier.\<close>
 
 (* is_Collect :: "[i=>o,i,i=>o,i] => o"
     "is_Collect(M,A,P,z) == \<forall>x[M]. x \<in> z \<longleftrightarrow> x \<in> A & P(x)" *)
@@ -342,8 +342,8 @@
 by (simp add: sats_Collect_fm [OF is_P_iff_sats])
 
 
-text{*The second argument of @{term is_P} gives it direct access to @{term x},
-  which is essential for handling free variable references.*}
+text\<open>The second argument of @{term is_P} gives it direct access to @{term x},
+  which is essential for handling free variable references.\<close>
 theorem Collect_reflection:
   assumes is_P_reflection:
     "!!h f g. REFLECTS[\<lambda>x. is_P(L, f(x), g(x)),
@@ -355,10 +355,10 @@
 done
 
 
-subsubsection{*The Operator @{term is_Replace}*}
+subsubsection\<open>The Operator @{term is_Replace}\<close>
 
-text{*BEWARE!  The formula @{term is_P} has free variables 0, 1
- and not the usual 1, 0!  It is enclosed within two quantifiers.*}
+text\<open>BEWARE!  The formula @{term is_P} has free variables 0, 1
+ and not the usual 1, 0!  It is enclosed within two quantifiers.\<close>
 
 (*  is_Replace :: "[i=>o,i,[i,i]=>o,i] => o"
     "is_Replace(M,A,P,z) == \<forall>u[M]. u \<in> z \<longleftrightarrow> (\<exists>x[M]. x\<in>A & P(x,u))" *)
@@ -395,8 +395,8 @@
 by (simp add: sats_Replace_fm [OF is_P_iff_sats])
 
 
-text{*The second argument of @{term is_P} gives it direct access to @{term x},
-  which is essential for handling free variable references.*}
+text\<open>The second argument of @{term is_P} gives it direct access to @{term x},
+  which is essential for handling free variable references.\<close>
 theorem Replace_reflection:
   assumes is_P_reflection:
     "!!h f g. REFLECTS[\<lambda>x. is_P(L, f(x), g(x), h(x)),
@@ -409,7 +409,7 @@
 
 
 
-subsubsection{*The Operator @{term is_DPow'}, Internalized*}
+subsubsection\<open>The Operator @{term is_DPow'}, Internalized\<close>
 
 (*  "is_DPow'(M,A,Z) == 
        \<forall>X[M]. X \<in> Z \<longleftrightarrow> 
@@ -454,7 +454,7 @@
 done
 
 
-subsection{*A Locale for Relativizing the Operator @{term Lset}*}
+subsection\<open>A Locale for Relativizing the Operator @{term Lset}\<close>
 
 definition
   transrec_body :: "[i=>o,i,i,i,i] => o" where
@@ -506,13 +506,13 @@
 done
 
 
-text{*Relativization of the Operator @{term Lset}*}
+text\<open>Relativization of the Operator @{term Lset}\<close>
 
 definition
   is_Lset :: "[i=>o, i, i] => o" where
-   --{*We can use the term language below because @{term is_Lset} will
+   --\<open>We can use the term language below because @{term is_Lset} will
        not have to be internalized: it isn't used in any instance of
-       separation.*}
+       separation.\<close>
    "is_Lset(M,a,z) == is_transrec(M, %x f u. u = (\<Union>y\<in>x. DPow'(f`y)), a, z)"
 
 lemma (in M_Lset) Lset_abs:
@@ -531,9 +531,9 @@
 done
 
 
-subsection{*Instantiating the Locale @{text M_Lset}*}
+subsection\<open>Instantiating the Locale @{text M_Lset}\<close>
 
-subsubsection{*The First Instance of Replacement*}
+subsubsection\<open>The First Instance of Replacement\<close>
 
 lemma strong_rep_Reflects:
  "REFLECTS [\<lambda>u. \<exists>v[L]. v \<in> B & (\<exists>gy[L].
@@ -553,7 +553,7 @@
 done
 
 
-subsubsection{*The Second Instance of Replacement*}
+subsubsection\<open>The Second Instance of Replacement\<close>
 
 lemma transrec_rep_Reflects:
  "REFLECTS [\<lambda>x. \<exists>v[L]. v \<in> B &
@@ -570,8 +570,8 @@
                       is_DPow'(##Lset(i),gy,z), r) & 
                       big_union(##Lset(i),r,u), mr, v, y))]" 
 apply (simp only: rex_setclass_is_bex [symmetric])
-  --{*Convert @{text "\<exists>y\<in>Lset(i)"} to @{text "\<exists>y[##Lset(i)]"} within the body
-       of the @{term is_wfrec} application. *}
+  --\<open>Convert @{text "\<exists>y\<in>Lset(i)"} to @{text "\<exists>y[##Lset(i)]"} within the body
+       of the @{term is_wfrec} application.\<close>
 apply (intro FOL_reflections function_reflections 
           is_wfrec_reflection Replace_reflection DPow'_reflection) 
 done
@@ -593,7 +593,7 @@
 done
 
 
-subsubsection{*Actually Instantiating @{text M_Lset}*}
+subsubsection\<open>Actually Instantiating @{text M_Lset}\<close>
 
 lemma M_Lset_axioms_L: "M_Lset_axioms(L)"
   apply (rule M_Lset_axioms.intro)
@@ -606,12 +606,12 @@
   apply (rule M_Lset_axioms_L) 
   done
 
-text{*Finally: the point of the whole theory!*}
+text\<open>Finally: the point of the whole theory!\<close>
 lemmas Lset_closed = M_Lset.Lset_closed [OF M_Lset_L]
    and Lset_abs = M_Lset.Lset_abs [OF M_Lset_L]
 
 
-subsection{*The Notion of Constructible Set*}
+subsection\<open>The Notion of Constructible Set\<close>
 
 definition
   constructible :: "[i=>o,i] => o" where
--- a/src/ZF/Constructible/Datatype_absolute.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/Datatype_absolute.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,12 +2,12 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section {*Absoluteness Properties for Recursive Datatypes*}
+section \<open>Absoluteness Properties for Recursive Datatypes\<close>
 
 theory Datatype_absolute imports Formula WF_absolute begin
 
 
-subsection{*The lfp of a continuous function can be expressed as a union*}
+subsection\<open>The lfp of a continuous function can be expressed as a union\<close>
 
 definition
   directed :: "i=>o" where
@@ -78,7 +78,7 @@
           intro: lfp_subset_Union Union_subset_lfp)
 
 
-subsubsection{*Some Standard Datatype Constructions Preserve Continuity*}
+subsubsection\<open>Some Standard Datatype Constructions Preserve Continuity\<close>
 
 lemma contin_imp_mono: "[|X\<subseteq>Y; contin(F)|] ==> F(X) \<subseteq> F(Y)"
 apply (simp add: contin_def) 
@@ -111,7 +111,7 @@
 
 
 
-subsection {*Absoluteness for "Iterates"*}
+subsection \<open>Absoluteness for "Iterates"\<close>
 
 definition
   iterates_MH :: "[i=>o, [i,i]=>o, i, i, i, i] => o" where
@@ -169,7 +169,7 @@
 done
 
 
-subsection {*lists without univ*}
+subsection \<open>lists without univ\<close>
 
 lemmas datatype_univs = Inl_in_univ Inr_in_univ 
                         Pair_in_univ nat_into_univ A_into_univ 
@@ -184,7 +184,7 @@
 lemma list_fun_contin: "contin(\<lambda>X. {0} + A*X)"
 by (intro sum_contin prod_contin id_contin const_contin) 
 
-text{*Re-expresses lists using sum and product*}
+text\<open>Re-expresses lists using sum and product\<close>
 lemma list_eq_lfp2: "list(A) = lfp(univ(A), \<lambda>X. {0} + A*X)"
 apply (simp add: list_def) 
 apply (rule equalityI) 
@@ -193,7 +193,7 @@
  apply (clarify, subst lfp_unfold [OF list_fun_bnd_mono])
  apply (simp add: Nil_def Cons_def)
  apply blast 
-txt{*Opposite inclusion*}
+txt\<open>Opposite inclusion\<close>
 apply (rule lfp_lowerbound) 
  prefer 2 apply (rule lfp_subset) 
 apply (clarify, subst lfp_unfold [OF list.bnd_mono]) 
@@ -202,7 +202,7 @@
              dest: lfp_subset [THEN subsetD])
 done
 
-text{*Re-expresses lists using "iterates", no univ.*}
+text\<open>Re-expresses lists using "iterates", no univ.\<close>
 lemma list_eq_Union:
      "list(A) = (\<Union>n\<in>nat. (\<lambda>X. {0} + A*X) ^ n (0))"
 by (simp add: list_eq_lfp2 lfp_eq_Union list_fun_bnd_mono list_fun_contin)
@@ -219,7 +219,7 @@
 by (simp add: is_list_functor_def singleton_0 nat_into_M)
 
 
-subsection {*formulas without univ*}
+subsection \<open>formulas without univ\<close>
 
 lemma formula_fun_bnd_mono:
      "bnd_mono(univ(0), \<lambda>X. ((nat*nat) + (nat*nat)) + (X*X + X))"
@@ -234,7 +234,7 @@
 by (intro sum_contin prod_contin id_contin const_contin) 
 
 
-text{*Re-expresses formulas using sum and product*}
+text\<open>Re-expresses formulas using sum and product\<close>
 lemma formula_eq_lfp2:
     "formula = lfp(univ(0), \<lambda>X. ((nat*nat) + (nat*nat)) + (X*X + X))"
 apply (simp add: formula_def) 
@@ -244,7 +244,7 @@
  apply (clarify, subst lfp_unfold [OF formula_fun_bnd_mono])
  apply (simp add: Member_def Equal_def Nand_def Forall_def)
  apply blast 
-txt{*Opposite inclusion*}
+txt\<open>Opposite inclusion\<close>
 apply (rule lfp_lowerbound) 
  prefer 2 apply (rule lfp_subset, clarify) 
 apply (subst lfp_unfold [OF formula.bnd_mono, simplified]) 
@@ -253,7 +253,7 @@
 apply (blast intro: datatype_univs dest: lfp_subset [THEN subsetD])+  
 done
 
-text{*Re-expresses formulas using "iterates", no univ.*}
+text\<open>Re-expresses formulas using "iterates", no univ.\<close>
 lemma formula_eq_Union:
      "formula = 
       (\<Union>n\<in>nat. (\<lambda>X. ((nat*nat) + (nat*nat)) + (X*X + X)) ^ n (0))"
@@ -277,7 +277,7 @@
 by (simp add: is_formula_functor_def) 
 
 
-subsection{*@{term M} Contains the List and Formula Datatypes*}
+subsection\<open>@{term M} Contains the List and Formula Datatypes\<close>
 
 definition
   list_N :: "[i,i] => i" where
@@ -290,8 +290,8 @@
      "Cons(a,l) \<in> list_N(A,succ(n)) \<longleftrightarrow> a\<in>A & l \<in> list_N(A,n)"
 by (simp add: list_N_def Cons_def) 
 
-text{*These two aren't simprules because they reveal the underlying
-list representation.*}
+text\<open>These two aren't simprules because they reveal the underlying
+list representation.\<close>
 lemma list_N_0: "list_N(A,0) = 0"
 by (simp add: list_N_def)
 
@@ -325,8 +325,8 @@
 apply (blast intro: list_imp_list_N) 
 done
   
-text{*Express @{term list_rec} without using @{term rank} or @{term Vset},
-neither of which is absolute.*}
+text\<open>Express @{term list_rec} without using @{term rank} or @{term Vset},
+neither of which is absolute.\<close>
 lemma (in M_trivial) list_rec_eq:
   "l \<in> list(A) ==>
    list_rec(a,g,l) = 
@@ -356,7 +356,7 @@
   is_list :: "[i=>o,i,i] => o" where
     "is_list(M,A,Z) == \<forall>l[M]. l \<in> Z \<longleftrightarrow> mem_list(M,A,l)"
 
-subsubsection{*Towards Absoluteness of @{term formula_rec}*}
+subsubsection\<open>Towards Absoluteness of @{term formula_rec}\<close>
 
 consts   depth :: "i=>i"
 primrec
@@ -389,8 +389,8 @@
      "Forall(x) \<in> formula_N(succ(n)) \<longleftrightarrow> x \<in> formula_N(n)"
 by (simp add: formula_N_def Forall_def) 
 
-text{*These two aren't simprules because they reveal the underlying
-formula representation.*}
+text\<open>These two aren't simprules because they reveal the underlying
+formula representation.\<close>
 lemma formula_N_0: "formula_N(0) = 0"
 by (simp add: formula_N_def)
 
@@ -430,7 +430,7 @@
 done
 
 
-text{*This result and the next are unused.*}
+text\<open>This result and the next are unused.\<close>
 lemma formula_N_mono [rule_format]:
   "[| m \<in> nat; n \<in> nat |] ==> m\<le>n \<longrightarrow> formula_N(m) \<subseteq> formula_N(n)"
 apply (rule_tac m = m and n = n in diff_induct)
@@ -476,7 +476,7 @@
    "M(l) ==> iterates_replacement(M, %l t. is_tl(M,l,t), l)"
 
 
-subsubsection{*Absoluteness of the List Construction*}
+subsubsection\<open>Absoluteness of the List Construction\<close>
 
 lemma (in M_datatypes) list_replacement2':
   "M(A) ==> strong_replacement(M, \<lambda>n y. n\<in>nat & y = (\<lambda>X. {0} + A * X)^n (0))"
@@ -493,7 +493,7 @@
                list_replacement2' relation1_def
                iterates_closed [of "is_list_functor(M,A)"])
 
-text{*WARNING: use only with @{text "dest:"} or with variables fixed!*}
+text\<open>WARNING: use only with @{text "dest:"} or with variables fixed!\<close>
 lemmas (in M_datatypes) list_into_M = transM [OF _ list_closed]
 
 lemma (in M_datatypes) list_N_abs [simp]:
@@ -524,7 +524,7 @@
 apply (rule M_equalityI, simp_all)
 done
 
-subsubsection{*Absoluteness of Formulas*}
+subsubsection\<open>Absoluteness of Formulas\<close>
 
 lemma (in M_datatypes) formula_replacement2':
   "strong_replacement(M, \<lambda>n y. n\<in>nat & y = (\<lambda>X. ((nat*nat) + (nat*nat)) + (X*X + X))^n (0))"
@@ -574,9 +574,9 @@
 done
 
 
-subsection{*Absoluteness for @{text \<epsilon>}-Closure: the @{term eclose} Operator*}
+subsection\<open>Absoluteness for @{text \<epsilon>}-Closure: the @{term eclose} Operator\<close>
 
-text{*Re-expresses eclose using "iterates"*}
+text\<open>Re-expresses eclose using "iterates"\<close>
 lemma eclose_eq_Union:
      "eclose(A) = (\<Union>n\<in>nat. Union^n (A))"
 apply (simp add: eclose_def)
@@ -645,9 +645,9 @@
 done
 
 
-subsection {*Absoluteness for @{term transrec}*}
+subsection \<open>Absoluteness for @{term transrec}\<close>
 
-text{* @{prop "transrec(a,H) \<equiv> wfrec(Memrel(eclose({a})), a, H)"} *}
+text\<open>@{prop "transrec(a,H) \<equiv> wfrec(Memrel(eclose({a})), a, H)"}\<close>
 
 definition
   is_transrec :: "[i=>o, [i,i,i]=>o, i, i] => o" where
@@ -663,9 +663,9 @@
        upair(M,a,a,sa) & is_eclose(M,sa,esa) & membership(M,esa,mesa) &
        wfrec_replacement(M,MH,mesa)"
 
-text{*The condition @{term "Ord(i)"} lets us use the simpler
+text\<open>The condition @{term "Ord(i)"} lets us use the simpler
   @{text "trans_wfrec_abs"} rather than @{text "trans_wfrec_abs"},
-  which I haven't even proved yet. *}
+  which I haven't even proved yet.\<close>
 theorem (in M_eclose) transrec_abs:
   "[|transrec_replacement(M,MH,i);  relation2(M,MH,H);
      Ord(i);  M(i);  M(z);
@@ -684,7 +684,7 @@
         transrec_def eclose_sing_Ord_eq wf_Memrel trans_Memrel relation_Memrel)
 
 
-text{*Helps to prove instances of @{term transrec_replacement}*}
+text\<open>Helps to prove instances of @{term transrec_replacement}\<close>
 lemma (in M_eclose) transrec_replacementI:
    "[|M(a);
       strong_replacement (M,
@@ -694,8 +694,8 @@
 by (simp add: transrec_replacement_def wfrec_replacement_def)
 
 
-subsection{*Absoluteness for the List Operator @{term length}*}
-text{*But it is never used.*}
+subsection\<open>Absoluteness for the List Operator @{term length}\<close>
+text\<open>But it is never used.\<close>
 
 definition
   is_length :: "[i=>o,i,i,i] => o" where
@@ -714,13 +714,13 @@
              dest: list_N_imp_length_lt)
 done
 
-text{*Proof is trivial since @{term length} returns natural numbers.*}
+text\<open>Proof is trivial since @{term length} returns natural numbers.\<close>
 lemma (in M_trivial) length_closed [intro,simp]:
      "l \<in> list(A) ==> M(length(l))"
 by (simp add: nat_into_M)
 
 
-subsection {*Absoluteness for the List Operator @{term nth}*}
+subsection \<open>Absoluteness for the List Operator @{term nth}\<close>
 
 lemma nth_eq_hd_iterates_tl [rule_format]:
      "xs \<in> list(A) ==> \<forall>n \<in> nat. nth(n,xs) = hd' (tl'^n (xs))"
@@ -737,7 +737,7 @@
 apply (simp add: tl'_Cons tl'_closed)
 done
 
-text{*Immediate by type-checking*}
+text\<open>Immediate by type-checking\<close>
 lemma (in M_datatypes) nth_closed [intro,simp]:
      "[|xs \<in> list(A); n \<in> nat; M(A)|] ==> M(nth(n,xs))"
 apply (case_tac "n < length(xs)")
@@ -761,11 +761,11 @@
 done
 
 
-subsection{*Relativization and Absoluteness for the @{term formula} Constructors*}
+subsection\<open>Relativization and Absoluteness for the @{term formula} Constructors\<close>
 
 definition
   is_Member :: "[i=>o,i,i,i] => o" where
-     --{* because @{term "Member(x,y) \<equiv> Inl(Inl(\<langle>x,y\<rangle>))"}*}
+     --\<open>because @{term "Member(x,y) \<equiv> Inl(Inl(\<langle>x,y\<rangle>))"}\<close>
     "is_Member(M,x,y,Z) ==
         \<exists>p[M]. \<exists>u[M]. pair(M,x,y,p) & is_Inl(M,p,u) & is_Inl(M,u,Z)"
 
@@ -779,7 +779,7 @@
 
 definition
   is_Equal :: "[i=>o,i,i,i] => o" where
-     --{* because @{term "Equal(x,y) \<equiv> Inl(Inr(\<langle>x,y\<rangle>))"}*}
+     --\<open>because @{term "Equal(x,y) \<equiv> Inl(Inr(\<langle>x,y\<rangle>))"}\<close>
     "is_Equal(M,x,y,Z) ==
         \<exists>p[M]. \<exists>u[M]. pair(M,x,y,p) & is_Inr(M,p,u) & is_Inl(M,u,Z)"
 
@@ -792,7 +792,7 @@
 
 definition
   is_Nand :: "[i=>o,i,i,i] => o" where
-     --{* because @{term "Nand(x,y) \<equiv> Inr(Inl(\<langle>x,y\<rangle>))"}*}
+     --\<open>because @{term "Nand(x,y) \<equiv> Inr(Inl(\<langle>x,y\<rangle>))"}\<close>
     "is_Nand(M,x,y,Z) ==
         \<exists>p[M]. \<exists>u[M]. pair(M,x,y,p) & is_Inl(M,p,u) & is_Inr(M,u,Z)"
 
@@ -805,7 +805,7 @@
 
 definition
   is_Forall :: "[i=>o,i,i] => o" where
-     --{* because @{term "Forall(x) \<equiv> Inr(Inr(p))"}*}
+     --\<open>because @{term "Forall(x) \<equiv> Inr(Inr(p))"}\<close>
     "is_Forall(M,p,Z) == \<exists>u[M]. is_Inr(M,p,u) & is_Inr(M,u,Z)"
 
 lemma (in M_trivial) Forall_abs [simp]:
@@ -817,20 +817,20 @@
 
 
 
-subsection {*Absoluteness for @{term formula_rec}*}
+subsection \<open>Absoluteness for @{term formula_rec}\<close>
 
 definition
   formula_rec_case :: "[[i,i]=>i, [i,i]=>i, [i,i,i,i]=>i, [i,i]=>i, i, i] => i" where
-    --{* the instance of @{term formula_case} in @{term formula_rec}*}
+    --\<open>the instance of @{term formula_case} in @{term formula_rec}\<close>
    "formula_rec_case(a,b,c,d,h) ==
         formula_case (a, b,
                 \<lambda>u v. c(u, v, h ` succ(depth(u)) ` u,
                               h ` succ(depth(v)) ` v),
                 \<lambda>u. d(u, h ` succ(depth(u)) ` u))"
 
-text{*Unfold @{term formula_rec} to @{term formula_rec_case}.
+text\<open>Unfold @{term formula_rec} to @{term formula_rec_case}.
      Express @{term formula_rec} without using @{term rank} or @{term Vset},
-neither of which is absolute.*}
+neither of which is absolute.\<close>
 lemma (in M_trivial) formula_rec_eq:
   "p \<in> formula ==>
    formula_rec(a,b,c,d,p) =
@@ -838,20 +838,20 @@
              \<lambda>x h. Lambda (formula, formula_rec_case(a,b,c,d,h))) ` p"
 apply (simp add: formula_rec_case_def)
 apply (induct_tac p)
-   txt{*Base case for @{term Member}*}
+   txt\<open>Base case for @{term Member}\<close>
    apply (subst transrec, simp add: formula.intros)
-  txt{*Base case for @{term Equal}*}
+  txt\<open>Base case for @{term Equal}\<close>
   apply (subst transrec, simp add: formula.intros)
- txt{*Inductive step for @{term Nand}*}
+ txt\<open>Inductive step for @{term Nand}\<close>
  apply (subst transrec)
  apply (simp add: succ_Un_distrib formula.intros)
-txt{*Inductive step for @{term Forall}*}
+txt\<open>Inductive step for @{term Forall}\<close>
 apply (subst transrec)
 apply (simp add: formula_imp_formula_N formula.intros)
 done
 
 
-subsubsection{*Absoluteness for the Formula Operator @{term depth}*}
+subsubsection\<open>Absoluteness for the Formula Operator @{term depth}\<close>
 
 definition
   is_depth :: "[i=>o,i,i] => o" where
@@ -870,18 +870,18 @@
              dest: formula_N_imp_depth_lt)
 done
 
-text{*Proof is trivial since @{term depth} returns natural numbers.*}
+text\<open>Proof is trivial since @{term depth} returns natural numbers.\<close>
 lemma (in M_trivial) depth_closed [intro,simp]:
      "p \<in> formula ==> M(depth(p))"
 by (simp add: nat_into_M)
 
 
-subsubsection{*@{term is_formula_case}: relativization of @{term formula_case}*}
+subsubsection\<open>@{term is_formula_case}: relativization of @{term formula_case}\<close>
 
 definition
  is_formula_case ::
     "[i=>o, [i,i,i]=>o, [i,i,i]=>o, [i,i,i]=>o, [i,i]=>o, i, i] => o" where
-  --{*no constraint on non-formulas*}
+  --\<open>no constraint on non-formulas\<close>
   "is_formula_case(M, is_a, is_b, is_c, is_d, p, z) ==
       (\<forall>x[M]. \<forall>y[M]. finite_ordinal(M,x) \<longrightarrow> finite_ordinal(M,y) \<longrightarrow>
                       is_Member(M,x,y,p) \<longrightarrow> is_a(x,y,z)) &
@@ -911,18 +911,18 @@
 by (erule formula.cases, simp_all)
 
 
-subsubsection {*Absoluteness for @{term formula_rec}: Final Results*}
+subsubsection \<open>Absoluteness for @{term formula_rec}: Final Results\<close>
 
 definition
   is_formula_rec :: "[i=>o, [i,i,i]=>o, i, i] => o" where
-    --{* predicate to relativize the functional @{term formula_rec}*}
+    --\<open>predicate to relativize the functional @{term formula_rec}\<close>
    "is_formula_rec(M,MH,p,z)  ==
       \<exists>dp[M]. \<exists>i[M]. \<exists>f[M]. finite_ordinal(M,dp) & is_depth(M,p,dp) &
              successor(M,dp,i) & fun_apply(M,f,p,z) & is_transrec(M,MH,i,f)"
 
 
-text{*Sufficient conditions to relativize the instance of @{term formula_case}
-      in @{term formula_rec}*}
+text\<open>Sufficient conditions to relativize the instance of @{term formula_case}
+      in @{term formula_rec}\<close>
 lemma (in M_datatypes) Relation1_formula_rec_case:
      "[|Relation2(M, nat, nat, is_a, a);
         Relation2(M, nat, nat, is_b, b);
@@ -939,9 +939,9 @@
 done
 
 
-text{*This locale packages the premises of the following theorems,
+text\<open>This locale packages the premises of the following theorems,
       which is the normal purpose of locales.  It doesn't accumulate
-      constraints on the class @{term M}, as in most of this deveopment.*}
+      constraints on the class @{term M}, as in most of this deveopment.\<close>
 locale Formula_Rec = M_eclose +
   fixes a and is_a and b and is_b and c and is_c and d and is_d and MH
   defines
@@ -995,7 +995,7 @@
 by (simp add: transrec_closed [OF fr_replace MH_rel2]
               nat_into_M formula_rec_lam_closed)
 
-text{*The main two results: @{term formula_rec} is absolute for @{term M}.*}
+text\<open>The main two results: @{term formula_rec} is absolute for @{term M}.\<close>
 theorem (in Formula_Rec) formula_rec_closed:
     "p \<in> formula ==> M(formula_rec(a,b,c,d,p))"
 by (simp add: formula_rec_eq fr_transrec_closed
--- a/src/ZF/Constructible/Formula.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/Formula.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,14 +2,14 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section {* First-Order Formulas and the Definition of the Class L *}
+section \<open>First-Order Formulas and the Definition of the Class L\<close>
 
 theory Formula imports Main begin
 
-subsection{*Internalized formulas of FOL*}
+subsection\<open>Internalized formulas of FOL\<close>
 
-text{*De Bruijn representation.
-  Unbound variables get their denotations from an environment.*}
+text\<open>De Bruijn representation.
+  Unbound variables get their denotations from an environment.\<close>
 
 consts   formula :: i
 datatype
@@ -109,7 +109,7 @@
 
 declare satisfies.simps [simp del]
 
-subsection{*Dividing line between primitive and derived connectives*}
+subsection\<open>Dividing line between primitive and derived connectives\<close>
 
 lemma sats_Neg_iff [simp]:
   "env \<in> list(A)
@@ -142,7 +142,7 @@
 by (simp add: Exists_def)
 
 
-subsubsection{*Derived rules to help build up formulas*}
+subsubsection\<open>Derived rules to help build up formulas\<close>
 
 lemma mem_iff_sats:
       "[| nth(i,env) = x; nth(j,env) = y; env \<in> list(A)|]
@@ -195,7 +195,7 @@
         bex_iff_sats
 
 
-subsection{*Arity of a Formula: Maximum Free de Bruijn Index*}
+subsection\<open>Arity of a Formula: Maximum Free de Bruijn Index\<close>
 
 consts   arity :: "i=>i"
 primrec
@@ -249,7 +249,7 @@
 done
 
 
-subsection{*Renaming Some de Bruijn Variables*}
+subsection\<open>Renaming Some de Bruijn Variables\<close>
 
 definition
   incr_var :: "[i,i]=>i" where
@@ -284,9 +284,9 @@
 lemma incr_bv_type [TC]: "p \<in> formula ==> incr_bv(p) \<in> nat -> formula"
 by (induct_tac p, simp_all)
 
-text{*Obviously, @{term DPow} is closed under complements and finite
+text\<open>Obviously, @{term DPow} is closed under complements and finite
 intersections and unions.  Needs an inductive lemma to allow two lists of
-parameters to be combined.*}
+parameters to be combined.\<close>
 
 lemma sats_incr_bv_iff [rule_format]:
   "[| p \<in> formula; env \<in> list(A); x \<in> A |]
@@ -325,11 +325,11 @@
                      succ_Un_distrib [symmetric] incr_var_lt incr_var_le
                      Un_commute incr_var_lemma Arith.pred_def nat_imp_quasinat
             split: split_nat_case)
- txt{*the Forall case reduces to linear arithmetic*}
+ txt\<open>the Forall case reduces to linear arithmetic\<close>
  prefer 2
  apply clarify
  apply (blast dest: lt_trans1)
-txt{*left with the And case*}
+txt\<open>left with the And case\<close>
 apply safe
  apply (blast intro: incr_And_lemma lt_trans1)
 apply (subst incr_And_lemma)
@@ -338,7 +338,7 @@
 done
 
 
-subsection{*Renaming all but the First de Bruijn Variable*}
+subsection\<open>Renaming all but the First de Bruijn Variable\<close>
 
 definition
   incr_bv1 :: "i => i" where
@@ -389,9 +389,9 @@
 
 
 
-subsection{*Definable Powerset*}
+subsection\<open>Definable Powerset\<close>
 
-text{*The definable powerset operation: Kunen's definition VI 1.1, page 165.*}
+text\<open>The definable powerset operation: Kunen's definition VI 1.1, page 165.\<close>
 definition
   DPow :: "i => i" where
   "DPow(A) == {X \<in> Pow(A).
@@ -404,7 +404,7 @@
    ==> {x\<in>A. sats(A, p, Cons(x,env))} \<in> DPow(A)"
 by (simp add: DPow_def, blast)
 
-text{*With this rule we can specify @{term p} later.*}
+text\<open>With this rule we can specify @{term p} later.\<close>
 lemma DPowI2 [rule_format]:
   "[|\<forall>x\<in>A. P(x) \<longleftrightarrow> sats(A, p, Cons(x,env));
      env \<in> list(A);  p \<in> formula;  arity(p) \<le> succ(length(env))|]
@@ -482,10 +482,10 @@
 apply (blast intro: cons_in_DPow)
 done
 
-text{*@{term DPow} is not monotonic.  For example, let @{term A} be some
+text\<open>@{term DPow} is not monotonic.  For example, let @{term A} be some
 non-constructible set of natural numbers, and let @{term B} be @{term nat}.
 Then @{term "A<=B"} and obviously @{term "A \<in> DPow(A)"} but @{term "A \<notin>
-DPow(B)"}.*}
+DPow(B)"}.\<close>
 
 (*This may be true but the proof looks difficult, requiring relativization
 lemma DPow_insert: "DPow (cons(a,A)) = DPow(A) \<union> {cons(a,X) . X \<in> DPow(A)}"
@@ -503,17 +503,17 @@
 done
 
 
-subsection{*Internalized Formulas for the Ordinals*}
+subsection\<open>Internalized Formulas for the Ordinals\<close>
 
-text{*The @{text sats} theorems below differ from the usual form in that they
+text\<open>The @{text sats} theorems below differ from the usual form in that they
 include an element of absoluteness.  That is, they relate internalized
 formulas to real concepts such as the subset relation, rather than to the
 relativized concepts defined in theory @{text Relative}.  This lets us prove
 the theorem as @{text Ords_in_DPow} without first having to instantiate the
 locale @{text M_trivial}.  Note that the present theory does not even take
-@{text Relative} as a parent.*}
+@{text Relative} as a parent.\<close>
 
-subsubsection{*The subset relation*}
+subsubsection\<open>The subset relation\<close>
 
 definition
   subset_fm :: "[i,i]=>i" where
@@ -534,7 +534,7 @@
 apply (blast intro: nth_type)
 done
 
-subsubsection{*Transitive sets*}
+subsubsection\<open>Transitive sets\<close>
 
 definition
   transset_fm :: "i=>i" where
@@ -555,7 +555,7 @@
 apply (blast intro: nth_type)
 done
 
-subsubsection{*Ordinals*}
+subsubsection\<open>Ordinals\<close>
 
 definition
   ordinal_fm :: "i=>i" where
@@ -577,8 +577,8 @@
 apply (blast intro: nth_type)
 done
 
-text{*The subset consisting of the ordinals is definable.  Essential lemma for
-@{text Ord_in_Lset}.  This result is the objective of the present subsection.*}
+text\<open>The subset consisting of the ordinals is definable.  Essential lemma for
+@{text Ord_in_Lset}.  This result is the objective of the present subsection.\<close>
 theorem Ords_in_DPow: "Transset(A) ==> {x \<in> A. Ord(x)} \<in> DPow(A)"
 apply (simp add: DPow_def Collect_subset)
 apply (rule_tac x=Nil in bexI)
@@ -587,17 +587,17 @@
 done
 
 
-subsection{* Constant Lset: Levels of the Constructible Universe *}
+subsection\<open>Constant Lset: Levels of the Constructible Universe\<close>
 
 definition
   Lset :: "i=>i" where
   "Lset(i) == transrec(i, %x f. \<Union>y\<in>x. DPow(f`y))"
 
 definition
-  L :: "i=>o" where --{*Kunen's definition VI 1.5, page 167*}
+  L :: "i=>o" where --\<open>Kunen's definition VI 1.5, page 167\<close>
   "L(x) == \<exists>i. Ord(i) & x \<in> Lset(i)"
 
-text{*NOT SUITABLE FOR REWRITING -- RECURSIVE!*}
+text\<open>NOT SUITABLE FOR REWRITING -- RECURSIVE!\<close>
 lemma Lset: "Lset(i) = (\<Union>j\<in>i. DPow(Lset(j)))"
 by (subst Lset_def [THEN def_transrec], simp)
 
@@ -609,7 +609,7 @@
 apply (blast intro: elim: equalityE)
 done
 
-subsubsection{* Transitivity *}
+subsubsection\<open>Transitivity\<close>
 
 lemma elem_subset_in_DPow: "[|X \<in> A; X \<subseteq> A|] ==> X \<in> DPow(A)"
 apply (simp add: Transset_def DPow_def)
@@ -629,7 +629,7 @@
 apply (blast intro: elem_subset_in_DPow dest: DPowD)
 done
 
-text{*Kunen's VI 1.6 (a)*}
+text\<open>Kunen's VI 1.6 (a)\<close>
 lemma Transset_Lset: "Transset(Lset(i))"
 apply (rule_tac a=i in eps_induct)
 apply (subst Lset)
@@ -641,9 +641,9 @@
 apply (simp add: Transset_def)
 done
 
-subsubsection{* Monotonicity *}
+subsubsection\<open>Monotonicity\<close>
 
-text{*Kunen's VI 1.6 (b)*}
+text\<open>Kunen's VI 1.6 (b)\<close>
 lemma Lset_mono [rule_format]:
      "\<forall>j. i<=j \<longrightarrow> Lset(i) \<subseteq> Lset(j)"
 proof (induct i rule: eps_induct, intro allI impI)
@@ -654,7 +654,7 @@
     by (force simp add: Lset [of x] Lset [of j])
 qed
 
-text{*This version lets us remove the premise @{term "Ord(i)"} sometimes.*}
+text\<open>This version lets us remove the premise @{term "Ord(i)"} sometimes.\<close>
 lemma Lset_mono_mem [rule_format]:
      "\<forall>j. i \<in> j \<longrightarrow> Lset(i) \<subseteq> Lset(j)"
 proof (induct i rule: eps_induct, intro allI impI)
@@ -667,11 +667,11 @@
 qed
 
 
-text{*Useful with Reflection to bump up the ordinal*}
+text\<open>Useful with Reflection to bump up the ordinal\<close>
 lemma subset_Lset_ltD: "[|A \<subseteq> Lset(i); i < j|] ==> A \<subseteq> Lset(j)"
 by (blast dest: ltD [THEN Lset_mono_mem])
 
-subsubsection{* 0, successor and limit equations for Lset *}
+subsubsection\<open>0, successor and limit equations for Lset\<close>
 
 lemma Lset_0 [simp]: "Lset(0) = 0"
 by (subst Lset, blast)
@@ -696,17 +696,17 @@
 lemma Lset_Union [simp]: "Lset(\<Union>(X)) = (\<Union>y\<in>X. Lset(y))"
 apply (subst Lset)
 apply (rule equalityI)
- txt{*first inclusion*}
+ txt\<open>first inclusion\<close>
  apply (rule UN_least)
  apply (erule UnionE)
  apply (rule subset_trans)
   apply (erule_tac [2] UN_upper, subst Lset, erule UN_upper)
-txt{*opposite inclusion*}
+txt\<open>opposite inclusion\<close>
 apply (rule UN_least)
 apply (subst Lset, blast)
 done
 
-subsubsection{* Lset applied to Limit ordinals *}
+subsubsection\<open>Lset applied to Limit ordinals\<close>
 
 lemma Limit_Lset_eq:
     "Limit(i) ==> Lset(i) = (\<Union>y\<in>i. Lset(y))"
@@ -726,7 +726,7 @@
 apply (blast intro: ltI  Limit_is_Ord)
 done
 
-subsubsection{* Basic closure properties *}
+subsubsection\<open>Basic closure properties\<close>
 
 lemma zero_in_Lset: "y \<in> x ==> 0 \<in> Lset(x)"
 by (subst Lset, blast intro: empty_in_DPow)
@@ -738,22 +738,22 @@
 done
 
 
-subsection{*Constructible Ordinals: Kunen's VI 1.9 (b)*}
+subsection\<open>Constructible Ordinals: Kunen's VI 1.9 (b)\<close>
 
 lemma Ords_of_Lset_eq: "Ord(i) ==> {x\<in>Lset(i). Ord(x)} = i"
 apply (erule trans_induct3)
   apply (simp_all add: Lset_succ Limit_Lset_eq Limit_Union_eq)
-txt{*The successor case remains.*}
+txt\<open>The successor case remains.\<close>
 apply (rule equalityI)
-txt{*First inclusion*}
+txt\<open>First inclusion\<close>
  apply clarify
  apply (erule Ord_linear_lt, assumption)
    apply (blast dest: DPow_imp_subset ltD notE [OF notin_Lset])
   apply blast
  apply (blast dest: ltD)
-txt{*Opposite inclusion, @{term "succ(x) \<subseteq> DPow(Lset(x)) \<inter> ON"}*}
+txt\<open>Opposite inclusion, @{term "succ(x) \<subseteq> DPow(Lset(x)) \<inter> ON"}\<close>
 apply auto
-txt{*Key case: *}
+txt\<open>Key case:\<close>
   apply (erule subst, rule Ords_in_DPow [OF Transset_Lset])
  apply (blast intro: elem_subset_in_DPow dest: OrdmemD elim: equalityE)
 apply (blast intro: Ord_in_Ord)
@@ -772,7 +772,7 @@
 lemma Ord_in_L: "Ord(i) ==> L(i)"
 by (simp add: L_def, blast intro: Ord_in_Lset)
 
-subsubsection{* Unions *}
+subsubsection\<open>Unions\<close>
 
 lemma Union_in_Lset:
      "X \<in> Lset(i) ==> \<Union>(X) \<in> Lset(succ(i))"
@@ -780,7 +780,7 @@
 apply (rule LsetI [OF succI1])
 apply (simp add: Transset_def DPow_def)
 apply (intro conjI, blast)
-txt{*Now to create the formula @{term "\<exists>y. y \<in> X \<and> x \<in> y"} *}
+txt\<open>Now to create the formula @{term "\<exists>y. y \<in> X \<and> x \<in> y"}\<close>
 apply (rule_tac x="Cons(X,Nil)" in bexI)
  apply (rule_tac x="Exists(And(Member(0,2), Member(1,0)))" in bexI)
   apply typecheck
@@ -790,7 +790,7 @@
 theorem Union_in_L: "L(X) ==> L(\<Union>(X))"
 by (simp add: L_def, blast dest: Union_in_Lset)
 
-subsubsection{* Finite sets and ordered pairs *}
+subsubsection\<open>Finite sets and ordered pairs\<close>
 
 lemma singleton_in_Lset: "a \<in> Lset(i) ==> {a} \<in> Lset(succ(i))"
 by (simp add: Lset_succ singleton_in_DPow)
@@ -808,7 +808,7 @@
 lemmas Lset_UnI1 = Un_upper1 [THEN Lset_mono [THEN subsetD]]
 lemmas Lset_UnI2 = Un_upper2 [THEN Lset_mono [THEN subsetD]]
 
-text{*Hard work is finding a single @{term"j \<in> i"} such that @{term"{a,b} \<subseteq> Lset(j)"}*}
+text\<open>Hard work is finding a single @{term"j \<in> i"} such that @{term"{a,b} \<subseteq> Lset(j)"}\<close>
 lemma doubleton_in_LLimit:
     "[| a \<in> Lset(i);  b \<in> Lset(i);  Limit(i) |] ==> {a,b} \<in> Lset(i)"
 apply (erule Limit_LsetE, assumption)
@@ -825,19 +825,19 @@
 
 lemma Pair_in_LLimit:
     "[| a \<in> Lset(i);  b \<in> Lset(i);  Limit(i) |] ==> <a,b> \<in> Lset(i)"
-txt{*Infer that a, b occur at ordinals x,xa < i.*}
+txt\<open>Infer that a, b occur at ordinals x,xa < i.\<close>
 apply (erule Limit_LsetE, assumption)
 apply (erule Limit_LsetE, assumption)
-txt{*Infer that @{term"succ(succ(x \<union> xa)) < i"} *}
+txt\<open>Infer that @{term"succ(succ(x \<union> xa)) < i"}\<close>
 apply (blast intro: lt_Ord lt_LsetI [OF Pair_in_Lset]
                     Lset_UnI1 Lset_UnI2 Limit_has_succ Un_least_lt)
 done
 
 
 
-text{*The rank function for the constructible universe*}
+text\<open>The rank function for the constructible universe\<close>
 definition
-  lrank :: "i=>i" where --{*Kunen's definition VI 1.7*}
+  lrank :: "i=>i" where --\<open>Kunen's definition VI 1.7\<close>
   "lrank(x) == \<mu> i. x \<in> Lset(succ(i))"
 
 lemma L_I: "[|x \<in> Lset(i); Ord(i)|] ==> L(x)"
@@ -858,9 +858,9 @@
 apply (blast intro: ltI Limit_is_Ord lt_trans)
 done
 
-text{*Kunen's VI 1.8.  The proof is much harder than the text would
+text\<open>Kunen's VI 1.8.  The proof is much harder than the text would
 suggest.  For a start, it needs the previous lemma, which is proved by
-induction.*}
+induction.\<close>
 lemma Lset_iff_lrank_lt: "Ord(i) ==> x \<in> Lset(i) \<longleftrightarrow> L(x) & lrank(x) < i"
 apply (simp add: L_def, auto)
  apply (blast intro: Lset_lrank_lt)
@@ -873,7 +873,7 @@
 lemma Lset_succ_lrank_iff [simp]: "x \<in> Lset(succ(lrank(x))) \<longleftrightarrow> L(x)"
 by (simp add: Lset_iff_lrank_lt)
 
-text{*Kunen's VI 1.9 (a)*}
+text\<open>Kunen's VI 1.9 (a)\<close>
 lemma lrank_of_Ord: "Ord(i) ==> lrank(i) = i"
 apply (unfold lrank_def)
 apply (rule Least_equality)
@@ -884,10 +884,10 @@
 done
 
 
-text{*This is lrank(lrank(a)) = lrank(a) *}
+text\<open>This is lrank(lrank(a)) = lrank(a)\<close>
 declare Ord_lrank [THEN lrank_of_Ord, simp]
 
-text{*Kunen's VI 1.10 *}
+text\<open>Kunen's VI 1.10\<close>
 lemma Lset_in_Lset_succ: "Lset(i) \<in> Lset(succ(i))"
 apply (simp add: Lset_succ DPow_def)
 apply (rule_tac x=Nil in bexI)
@@ -906,7 +906,7 @@
 apply (blast intro!: le_imp_subset Lset_mono)
 done
 
-text{*Kunen's VI 1.11 *}
+text\<open>Kunen's VI 1.11\<close>
 lemma Lset_subset_Vset: "Ord(i) ==> Lset(i) \<subseteq> Vset(i)"
 apply (erule trans_induct)
 apply (subst Lset)
@@ -916,14 +916,14 @@
 apply (rule Pow_mono, blast)
 done
 
-text{*Kunen's VI 1.12 *}
+text\<open>Kunen's VI 1.12\<close>
 lemma Lset_subset_Vset': "i \<in> nat ==> Lset(i) = Vset(i)"
 apply (erule nat_induct)
  apply (simp add: Vfrom_0)
 apply (simp add: Lset_succ Vset_succ Finite_Vset Finite_DPow_eq_Pow)
 done
 
-text{*Every set of constructible sets is included in some @{term Lset}*}
+text\<open>Every set of constructible sets is included in some @{term Lset}\<close>
 lemma subset_Lset:
      "(\<forall>x\<in>A. L(x)) ==> \<exists>i. Ord(i) & A \<subseteq> Lset(i)"
 by (rule_tac x = "\<Union>x\<in>A. succ(lrank(x))" in exI, force)
@@ -934,7 +934,7 @@
       ==> P"
 by (blast dest: subset_Lset)
 
-subsubsection{*For L to satisfy the Powerset axiom *}
+subsubsection\<open>For L to satisfy the Powerset axiom\<close>
 
 lemma LPow_env_typing:
     "[| y \<in> Lset(i); Ord(i); y \<subseteq> X |]
@@ -949,7 +949,7 @@
 apply (simp add: DPow_def)
 apply (intro conjI, clarify)
  apply (rule_tac a=x in UN_I, simp+)
-txt{*Now to create the formula @{term "y \<subseteq> X"} *}
+txt\<open>Now to create the formula @{term "y \<subseteq> X"}\<close>
 apply (rule_tac x="Cons(X,Nil)" in bexI)
  apply (rule_tac x="subset_fm(0,1)" in bexI)
   apply typecheck
@@ -964,7 +964,7 @@
 by (blast intro: L_I dest: L_D LPow_in_Lset)
 
 
-subsection{*Eliminating @{term arity} from the Definition of @{term Lset}*}
+subsection\<open>Eliminating @{term arity} from the Definition of @{term Lset}\<close>
 
 lemma nth_zero_eq_0: "n \<in> nat ==> nth(n,[0]) = 0"
 by (induct_tac n, auto)
@@ -995,7 +995,7 @@
 done
 
 
-text{*A simpler version of @{term DPow}: no arity check!*}
+text\<open>A simpler version of @{term DPow}: no arity check!\<close>
 definition
   DPow' :: "i => i" where
   "DPow'(A) == {X \<in> Pow(A).
@@ -1022,8 +1022,8 @@
 apply (erule DPow'_subset_DPow)
 done
 
-text{*And thus we can relativize @{term Lset} without bothering with
-      @{term arity} and @{term length}*}
+text\<open>And thus we can relativize @{term Lset} without bothering with
+      @{term arity} and @{term length}\<close>
 lemma Lset_eq_transrec_DPow': "Lset(i) = transrec(i, %x f. \<Union>y\<in>x. DPow'(f`y))"
 apply (rule_tac a=i in eps_induct)
 apply (subst Lset)
@@ -1031,8 +1031,8 @@
 apply (simp only: DPow_eq_DPow' [OF Transset_Lset], simp)
 done
 
-text{*With this rule we can specify @{term p} later and don't worry about
-      arities at all!*}
+text\<open>With this rule we can specify @{term p} later and don't worry about
+      arities at all!\<close>
 lemma DPow_LsetI [rule_format]:
   "[|\<forall>x\<in>Lset(i). P(x) \<longleftrightarrow> sats(Lset(i), p, Cons(x,env));
      env \<in> list(Lset(i));  p \<in> formula|]
--- a/src/ZF/Constructible/Internalize.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/Internalize.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -4,9 +4,9 @@
 
 theory Internalize imports L_axioms Datatype_absolute begin
 
-subsection{*Internalized Forms of Data Structuring Operators*}
+subsection\<open>Internalized Forms of Data Structuring Operators\<close>
 
-subsubsection{*The Formula @{term is_Inl}, Internalized*}
+subsubsection\<open>The Formula @{term is_Inl}, Internalized\<close>
 
 (*  is_Inl(M,a,z) == \<exists>zero[M]. empty(M,zero) & pair(M,zero,a,z) *)
 definition
@@ -36,7 +36,7 @@
 done
 
 
-subsubsection{*The Formula @{term is_Inr}, Internalized*}
+subsubsection\<open>The Formula @{term is_Inr}, Internalized\<close>
 
 (*  is_Inr(M,a,z) == \<exists>n1[M]. number1(M,n1) & pair(M,n1,a,z) *)
 definition
@@ -66,7 +66,7 @@
 done
 
 
-subsubsection{*The Formula @{term is_Nil}, Internalized*}
+subsubsection\<open>The Formula @{term is_Nil}, Internalized\<close>
 
 (* is_Nil(M,xs) == \<exists>zero[M]. empty(M,zero) & is_Inl(M,zero,xs) *)
 
@@ -95,7 +95,7 @@
 done
 
 
-subsubsection{*The Formula @{term is_Cons}, Internalized*}
+subsubsection\<open>The Formula @{term is_Cons}, Internalized\<close>
 
 
 (*  "is_Cons(M,a,l,Z) == \<exists>p[M]. pair(M,a,l,p) & is_Inr(M,p,Z)" *)
@@ -127,7 +127,7 @@
 apply (intro FOL_reflections pair_reflection Inr_reflection)
 done
 
-subsubsection{*The Formula @{term is_quasilist}, Internalized*}
+subsubsection\<open>The Formula @{term is_quasilist}, Internalized\<close>
 
 (* is_quasilist(M,xs) == is_Nil(M,z) | (\<exists>x[M]. \<exists>l[M]. is_Cons(M,x,l,z))" *)
 
@@ -157,10 +157,10 @@
 done
 
 
-subsection{*Absoluteness for the Function @{term nth}*}
+subsection\<open>Absoluteness for the Function @{term nth}\<close>
 
 
-subsubsection{*The Formula @{term is_hd}, Internalized*}
+subsubsection\<open>The Formula @{term is_hd}, Internalized\<close>
 
 (*   "is_hd(M,xs,H) == 
        (is_Nil(M,xs) \<longrightarrow> empty(M,H)) &
@@ -197,7 +197,7 @@
 done
 
 
-subsubsection{*The Formula @{term is_tl}, Internalized*}
+subsubsection\<open>The Formula @{term is_tl}, Internalized\<close>
 
 (*     "is_tl(M,xs,T) ==
        (is_Nil(M,xs) \<longrightarrow> T=xs) &
@@ -234,12 +234,12 @@
 done
 
 
-subsubsection{*The Operator @{term is_bool_of_o}*}
+subsubsection\<open>The Operator @{term is_bool_of_o}\<close>
 
 (*   is_bool_of_o :: "[i=>o, o, i] => o"
    "is_bool_of_o(M,P,z) == (P & number1(M,z)) | (~P & empty(M,z))" *)
 
-text{*The formula @{term p} has no free variables.*}
+text\<open>The formula @{term p} has no free variables.\<close>
 definition
   bool_of_o_fm :: "[i, i]=>i" where
   "bool_of_o_fm(p,z) == 
@@ -272,12 +272,12 @@
 done
 
 
-subsection{*More Internalizations*}
+subsection\<open>More Internalizations\<close>
 
-subsubsection{*The Operator @{term is_lambda}*}
+subsubsection\<open>The Operator @{term is_lambda}\<close>
 
-text{*The two arguments of @{term p} are always 1, 0. Remember that
- @{term p} will be enclosed by three quantifiers.*}
+text\<open>The two arguments of @{term p} are always 1, 0. Remember that
+ @{term p} will be enclosed by three quantifiers.\<close>
 
 (* is_lambda :: "[i=>o, i, [i,i]=>o, i] => o"
     "is_lambda(M, A, is_b, z) == 
@@ -290,8 +290,8 @@
             Exists(Exists(And(Member(1,A#+3),
                            And(pair_fm(1,0,2), p))))))"
 
-text{*We call @{term p} with arguments x, y by equating them with 
-  the corresponding quantified variables with de Bruijn indices 1, 0.*}
+text\<open>We call @{term p} with arguments x, y by equating them with 
+  the corresponding quantified variables with de Bruijn indices 1, 0.\<close>
 
 lemma is_lambda_type [TC]:
      "[| p \<in> formula; x \<in> nat; y \<in> nat |] 
@@ -319,7 +319,7 @@
 apply (intro FOL_reflections is_b_reflection pair_reflection)
 done
 
-subsubsection{*The Operator @{term is_Member}, Internalized*}
+subsubsection\<open>The Operator @{term is_Member}, Internalized\<close>
 
 (*    "is_Member(M,x,y,Z) ==
         \<exists>p[M]. \<exists>u[M]. pair(M,x,y,p) & is_Inl(M,p,u) & is_Inl(M,u,Z)" *)
@@ -352,7 +352,7 @@
 apply (intro FOL_reflections pair_reflection Inl_reflection)
 done
 
-subsubsection{*The Operator @{term is_Equal}, Internalized*}
+subsubsection\<open>The Operator @{term is_Equal}, Internalized\<close>
 
 (*    "is_Equal(M,x,y,Z) ==
         \<exists>p[M]. \<exists>u[M]. pair(M,x,y,p) & is_Inr(M,p,u) & is_Inl(M,u,Z)" *)
@@ -385,7 +385,7 @@
 apply (intro FOL_reflections pair_reflection Inl_reflection Inr_reflection)
 done
 
-subsubsection{*The Operator @{term is_Nand}, Internalized*}
+subsubsection\<open>The Operator @{term is_Nand}, Internalized\<close>
 
 (*    "is_Nand(M,x,y,Z) ==
         \<exists>p[M]. \<exists>u[M]. pair(M,x,y,p) & is_Inl(M,p,u) & is_Inr(M,u,Z)" *)
@@ -418,7 +418,7 @@
 apply (intro FOL_reflections pair_reflection Inl_reflection Inr_reflection)
 done
 
-subsubsection{*The Operator @{term is_Forall}, Internalized*}
+subsubsection\<open>The Operator @{term is_Forall}, Internalized\<close>
 
 (* "is_Forall(M,p,Z) == \<exists>u[M]. is_Inr(M,p,u) & is_Inr(M,u,Z)" *)
 definition
@@ -450,7 +450,7 @@
 done
 
 
-subsubsection{*The Operator @{term is_and}, Internalized*}
+subsubsection\<open>The Operator @{term is_and}, Internalized\<close>
 
 (* is_and(M,a,b,z) == (number1(M,a)  & z=b) | 
                        (~number1(M,a) & empty(M,z)) *)
@@ -484,7 +484,7 @@
 done
 
 
-subsubsection{*The Operator @{term is_or}, Internalized*}
+subsubsection\<open>The Operator @{term is_or}, Internalized\<close>
 
 (* is_or(M,a,b,z) == (number1(M,a)  & number1(M,z)) | 
                      (~number1(M,a) & z=b) *)
@@ -520,7 +520,7 @@
 
 
 
-subsubsection{*The Operator @{term is_not}, Internalized*}
+subsubsection\<open>The Operator @{term is_not}, Internalized\<close>
 
 (* is_not(M,a,z) == (number1(M,a)  & empty(M,z)) | 
                      (~number1(M,a) & number1(M,z)) *)
@@ -559,11 +559,11 @@
     is_lambda_reflection Member_reflection Equal_reflection Nand_reflection
     Forall_reflection is_and_reflection is_or_reflection is_not_reflection
 
-subsection{*Well-Founded Recursion!*}
+subsection\<open>Well-Founded Recursion!\<close>
 
-subsubsection{*The Operator @{term M_is_recfun}*}
+subsubsection\<open>The Operator @{term M_is_recfun}\<close>
 
-text{*Alternative definition, minimizing nesting of quantifiers around MH*}
+text\<open>Alternative definition, minimizing nesting of quantifiers around MH\<close>
 lemma M_is_recfun_iff:
    "M_is_recfun(M,MH,r,a,f) \<longleftrightarrow>
     (\<forall>z[M]. z \<in> f \<longleftrightarrow> 
@@ -590,7 +590,7 @@
                xa \<in> r)"
 *)
 
-text{*The three arguments of @{term p} are always 2, 1, 0 and z*}
+text\<open>The three arguments of @{term p} are always 2, 1, 0 and z\<close>
 definition
   is_recfun_fm :: "[i, i, i, i]=>i" where
   "is_recfun_fm(p,r,a,f) == 
@@ -632,9 +632,9 @@
    ==> M_is_recfun(##A, MH, x, y, z) \<longleftrightarrow> sats(A, is_recfun_fm(p,i,j,k), env)"
 by (simp add: sats_is_recfun_fm [OF MH_iff_sats]) 
 
-text{*The additional variable in the premise, namely @{term f'}, is essential.
+text\<open>The additional variable in the premise, namely @{term f'}, is essential.
 It lets @{term MH} depend upon @{term x}, which seems often necessary.
-The same thing occurs in @{text is_wfrec_reflection}.*}
+The same thing occurs in @{text is_wfrec_reflection}.\<close>
 theorem is_recfun_reflection:
   assumes MH_reflection:
     "!!f' f g h. REFLECTS[\<lambda>x. MH(L, f'(x), f(x), g(x), h(x)), 
@@ -646,10 +646,10 @@
              restriction_reflection MH_reflection)
 done
 
-subsubsection{*The Operator @{term is_wfrec}*}
+subsubsection\<open>The Operator @{term is_wfrec}\<close>
 
-text{*The three arguments of @{term p} are always 2, 1, 0;
-      @{term p} is enclosed by 5 quantifiers.*}
+text\<open>The three arguments of @{term p} are always 2, 1, 0;
+      @{term p} is enclosed by 5 quantifiers.\<close>
 
 (* is_wfrec :: "[i=>o, i, [i,i,i]=>o, i, i] => o"
     "is_wfrec(M,MH,r,a,z) == 
@@ -661,11 +661,11 @@
            Exists(Exists(Exists(Exists(
              And(Equal(2,a#+5), And(Equal(1,4), And(Equal(0,z#+5), p)))))))))"
 
-text{*We call @{term p} with arguments a, f, z by equating them with 
-  the corresponding quantified variables with de Bruijn indices 2, 1, 0.*}
+text\<open>We call @{term p} with arguments a, f, z by equating them with 
+  the corresponding quantified variables with de Bruijn indices 2, 1, 0.\<close>
 
-text{*There's an additional existential quantifier to ensure that the
-      environments in both calls to MH have the same length.*}
+text\<open>There's an additional existential quantifier to ensure that the
+      environments in both calls to MH have the same length.\<close>
 
 lemma is_wfrec_type [TC]:
      "[| p \<in> formula; x \<in> nat; y \<in> nat; z \<in> nat |] 
@@ -709,9 +709,9 @@
 done
 
 
-subsection{*For Datatypes*}
+subsection\<open>For Datatypes\<close>
 
-subsubsection{*Binary Products, Internalized*}
+subsubsection\<open>Binary Products, Internalized\<close>
 
 definition
   cartprod_fm :: "[i,i,i]=>i" where
@@ -747,7 +747,7 @@
 done
 
 
-subsubsection{*Binary Sums, Internalized*}
+subsubsection\<open>Binary Sums, Internalized\<close>
 
 (* "is_sum(M,A,B,Z) ==
        \<exists>A0[M]. \<exists>n1[M]. \<exists>s1[M]. \<exists>B1[M].
@@ -787,7 +787,7 @@
 done
 
 
-subsubsection{*The Operator @{term quasinat}*}
+subsubsection\<open>The Operator @{term quasinat}\<close>
 
 (* "is_quasinat(M,z) == empty(M,z) | (\<exists>m[M]. successor(M,m,z))" *)
 definition
@@ -817,17 +817,17 @@
 done
 
 
-subsubsection{*The Operator @{term is_nat_case}*}
-text{*I could not get it to work with the more natural assumption that 
+subsubsection\<open>The Operator @{term is_nat_case}\<close>
+text\<open>I could not get it to work with the more natural assumption that 
  @{term is_b} takes two arguments.  Instead it must be a formula where 1 and 0
- stand for @{term m} and @{term b}, respectively.*}
+ stand for @{term m} and @{term b}, respectively.\<close>
 
 (* is_nat_case :: "[i=>o, i, [i,i]=>o, i, i] => o"
     "is_nat_case(M, a, is_b, k, z) ==
        (empty(M,k) \<longrightarrow> z=a) &
        (\<forall>m[M]. successor(M,m,k) \<longrightarrow> is_b(m,z)) &
        (is_quasinat(M,k) | empty(M,z))" *)
-text{*The formula @{term is_b} has free variables 1 and 0.*}
+text\<open>The formula @{term is_b} has free variables 1 and 0.\<close>
 definition
   is_nat_case_fm :: "[i, i, i, i]=>i" where
  "is_nat_case_fm(a,is_b,k,z) == 
@@ -863,9 +863,9 @@
 by (simp add: sats_is_nat_case_fm [of A is_b])
 
 
-text{*The second argument of @{term is_b} gives it direct access to @{term x},
+text\<open>The second argument of @{term is_b} gives it direct access to @{term x},
   which is essential for handling free variable references.  Without this
-  argument, we cannot prove reflection for @{term iterates_MH}.*}
+  argument, we cannot prove reflection for @{term iterates_MH}.\<close>
 theorem is_nat_case_reflection:
   assumes is_b_reflection:
     "!!h f g. REFLECTS[\<lambda>x. is_b(L, h(x), f(x), g(x)),
@@ -878,7 +878,7 @@
 done
 
 
-subsection{*The Operator @{term iterates_MH}, Needed for Iteration*}
+subsection\<open>The Operator @{term iterates_MH}, Needed for Iteration\<close>
 
 (*  iterates_MH :: "[i=>o, [i,i]=>o, i, i, i, i] => o"
    "iterates_MH(M,isF,v,n,g,z) ==
@@ -926,9 +926,9 @@
        sats(A, iterates_MH_fm(p,i',i,j,k), env)"
 by (simp add: sats_iterates_MH_fm [OF is_F_iff_sats]) 
 
-text{*The second argument of @{term p} gives it direct access to @{term x},
+text\<open>The second argument of @{term p} gives it direct access to @{term x},
   which is essential for handling free variable references.  Without this
-  argument, we cannot prove reflection for @{term list_N}.*}
+  argument, we cannot prove reflection for @{term list_N}.\<close>
 theorem iterates_MH_reflection:
   assumes p_reflection:
     "!!f g h. REFLECTS[\<lambda>x. p(L, h(x), f(x), g(x)),
@@ -941,10 +941,10 @@
 done
 
 
-subsubsection{*The Operator @{term is_iterates}*}
+subsubsection\<open>The Operator @{term is_iterates}\<close>
 
-text{*The three arguments of @{term p} are always 2, 1, 0;
-      @{term p} is enclosed by 9 (??) quantifiers.*}
+text\<open>The three arguments of @{term p} are always 2, 1, 0;
+      @{term p} is enclosed by 9 (??) quantifiers.\<close>
 
 (*    "is_iterates(M,isF,v,n,Z) == 
       \<exists>sn[M]. \<exists>msn[M]. successor(M,n,sn) & membership(M,sn,msn) &
@@ -959,8 +959,8 @@
               is_wfrec_fm(iterates_MH_fm(p, v#+7, 2, 1, 0), 
                           0, n#+2, Z#+2)))))"
 
-text{*We call @{term p} with arguments a, f, z by equating them with 
-  the corresponding quantified variables with de Bruijn indices 2, 1, 0.*}
+text\<open>We call @{term p} with arguments a, f, z by equating them with 
+  the corresponding quantified variables with de Bruijn indices 2, 1, 0.\<close>
 
 
 lemma is_iterates_type [TC]:
@@ -1002,9 +1002,9 @@
        sats(A, is_iterates_fm(p,i,j,k), env)"
 by (simp add: sats_is_iterates_fm [OF is_F_iff_sats]) 
 
-text{*The second argument of @{term p} gives it direct access to @{term x},
+text\<open>The second argument of @{term p} gives it direct access to @{term x},
   which is essential for handling free variable references.  Without this
-  argument, we cannot prove reflection for @{term list_N}.*}
+  argument, we cannot prove reflection for @{term list_N}.\<close>
 theorem is_iterates_reflection:
   assumes p_reflection:
     "!!f g h. REFLECTS[\<lambda>x. p(L, h(x), f(x), g(x)),
@@ -1017,7 +1017,7 @@
 done
 
 
-subsubsection{*The Formula @{term is_eclose_n}, Internalized*}
+subsubsection\<open>The Formula @{term is_eclose_n}, Internalized\<close>
 
 (* is_eclose_n(M,A,n,Z) == is_iterates(M, big_union(M), A, n, Z) *)
 
@@ -1053,7 +1053,7 @@
 done
 
 
-subsubsection{*Membership in @{term "eclose(A)"}*}
+subsubsection\<open>Membership in @{term "eclose(A)"}\<close>
 
 (* mem_eclose(M,A,l) == 
       \<exists>n[M]. \<exists>eclosen[M]. 
@@ -1088,7 +1088,7 @@
 done
 
 
-subsubsection{*The Predicate ``Is @{term "eclose(A)"}''*}
+subsubsection\<open>The Predicate ``Is @{term "eclose(A)"}''\<close>
 
 (* is_eclose(M,A,Z) == \<forall>l[M]. l \<in> Z \<longleftrightarrow> mem_eclose(M,A,l) *)
 definition
@@ -1119,7 +1119,7 @@
 done
 
 
-subsubsection{*The List Functor, Internalized*}
+subsubsection\<open>The List Functor, Internalized\<close>
 
 definition
   list_functor_fm :: "[i,i,i]=>i" where
@@ -1156,7 +1156,7 @@
 done
 
 
-subsubsection{*The Formula @{term is_list_N}, Internalized*}
+subsubsection\<open>The Formula @{term is_list_N}, Internalized\<close>
 
 (* "is_list_N(M,A,n,Z) == 
       \<exists>zero[M]. empty(M,zero) & 
@@ -1198,7 +1198,7 @@
 
 
 
-subsubsection{*The Predicate ``Is A List''*}
+subsubsection\<open>The Predicate ``Is A List''\<close>
 
 (* mem_list(M,A,l) == 
       \<exists>n[M]. \<exists>listn[M]. 
@@ -1233,7 +1233,7 @@
 done
 
 
-subsubsection{*The Predicate ``Is @{term "list(A)"}''*}
+subsubsection\<open>The Predicate ``Is @{term "list(A)"}''\<close>
 
 (* is_list(M,A,Z) == \<forall>l[M]. l \<in> Z \<longleftrightarrow> mem_list(M,A,l) *)
 definition
@@ -1264,7 +1264,7 @@
 done
 
 
-subsubsection{*The Formula Functor, Internalized*}
+subsubsection\<open>The Formula Functor, Internalized\<close>
 
 definition formula_functor_fm :: "[i,i]=>i" where
 (*     "is_formula_functor(M,X,Z) ==
@@ -1307,7 +1307,7 @@
 done
 
 
-subsubsection{*The Formula @{term is_formula_N}, Internalized*}
+subsubsection\<open>The Formula @{term is_formula_N}, Internalized\<close>
 
 (*  "is_formula_N(M,n,Z) == 
       \<exists>zero[M]. empty(M,zero) & 
@@ -1348,7 +1348,7 @@
 
 
 
-subsubsection{*The Predicate ``Is A Formula''*}
+subsubsection\<open>The Predicate ``Is A Formula''\<close>
 
 (*  mem_formula(M,p) == 
       \<exists>n[M]. \<exists>formn[M]. 
@@ -1383,7 +1383,7 @@
 
 
 
-subsubsection{*The Predicate ``Is @{term "formula"}''*}
+subsubsection\<open>The Predicate ``Is @{term "formula"}''\<close>
 
 (* is_formula(M,Z) == \<forall>p[M]. p \<in> Z \<longleftrightarrow> mem_formula(M,p) *)
 definition
@@ -1412,12 +1412,12 @@
 done
 
 
-subsubsection{*The Operator @{term is_transrec}*}
+subsubsection\<open>The Operator @{term is_transrec}\<close>
 
-text{*The three arguments of @{term p} are always 2, 1, 0.  It is buried
+text\<open>The three arguments of @{term p} are always 2, 1, 0.  It is buried
    within eight quantifiers!
    We call @{term p} with arguments a, f, z by equating them with 
-  the corresponding quantified variables with de Bruijn indices 2, 1, 0.*}
+  the corresponding quantified variables with de Bruijn indices 2, 1, 0.\<close>
 
 (* is_transrec :: "[i=>o, [i,i,i]=>o, i, i] => o"
    "is_transrec(M,MH,a,z) == 
--- a/src/ZF/Constructible/L_axioms.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/L_axioms.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,11 +2,11 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section {* The ZF Axioms (Except Separation) in L *}
+section \<open>The ZF Axioms (Except Separation) in L\<close>
 
 theory L_axioms imports Formula Relative Reflection MetaExists begin
 
-text {* The class L satisfies the premises of locale @{text M_trivial} *}
+text \<open>The class L satisfies the premises of locale @{text M_trivial}\<close>
 
 lemma transL: "[| y\<in>x; L(x) |] ==> L(y)"
 apply (insert Transset_Lset)
@@ -38,7 +38,7 @@
 apply (blast intro: transL)
 done
 
-text{*We don't actually need @{term L} to satisfy the foundation axiom.*}
+text\<open>We don't actually need @{term L} to satisfy the foundation axiom.\<close>
 theorem foundation_ax: "foundation_ax(L)"
 apply (simp add: foundation_ax_def)
 apply (rule rallI) 
@@ -46,7 +46,7 @@
 apply (blast intro: transL)
 done
 
-subsection{*For L to satisfy Replacement *}
+subsection\<open>For L to satisfy Replacement\<close>
 
 (*Can't move these to Formula unless the definition of univalent is moved
 there too!*)
@@ -78,8 +78,8 @@
 apply (simp_all add: Replace_iff univalent_def, blast)
 done
 
-subsection{*Instantiating the locale @{text M_trivial}*}
-text{*No instances of Separation yet.*}
+subsection\<open>Instantiating the locale @{text M_trivial}\<close>
+text\<open>No instances of Separation yet.\<close>
 
 lemma Lset_mono_le: "mono_le_subset(Lset)"
 by (simp add: mono_le_subset_def le_imp_subset Lset_mono)
@@ -110,9 +110,9 @@
 ...and dozens of similar ones.
 *)
 
-subsection{*Instantiation of the locale @{text reflection}*}
+subsection\<open>Instantiation of the locale @{text reflection}\<close>
 
-text{*instances of locale constants*}
+text\<open>instances of locale constants\<close>
 
 definition
   L_F0 :: "[i=>o,i] => i" where
@@ -127,8 +127,8 @@
     "L_ClEx(P) == \<lambda>a. Limit(a) \<and> normalize(L_FF(P),a) = a"
 
 
-text{*We must use the meta-existential quantifier; otherwise the reflection
-      terms become enormous!*}
+text\<open>We must use the meta-existential quantifier; otherwise the reflection
+      terms become enormous!\<close>
 definition
   L_Reflects :: "[i=>o,[i,i]=>o] => prop"  ("(3REFLECTS/ [_,/ _])") where
     "REFLECTS[P,Q] == (??Cl. Closed_Unbounded(Cl) &
@@ -223,8 +223,8 @@
 apply (intro Imp_reflection All_reflection, assumption)
 done
 
-text{*This version handles an alternative form of the bounded quantifier
-      in the second argument of @{text REFLECTS}.*}
+text\<open>This version handles an alternative form of the bounded quantifier
+      in the second argument of @{text REFLECTS}.\<close>
 theorem Rex_reflection':
      "REFLECTS[\<lambda>x. P(fst(x),snd(x)), \<lambda>a x. Q(a,fst(x),snd(x))]
       ==> REFLECTS[\<lambda>x. \<exists>z[L]. P(x,z), \<lambda>a x. \<exists>z[##Lset(a)]. Q(a,x,z)]"
@@ -232,7 +232,7 @@
 apply (erule Rex_reflection [unfolded rex_def Bex_def]) 
 done
 
-text{*As above.*}
+text\<open>As above.\<close>
 theorem Rall_reflection':
      "REFLECTS[\<lambda>x. P(fst(x),snd(x)), \<lambda>a x. Q(a,fst(x),snd(x))]
       ==> REFLECTS[\<lambda>x. \<forall>z[L]. P(x,z), \<lambda>a x. \<forall>z[##Lset(a)]. Q(a,x,z)]"
@@ -263,9 +263,9 @@
 by blast
 
 
-subsection{*Internalized Formulas for some Set-Theoretic Concepts*}
+subsection\<open>Internalized Formulas for some Set-Theoretic Concepts\<close>
 
-subsubsection{*Some numbers to help write de Bruijn indices*}
+subsubsection\<open>Some numbers to help write de Bruijn indices\<close>
 
 abbreviation
   digit3 :: i   ("3") where "3 == succ(2)"
@@ -289,7 +289,7 @@
   digit9 :: i   ("9") where "9 == succ(8)"
 
 
-subsubsection{*The Empty Set, Internalized*}
+subsubsection\<open>The Empty Set, Internalized\<close>
 
 definition
   empty_fm :: "i=>i" where
@@ -317,7 +317,7 @@
 apply (intro FOL_reflections)
 done
 
-text{*Not used.  But maybe useful?*}
+text\<open>Not used.  But maybe useful?\<close>
 lemma Transset_sats_empty_fm_eq_0:
    "[| n \<in> nat; env \<in> list(A); Transset(A)|]
     ==> sats(A, empty_fm(n), env) \<longleftrightarrow> nth(n,env) = 0"
@@ -328,7 +328,7 @@
 done
 
 
-subsubsection{*Unordered Pairs, Internalized*}
+subsubsection\<open>Unordered Pairs, Internalized\<close>
 
 definition
   upair_fm :: "[i,i,i]=>i" where
@@ -354,7 +354,7 @@
        ==> upair(##A, x, y, z) \<longleftrightarrow> sats(A, upair_fm(i,j,k), env)"
 by (simp add: sats_upair_fm)
 
-text{*Useful? At least it refers to "real" unordered pairs*}
+text\<open>Useful? At least it refers to "real" unordered pairs\<close>
 lemma sats_upair_fm2 [simp]:
    "[| x \<in> nat; y \<in> nat; z < length(env); env \<in> list(A); Transset(A)|]
     ==> sats(A, upair_fm(x,y,z), env) \<longleftrightarrow>
@@ -371,7 +371,7 @@
 apply (intro FOL_reflections)
 done
 
-subsubsection{*Ordered pairs, Internalized*}
+subsubsection\<open>Ordered pairs, Internalized\<close>
 
 definition
   pair_fm :: "[i,i,i]=>i" where
@@ -404,7 +404,7 @@
 done
 
 
-subsubsection{*Binary Unions, Internalized*}
+subsubsection\<open>Binary Unions, Internalized\<close>
 
 definition
   union_fm :: "[i,i,i]=>i" where
@@ -436,7 +436,7 @@
 done
 
 
-subsubsection{*Set ``Cons,'' Internalized*}
+subsubsection\<open>Set ``Cons,'' Internalized\<close>
 
 definition
   cons_fm :: "[i,i,i]=>i" where
@@ -469,7 +469,7 @@
 done
 
 
-subsubsection{*Successor Function, Internalized*}
+subsubsection\<open>Successor Function, Internalized\<close>
 
 definition
   succ_fm :: "[i,i]=>i" where
@@ -499,7 +499,7 @@
 done
 
 
-subsubsection{*The Number 1, Internalized*}
+subsubsection\<open>The Number 1, Internalized\<close>
 
 (* "number1(M,a) == (\<exists>x[M]. empty(M,x) & successor(M,x,a))" *)
 definition
@@ -529,7 +529,7 @@
 done
 
 
-subsubsection{*Big Union, Internalized*}
+subsubsection\<open>Big Union, Internalized\<close>
 
 (*  "big_union(M,A,z) == \<forall>x[M]. x \<in> z \<longleftrightarrow> (\<exists>y[M]. y\<in>A & x \<in> y)" *)
 definition
@@ -562,13 +562,13 @@
 done
 
 
-subsubsection{*Variants of Satisfaction Definitions for Ordinals, etc.*}
+subsubsection\<open>Variants of Satisfaction Definitions for Ordinals, etc.\<close>
 
-text{*The @{text sats} theorems below are standard versions of the ones proved
+text\<open>The @{text sats} theorems below are standard versions of the ones proved
 in theory @{text Formula}.  They relate elements of type @{term formula} to
 relativized concepts such as @{term subset} or @{term ordinal} rather than to
 real concepts such as @{term Ord}.  Now that we have instantiated the locale
-@{text M_trivial}, we no longer require the earlier versions.*}
+@{text M_trivial}, we no longer require the earlier versions.\<close>
 
 lemma sats_subset_fm':
    "[|x \<in> nat; y \<in> nat; env \<in> list(A)|]
@@ -611,7 +611,7 @@
 done
 
 
-subsubsection{*Membership Relation, Internalized*}
+subsubsection\<open>Membership Relation, Internalized\<close>
 
 definition
   Memrel_fm :: "[i,i]=>i" where
@@ -645,7 +645,7 @@
 apply (intro FOL_reflections pair_reflection)
 done
 
-subsubsection{*Predecessor Set, Internalized*}
+subsubsection\<open>Predecessor Set, Internalized\<close>
 
 definition
   pred_set_fm :: "[i,i,i,i]=>i" where
@@ -682,7 +682,7 @@
 
 
 
-subsubsection{*Domain of a Relation, Internalized*}
+subsubsection\<open>Domain of a Relation, Internalized\<close>
 
 (* "is_domain(M,r,z) ==
         \<forall>x[M]. (x \<in> z \<longleftrightarrow> (\<exists>w[M]. w\<in>r & (\<exists>y[M]. pair(M,x,y,w))))" *)
@@ -717,7 +717,7 @@
 done
 
 
-subsubsection{*Range of a Relation, Internalized*}
+subsubsection\<open>Range of a Relation, Internalized\<close>
 
 (* "is_range(M,r,z) ==
         \<forall>y[M]. (y \<in> z \<longleftrightarrow> (\<exists>w[M]. w\<in>r & (\<exists>x[M]. pair(M,x,y,w))))" *)
@@ -752,7 +752,7 @@
 done
 
 
-subsubsection{*Field of a Relation, Internalized*}
+subsubsection\<open>Field of a Relation, Internalized\<close>
 
 (* "is_field(M,r,z) ==
         \<exists>dr[M]. is_domain(M,r,dr) &
@@ -789,7 +789,7 @@
 done
 
 
-subsubsection{*Image under a Relation, Internalized*}
+subsubsection\<open>Image under a Relation, Internalized\<close>
 
 (* "image(M,r,A,z) ==
         \<forall>y[M]. (y \<in> z \<longleftrightarrow> (\<exists>w[M]. w\<in>r & (\<exists>x[M]. x\<in>A & pair(M,x,y,w))))" *)
@@ -825,7 +825,7 @@
 done
 
 
-subsubsection{*Pre-Image under a Relation, Internalized*}
+subsubsection\<open>Pre-Image under a Relation, Internalized\<close>
 
 (* "pre_image(M,r,A,z) ==
         \<forall>x[M]. x \<in> z \<longleftrightarrow> (\<exists>w[M]. w\<in>r & (\<exists>y[M]. y\<in>A & pair(M,x,y,w)))" *)
@@ -861,7 +861,7 @@
 done
 
 
-subsubsection{*Function Application, Internalized*}
+subsubsection\<open>Function Application, Internalized\<close>
 
 (* "fun_apply(M,f,x,y) ==
         (\<exists>xs[M]. \<exists>fxs[M].
@@ -898,7 +898,7 @@
 done
 
 
-subsubsection{*The Concept of Relation, Internalized*}
+subsubsection\<open>The Concept of Relation, Internalized\<close>
 
 (* "is_relation(M,r) ==
         (\<forall>z[M]. z\<in>r \<longrightarrow> (\<exists>x[M]. \<exists>y[M]. pair(M,x,y,z)))" *)
@@ -930,7 +930,7 @@
 done
 
 
-subsubsection{*The Concept of Function, Internalized*}
+subsubsection\<open>The Concept of Function, Internalized\<close>
 
 (* "is_function(M,r) ==
         \<forall>x[M]. \<forall>y[M]. \<forall>y'[M]. \<forall>p[M]. \<forall>p'[M].
@@ -967,7 +967,7 @@
 done
 
 
-subsubsection{*Typed Functions, Internalized*}
+subsubsection\<open>Typed Functions, Internalized\<close>
 
 (* "typed_function(M,A,B,r) ==
         is_function(M,r) & is_relation(M,r) & is_domain(M,r,A) &
@@ -1026,7 +1026,7 @@
 done
 
 
-subsubsection{*Composition of Relations, Internalized*}
+subsubsection\<open>Composition of Relations, Internalized\<close>
 
 (* "composition(M,r,s,t) ==
         \<forall>p[M]. p \<in> t \<longleftrightarrow>
@@ -1067,7 +1067,7 @@
 done
 
 
-subsubsection{*Injections, Internalized*}
+subsubsection\<open>Injections, Internalized\<close>
 
 (* "injection(M,A,B,f) ==
         typed_function(M,A,B,f) &
@@ -1108,7 +1108,7 @@
 done
 
 
-subsubsection{*Surjections, Internalized*}
+subsubsection\<open>Surjections, Internalized\<close>
 
 (*  surjection :: "[i=>o,i,i,i] => o"
     "surjection(M,A,B,f) ==
@@ -1147,7 +1147,7 @@
 
 
 
-subsubsection{*Bijections, Internalized*}
+subsubsection\<open>Bijections, Internalized\<close>
 
 (*   bijection :: "[i=>o,i,i,i] => o"
     "bijection(M,A,B,f) == injection(M,A,B,f) & surjection(M,A,B,f)" *)
@@ -1179,7 +1179,7 @@
 done
 
 
-subsubsection{*Restriction of a Relation, Internalized*}
+subsubsection\<open>Restriction of a Relation, Internalized\<close>
 
 
 (* "restriction(M,r,A,z) ==
@@ -1215,7 +1215,7 @@
 apply (intro FOL_reflections pair_reflection)
 done
 
-subsubsection{*Order-Isomorphisms, Internalized*}
+subsubsection\<open>Order-Isomorphisms, Internalized\<close>
 
 (*  order_isomorphism :: "[i=>o,i,i,i,i,i] => o"
    "order_isomorphism(M,A,r,B,s,f) ==
@@ -1266,9 +1266,9 @@
 apply (intro FOL_reflections function_reflections bijection_reflection)
 done
 
-subsubsection{*Limit Ordinals, Internalized*}
+subsubsection\<open>Limit Ordinals, Internalized\<close>
 
-text{*A limit ordinal is a non-empty, successor-closed ordinal*}
+text\<open>A limit ordinal is a non-empty, successor-closed ordinal\<close>
 
 (* "limit_ordinal(M,a) ==
         ordinal(M,a) & ~ empty(M,a) &
@@ -1306,7 +1306,7 @@
              empty_reflection successor_reflection)
 done
 
-subsubsection{*Finite Ordinals: The Predicate ``Is A Natural Number''*}
+subsubsection\<open>Finite Ordinals: The Predicate ``Is A Natural Number''\<close>
 
 (*     "finite_ordinal(M,a) == 
         ordinal(M,a) & ~ limit_ordinal(M,a) & 
@@ -1342,7 +1342,7 @@
 done
 
 
-subsubsection{*Omega: The Set of Natural Numbers*}
+subsubsection\<open>Omega: The Set of Natural Numbers\<close>
 
 (* omega(M,a) == limit_ordinal(M,a) & (\<forall>x[M]. x\<in>a \<longrightarrow> ~ limit_ordinal(M,x)) *)
 definition
--- a/src/ZF/Constructible/MetaExists.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/MetaExists.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,12 +2,12 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section{*The meta-existential quantifier*}
+section\<open>The meta-existential quantifier\<close>
 
 theory MetaExists imports Main begin
 
-text{*Allows quantification over any term having sort @{text logic}.  Used to
-quantify over classes.  Yields a proposition rather than a FOL formula.*}
+text\<open>Allows quantification over any term having sort @{text logic}.  Used to
+quantify over classes.  Yields a proposition rather than a FOL formula.\<close>
 
 definition
   ex :: "(('a::{}) => prop) => prop"  (binder "?? " 0) where
--- a/src/ZF/Constructible/Normal.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/Normal.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,20 +2,20 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section {*Closed Unbounded Classes and Normal Functions*}
+section \<open>Closed Unbounded Classes and Normal Functions\<close>
 
 theory Normal imports Main begin
 
-text{*
+text\<open>
 One source is the book
 
 Frank R. Drake.
 \emph{Set Theory: An Introduction to Large Cardinals}.
 North-Holland, 1974.
-*}
+\<close>
 
 
-subsection {*Closed and Unbounded (c.u.) Classes of Ordinals*}
+subsection \<open>Closed and Unbounded (c.u.) Classes of Ordinals\<close>
 
 definition
   Closed :: "(i=>o) => o" where
@@ -30,7 +30,7 @@
     "Closed_Unbounded(P) == Closed(P) \<and> Unbounded(P)"
 
 
-subsubsection{*Simple facts about c.u. classes*}
+subsubsection\<open>Simple facts about c.u. classes\<close>
 
 lemma ClosedI:
      "[| !!I. [| I \<noteq> 0; \<forall>i\<in>I. Ord(i) \<and> P(i) |] ==> P(\<Union>(I)) |] 
@@ -50,16 +50,16 @@
 by (simp add: Closed_Unbounded_def) 
 
 
-text{*The universal class, V, is closed and unbounded.
-      A bit odd, since C. U. concerns only ordinals, but it's used below!*}
+text\<open>The universal class, V, is closed and unbounded.
+      A bit odd, since C. U. concerns only ordinals, but it's used below!\<close>
 theorem Closed_Unbounded_V [simp]: "Closed_Unbounded(\<lambda>x. True)"
 by (unfold Closed_Unbounded_def Closed_def Unbounded_def, blast)
 
-text{*The class of ordinals, @{term Ord}, is closed and unbounded.*}
+text\<open>The class of ordinals, @{term Ord}, is closed and unbounded.\<close>
 theorem Closed_Unbounded_Ord   [simp]: "Closed_Unbounded(Ord)"
 by (unfold Closed_Unbounded_def Closed_def Unbounded_def, blast)
 
-text{*The class of limit ordinals, @{term Limit}, is closed and unbounded.*}
+text\<open>The class of limit ordinals, @{term Limit}, is closed and unbounded.\<close>
 theorem Closed_Unbounded_Limit [simp]: "Closed_Unbounded(Limit)"
 apply (simp add: Closed_Unbounded_def Closed_def Unbounded_def Limit_Union, 
        clarify)
@@ -67,17 +67,17 @@
 apply (blast intro: oadd_lt_self oadd_LimitI Limit_nat Limit_has_0) 
 done
 
-text{*The class of cardinals, @{term Card}, is closed and unbounded.*}
+text\<open>The class of cardinals, @{term Card}, is closed and unbounded.\<close>
 theorem Closed_Unbounded_Card  [simp]: "Closed_Unbounded(Card)"
 apply (simp add: Closed_Unbounded_def Closed_def Unbounded_def Card_Union)
 apply (blast intro: lt_csucc Card_csucc)
 done
 
 
-subsubsection{*The intersection of any set-indexed family of c.u. classes is
-      c.u.*}
+subsubsection\<open>The intersection of any set-indexed family of c.u. classes is
+      c.u.\<close>
 
-text{*The constructions below come from Kunen, \emph{Set Theory}, page 78.*}
+text\<open>The constructions below come from Kunen, \emph{Set Theory}, page 78.\<close>
 locale cub_family =
   fixes P and A
   fixes next_greater -- "the next ordinal satisfying class @{term A}"
@@ -89,11 +89,11 @@
       and "sup_greater(x) == \<Union>a\<in>A. next_greater(a,x)"
  
 
-text{*Trivial that the intersection is closed.*}
+text\<open>Trivial that the intersection is closed.\<close>
 lemma (in cub_family) Closed_INT: "Closed(\<lambda>x. \<forall>i\<in>A. P(i,x))"
 by (blast intro: ClosedI ClosedD [OF closed])
 
-text{*All remaining effort goes to show that the intersection is unbounded.*}
+text\<open>All remaining effort goes to show that the intersection is unbounded.\<close>
 
 lemma (in cub_family) Ord_sup_greater:
      "Ord(sup_greater(x))"
@@ -103,8 +103,8 @@
      "Ord(next_greater(a,x))"
 by (simp add: next_greater_def Ord_Least)
 
-text{*@{term next_greater} works as expected: it returns a larger value
-and one that belongs to class @{term "P(a)"}. *}
+text\<open>@{term next_greater} works as expected: it returns a larger value
+and one that belongs to class @{term "P(a)"}.\<close>
 lemma (in cub_family) next_greater_lemma:
      "[| Ord(x); a\<in>A |] ==> P(a, next_greater(a,x)) \<and> x < next_greater(a,x)"
 apply (simp add: next_greater_def)
@@ -142,9 +142,9 @@
 apply (rule le_anti_sym)
 apply (rule le_implies_UN_le_UN) 
 apply (blast intro: leI next_greater_gt Ord_iterates Ord_sup_greater)  
-txt{*Opposite bound:
+txt\<open>Opposite bound:
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply (rule UN_least_le) 
 apply (blast intro: Ord_UN Ord_iterates Ord_sup_greater)  
 apply (rule_tac a="succ(n)" in UN_upper_le)
@@ -199,7 +199,7 @@
 done
 
 
-subsection {*Normal Functions*} 
+subsection \<open>Normal Functions\<close> 
 
 definition
   mono_le_subset :: "(i=>i) => o" where
@@ -218,7 +218,7 @@
     "Normal(F) == mono_Ord(F) \<and> cont_Ord(F)"
 
 
-subsubsection{*Immediate properties of the definitions*}
+subsubsection\<open>Immediate properties of the definitions\<close>
 
 lemma NormalI:
      "[|!!i j. i<j ==> F(i) < F(j);  !!l. Limit(l) ==> F(l) = (\<Union>i<l. F(i))|]
@@ -267,9 +267,9 @@
 qed
 
 
-subsubsection{*The class of fixedpoints is closed and unbounded*}
+subsubsection\<open>The class of fixedpoints is closed and unbounded\<close>
 
-text{*The proof is from Drake, pages 113--114.*}
+text\<open>The proof is from Drake, pages 113--114.\<close>
 
 lemma mono_Ord_imp_le_subset: "mono_Ord(F) ==> mono_le_subset(F)"
 apply (simp add: mono_le_subset_def, clarify)
@@ -278,26 +278,26 @@
 apply (blast intro: lt_Ord2 mono_Ord_imp_Ord mono_Ord_imp_mono) 
 done
 
-text{*The following equation is taken for granted in any set theory text.*}
+text\<open>The following equation is taken for granted in any set theory text.\<close>
 lemma cont_Ord_Union:
      "[| cont_Ord(F); mono_le_subset(F); X=0 \<longrightarrow> F(0)=0; \<forall>x\<in>X. Ord(x) |] 
       ==> F(\<Union>(X)) = (\<Union>y\<in>X. F(y))"
 apply (frule Ord_set_cases)
 apply (erule disjE, force) 
 apply (thin_tac "X=0 \<longrightarrow> Q" for Q, auto)
- txt{*The trival case of @{term "\<Union>X \<in> X"}*}
+ txt\<open>The trival case of @{term "\<Union>X \<in> X"}\<close>
  apply (rule equalityI, blast intro: Ord_Union_eq_succD) 
  apply (simp add: mono_le_subset_def UN_subset_iff le_subset_iff) 
  apply (blast elim: equalityE)
-txt{*The limit case, @{term "Limit(\<Union>X)"}:
+txt\<open>The limit case, @{term "Limit(\<Union>X)"}:
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply (simp add: OUN_Union_eq cont_Ord_def)
 apply (rule equalityI) 
-txt{*First inclusion:*}
+txt\<open>First inclusion:\<close>
  apply (rule UN_least [OF OUN_least])
  apply (simp add: mono_le_subset_def, blast intro: leI) 
-txt{*Second inclusion:*}
+txt\<open>Second inclusion:\<close>
 apply (rule UN_least) 
 apply (frule Union_upper_le, blast, blast intro: Ord_Union)
 apply (erule leE, drule ltD, elim UnionE)
@@ -329,7 +329,7 @@
      "[| n\<in>nat;  Normal(F);  Ord(x) |] ==> Ord(F^n (x))"  
 by (simp add: Ord_iterates) 
 
-text{*THIS RESULT IS UNUSED*}
+text\<open>THIS RESULT IS UNUSED\<close>
 lemma iterates_omega_Limit:
      "[| Normal(F);  x < F(x) |] ==> Limit(F^\<omega> (x))"  
 apply (frule lt_Ord) 
@@ -353,9 +353,9 @@
  apply (simp_all add: iterates_omega_triv [OF sym])  (*for subgoal 2*)
 apply (simp add:  iterates_omega_def Normal_Union) 
 apply (rule equalityI, force simp add: nat_succI) 
-txt{*Opposite inclusion:
+txt\<open>Opposite inclusion:
 @{subgoals[display,indent=0,margin=65]}
-*}
+\<close>
 apply clarify
 apply (rule UN_I, assumption) 
 apply (frule iterates_Normal_increasing, assumption, assumption, simp)
@@ -382,13 +382,13 @@
               Normal_imp_fp_Unbounded)
 
 
-subsubsection{*Function @{text normalize}*}
+subsubsection\<open>Function @{text normalize}\<close>
 
-text{*Function @{text normalize} maps a function @{text F} to a 
+text\<open>Function @{text normalize} maps a function @{text F} to a 
       normal function that bounds it above.  The result is normal if and
       only if @{text F} is continuous: succ is not bounded above by any 
       normal function, by @{thm [source] Normal_imp_fp_Unbounded}.
-*}
+\<close>
 definition
   normalize :: "[i=>i, i] => i" where
     "normalize(F,a) == transrec2(a, F(0), \<lambda>x r. F(succ(x)) \<union> succ(r))"
@@ -451,9 +451,9 @@
 qed
 
 
-subsection {*The Alephs*}
-text {*This is the well-known transfinite enumeration of the cardinal 
-numbers.*}
+subsection \<open>The Alephs\<close>
+text \<open>This is the well-known transfinite enumeration of the cardinal 
+numbers.\<close>
 
 definition
   Aleph :: "i => i" where
--- a/src/ZF/Constructible/Rank.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/Rank.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,12 +2,12 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section {*Absoluteness for Order Types, Rank Functions and Well-Founded 
-         Relations*}
+section \<open>Absoluteness for Order Types, Rank Functions and Well-Founded 
+         Relations\<close>
 
 theory Rank imports WF_absolute begin
 
-subsection {*Order Types: A Direct Construction by Replacement*}
+subsection \<open>Order Types: A Direct Construction by Replacement\<close>
 
 locale M_ordertype = M_basic +
 assumes well_ord_iso_separation:
@@ -15,7 +15,7 @@
       ==> separation (M, \<lambda>x. x\<in>A \<longrightarrow> (\<exists>y[M]. (\<exists>p[M].
                      fun_apply(M,f,x,y) & pair(M,y,x,p) & p \<in> r)))"
   and obase_separation:
-     --{*part of the order type formalization*}
+     --\<open>part of the order type formalization\<close>
      "[| M(A); M(r) |]
       ==> separation(M, \<lambda>a. \<exists>x[M]. \<exists>g[M]. \<exists>mx[M]. \<exists>par[M].
              ordinal(M,x) & membership(M,x,mx) & pred_set(M,A,a,r,par) &
@@ -34,8 +34,8 @@
              pred_set(M,A,a,r,par) & order_isomorphism(M,par,r,x,mx,g))"
 
 
-text{*Inductive argument for Kunen's Lemma I 6.1, etc.
-      Simple proof from Halmos, page 72*}
+text\<open>Inductive argument for Kunen's Lemma I 6.1, etc.
+      Simple proof from Halmos, page 72\<close>
 lemma  (in M_ordertype) wellordered_iso_subset_lemma: 
      "[| wellordered(M,A,r);  f \<in> ord_iso(A,r, A',r);  A'<= A;  y \<in> A;  
        M(A);  M(f);  M(r) |] ==> ~ <f`y, y> \<in> r"
@@ -48,8 +48,8 @@
 done
 
 
-text{*Kunen's Lemma I 6.1, page 14: 
-      there's no order-isomorphism to an initial segment of a well-ordering*}
+text\<open>Kunen's Lemma I 6.1, page 14: 
+      there's no order-isomorphism to an initial segment of a well-ordering\<close>
 lemma (in M_ordertype) wellordered_iso_predD:
      "[| wellordered(M,A,r);  f \<in> ord_iso(A, r, Order.pred(A,x,r), r);  
        M(A);  M(f);  M(r) |] ==> x \<notin> A"
@@ -75,7 +75,7 @@
 done
 
 
-text{*Simple consequence of Lemma 6.1*}
+text\<open>Simple consequence of Lemma 6.1\<close>
 lemma (in M_ordertype) wellordered_iso_pred_eq:
      "[| wellordered(M,A,r);
        f \<in> ord_iso(Order.pred(A,a,r), r, Order.pred(A,c,r), r);   
@@ -89,11 +89,11 @@
 done
 
 
-text{*Following Kunen's Theorem I 7.6, page 17.  Note that this material is
-not required elsewhere.*}
+text\<open>Following Kunen's Theorem I 7.6, page 17.  Note that this material is
+not required elsewhere.\<close>
 
-text{*Can't use @{text well_ord_iso_preserving} because it needs the
-strong premise @{term "well_ord(A,r)"}*}
+text\<open>Can't use @{text well_ord_iso_preserving} because it needs the
+strong premise @{term "well_ord(A,r)"}\<close>
 lemma (in M_ordertype) ord_iso_pred_imp_lt:
      "[| f \<in> ord_iso(Order.pred(A,x,r), r, i, Memrel(i));
          g \<in> ord_iso(Order.pred(A,y,r), r, j, Memrel(j));
@@ -103,18 +103,18 @@
 apply (frule wellordered_is_trans_on, assumption)
 apply (frule_tac y=y in transM, assumption) 
 apply (rule_tac i=i and j=j in Ord_linear_lt, auto)  
-txt{*case @{term "i=j"} yields a contradiction*}
+txt\<open>case @{term "i=j"} yields a contradiction\<close>
  apply (rule_tac x1=x and A1="Order.pred(A,y,r)" in 
           wellordered_iso_predD [THEN notE]) 
    apply (blast intro: wellordered_subset [OF _ pred_subset]) 
   apply (simp add: trans_pred_pred_eq)
   apply (blast intro: Ord_iso_implies_eq ord_iso_sym ord_iso_trans) 
  apply (simp_all add: pred_iff pred_closed converse_closed comp_closed)
-txt{*case @{term "j<i"} also yields a contradiction*}
+txt\<open>case @{term "j<i"} also yields a contradiction\<close>
 apply (frule restrict_ord_iso2, assumption+) 
 apply (frule ord_iso_sym [THEN ord_iso_is_bij, THEN bij_is_fun]) 
 apply (frule apply_type, blast intro: ltD) 
-  --{*thus @{term "converse(f)`j \<in> Order.pred(A,x,r)"}*}
+  --\<open>thus @{term "converse(f)`j \<in> Order.pred(A,x,r)"}\<close>
 apply (simp add: pred_iff) 
 apply (subgoal_tac
        "\<exists>h[M]. h \<in> ord_iso(Order.pred(A,y,r), r, 
@@ -137,26 +137,26 @@
 
 definition  
   obase :: "[i=>o,i,i] => i" where
-       --{*the domain of @{text om}, eventually shown to equal @{text A}*}
+       --\<open>the domain of @{text om}, eventually shown to equal @{text A}\<close>
    "obase(M,A,r) == {a\<in>A. \<exists>x[M]. \<exists>g[M]. Ord(x) & 
                           g \<in> ord_iso(Order.pred(A,a,r),r,x,Memrel(x))}"
 
 definition
   omap :: "[i=>o,i,i,i] => o" where
-    --{*the function that maps wosets to order types*}
+    --\<open>the function that maps wosets to order types\<close>
    "omap(M,A,r,f) == 
         \<forall>z[M].
          z \<in> f \<longleftrightarrow> (\<exists>a\<in>A. \<exists>x[M]. \<exists>g[M]. z = <a,x> & Ord(x) & 
                         g \<in> ord_iso(Order.pred(A,a,r),r,x,Memrel(x)))"
 
 definition
-  otype :: "[i=>o,i,i,i] => o" where --{*the order types themselves*}
+  otype :: "[i=>o,i,i,i] => o" where --\<open>the order types themselves\<close>
    "otype(M,A,r,i) == \<exists>f[M]. omap(M,A,r,f) & is_range(M,f,i)"
 
 
-text{*Can also be proved with the premise @{term "M(z)"} instead of
+text\<open>Can also be proved with the premise @{term "M(z)"} instead of
       @{term "M(f)"}, but that version is less useful.  This lemma
-      is also more useful than the definition, @{text omap_def}.*}
+      is also more useful than the definition, @{text omap_def}.\<close>
 lemma (in M_ordertype) omap_iff:
      "[| omap(M,A,r,f); M(A); M(f) |] 
       ==> z \<in> f \<longleftrightarrow>
@@ -256,7 +256,7 @@
 done
 
 
-text{*This is not the final result: we must show @{term "oB(A,r) = A"}*}
+text\<open>This is not the final result: we must show @{term "oB(A,r) = A"}\<close>
 lemma (in M_ordertype) omap_ord_iso:
      "[| wellordered(M,A,r); omap(M,A,r,f); otype(M,A,r,i); 
        M(A); M(r); M(f); M(i) |] ==> f \<in> ord_iso(obase(M,A,r),r,i,Memrel(i))"
@@ -266,15 +266,15 @@
 apply (frule_tac a=x in apply_Pair, assumption) 
 apply (frule_tac a=y in apply_Pair, assumption) 
 apply (auto simp add: omap_iff)
- txt{*direction 1: assuming @{term "\<langle>x,y\<rangle> \<in> r"}*}
+ txt\<open>direction 1: assuming @{term "\<langle>x,y\<rangle> \<in> r"}\<close>
  apply (blast intro: ltD ord_iso_pred_imp_lt)
- txt{*direction 2: proving @{term "\<langle>x,y\<rangle> \<in> r"} using linearity of @{term r}*}
+ txt\<open>direction 2: proving @{term "\<langle>x,y\<rangle> \<in> r"} using linearity of @{term r}\<close>
 apply (rename_tac x y g ga) 
 apply (frule wellordered_is_linear, assumption, 
        erule_tac x=x and y=y in linearE, assumption+) 
-txt{*the case @{term "x=y"} leads to immediate contradiction*} 
+txt\<open>the case @{term "x=y"} leads to immediate contradiction\<close> 
 apply (blast elim: mem_irrefl) 
-txt{*the case @{term "\<langle>y,x\<rangle> \<in> r"}: handle like the opposite direction*}
+txt\<open>the case @{term "\<langle>y,x\<rangle> \<in> r"}: handle like the opposite direction\<close>
 apply (blast dest: ord_iso_pred_imp_lt ltD elim: mem_asym) 
 done
 
@@ -284,7 +284,7 @@
 apply (frule wellordered_is_trans_on, assumption)
 apply (rule OrdI) 
         prefer 2 apply (simp add: image_iff omap_iff Ord_def, blast) 
-txt{*Hard part is to show that the image is a transitive set.*}
+txt\<open>Hard part is to show that the image is a transitive set.\<close>
 apply (simp add: Transset_def, clarify) 
 apply (simp add: image_iff pred_iff apply_iff [OF omap_funtype [of A r f i]])
 apply (rename_tac c j, clarify)
@@ -331,8 +331,8 @@
 
 
 
-text{*Main result: @{term om} gives the order-isomorphism 
-      @{term "\<langle>A,r\<rangle> \<cong> \<langle>i, Memrel(i)\<rangle>"} *}
+text\<open>Main result: @{term om} gives the order-isomorphism 
+      @{term "\<langle>A,r\<rangle> \<cong> \<langle>i, Memrel(i)\<rangle>"}\<close>
 theorem (in M_ordertype) omap_ord_iso_otype:
      "[| wellordered(M,A,r); omap(M,A,r,f); otype(M,A,r,i);
        M(A); M(r); M(f); M(i) |] ==> f \<in> ord_iso(A, r, i, Memrel(i))"
@@ -390,15 +390,15 @@
 apply (blast intro: well_ord_ord_iso well_ord_Memrel)  
 done
 
-subsection {*Kunen's theorem 5.4, page 127*}
+subsection \<open>Kunen's theorem 5.4, page 127\<close>
 
-text{*(a) The notion of Wellordering is absolute*}
+text\<open>(a) The notion of Wellordering is absolute\<close>
 theorem (in M_ordertype) well_ord_abs [simp]: 
      "[| M(A); M(r) |] ==> wellordered(M,A,r) \<longleftrightarrow> well_ord(A,r)" 
 by (blast intro: well_ord_imp_relativized relativized_imp_well_ord)  
 
 
-text{*(b) Order types are absolute*}
+text\<open>(b) Order types are absolute\<close>
 theorem (in M_ordertype) 
      "[| wellordered(M,A,r); f \<in> ord_iso(A, r, i, Memrel(i));
        M(A); M(r); M(f); M(i); Ord(i) |] ==> i = ordertype(A,r)"
@@ -406,11 +406,11 @@
                  Ord_iso_implies_eq ord_iso_sym ord_iso_trans)
 
 
-subsection{*Ordinal Arithmetic: Two Examples of Recursion*}
+subsection\<open>Ordinal Arithmetic: Two Examples of Recursion\<close>
 
-text{*Note: the remainder of this theory is not needed elsewhere.*}
+text\<open>Note: the remainder of this theory is not needed elsewhere.\<close>
 
-subsubsection{*Ordinal Addition*}
+subsubsection\<open>Ordinal Addition\<close>
 
 (*FIXME: update to use new techniques!!*)
  (*This expresses ordinal addition in the language of ZF.  It also 
@@ -478,7 +478,7 @@
 
 
 
-text{*@{text is_oadd_fun}: Relating the pure "language of set theory" to Isabelle/ZF*}
+text\<open>@{text is_oadd_fun}: Relating the pure "language of set theory" to Isabelle/ZF\<close>
 lemma (in M_ord_arith) is_oadd_fun_iff:
    "[| a\<le>j; M(i); M(j); M(a); M(f) |] 
     ==> is_oadd_fun(M,i,j,a,f) \<longleftrightarrow>
@@ -563,7 +563,7 @@
 done
 
 
-subsubsection{*Ordinal Multiplication*}
+subsubsection\<open>Ordinal Multiplication\<close>
 
 lemma omult_eqns_unique:
      "[| omult_eqns(i,x,g,z); omult_eqns(i,x,g,z') |] ==> z=z'"
@@ -602,7 +602,7 @@
     "[| M(i); M(x); M(g); function(g) |] 
      ==> M(THE z. omult_eqns(i, x, g, z))"
 apply (case_tac "Ord(x)")
- prefer 2 apply (simp add: omult_eqns_Not) --{*trivial, non-Ord case*}
+ prefer 2 apply (simp add: omult_eqns_Not) --\<open>trivial, non-Ord case\<close>
 apply (erule Ord_cases) 
   apply (simp add: omult_eqns_0)
  apply (simp add: omult_eqns_succ apply_closed oadd_closed) 
@@ -669,11 +669,11 @@
 
 
 
-subsection {*Absoluteness of Well-Founded Relations*}
+subsection \<open>Absoluteness of Well-Founded Relations\<close>
 
-text{*Relativized to @{term M}: Every well-founded relation is a subset of some
+text\<open>Relativized to @{term M}: Every well-founded relation is a subset of some
 inverse image of an ordinal.  Key step is the construction (in @{term M}) of a
-rank function.*}
+rank function.\<close>
 
 locale M_wfrank = M_trancl +
   assumes wfrank_separation:
@@ -698,8 +698,8 @@
              ordinal(M,rangef)))" 
 
 
-text{*Proving that the relativized instances of Separation or Replacement
-agree with the "real" ones.*}
+text\<open>Proving that the relativized instances of Separation or Replacement
+agree with the "real" ones.\<close>
 
 lemma (in M_wfrank) wfrank_separation':
      "M(r) ==>
@@ -726,8 +726,8 @@
 apply (simp add: relation2_def is_recfun_abs [of "%x. range"])
 done
 
-text{*This function, defined using replacement, is a rank function for
-well-founded relations within the class M.*}
+text\<open>This function, defined using replacement, is a rank function for
+well-founded relations within the class M.\<close>
 definition
   wellfoundedrank :: "[i=>o,i,i] => i" where
     "wellfoundedrank(M,r,A) ==
@@ -766,16 +766,16 @@
 apply (rule wellfounded_induct, assumption, erule (1) transM)
   apply simp
  apply (blast intro: Ord_wfrank_separation', clarify)
-txt{*The reasoning in both cases is that we get @{term y} such that
+txt\<open>The reasoning in both cases is that we get @{term y} such that
    @{term "\<langle>y, x\<rangle> \<in> r^+"}.  We find that
-   @{term "f`y = restrict(f, r^+ -`` {y})"}. *}
+   @{term "f`y = restrict(f, r^+ -`` {y})"}.\<close>
 apply (rule OrdI [OF _ Ord_is_Transset])
- txt{*An ordinal is a transitive set...*}
+ txt\<open>An ordinal is a transitive set...\<close>
  apply (simp add: Transset_def)
  apply clarify
  apply (frule apply_recfun2, assumption)
  apply (force simp add: restrict_iff)
-txt{*...of ordinals.  This second case requires the induction hyp.*}
+txt\<open>...of ordinals.  This second case requires the induction hyp.\<close>
 apply clarify
 apply (rename_tac i y)
 apply (frule apply_recfun2, assumption)
@@ -799,9 +799,9 @@
 apply (simp add: wellfoundedrank_def)
 apply (rule OrdI [OF _ Ord_is_Transset])
  prefer 2
- txt{*by our previous result the range consists of ordinals.*}
+ txt\<open>by our previous result the range consists of ordinals.\<close>
  apply (blast intro: Ord_wfrank_range)
-txt{*We still must show that the range is a transitive set.*}
+txt\<open>We still must show that the range is a transitive set.\<close>
 apply (simp add: Transset_def, clarify, simp)
 apply (rename_tac x i f u)
 apply (frule is_recfun_imp_in_r, assumption)
@@ -814,7 +814,7 @@
    apply (blast intro: is_recfun_restrict trans_trancl dest: apply_recfun2)
   apply simp 
 apply blast 
-txt{*Unicity requirement of Replacement*}
+txt\<open>Unicity requirement of Replacement\<close>
 apply clarify
 apply (frule apply_recfun2, assumption)
 apply (simp add: trans_trancl is_recfun_cut)
@@ -824,7 +824,7 @@
     "[| wellfounded(M,r); M(r); M(A)|]
      ==> function(wellfoundedrank(M,r,A))"
 apply (simp add: wellfoundedrank_def function_def, clarify)
-txt{*Uniqueness: repeated below!*}
+txt\<open>Uniqueness: repeated below!\<close>
 apply (drule is_recfun_functional, assumption)
      apply (blast intro: wellfounded_trancl)
     apply (simp_all add: trancl_subset_times trans_trancl)
@@ -841,7 +841,7 @@
 apply (rule_tac x=x in ReplaceI)
   apply simp 
   apply (rule_tac x=f in rexI, blast, simp_all)
-txt{*Uniqueness (for Replacement): repeated above!*}
+txt\<open>Uniqueness (for Replacement): repeated above!\<close>
 apply clarify
 apply (drule is_recfun_functional, assumption)
     apply (blast intro: wellfounded_trancl)
@@ -875,7 +875,7 @@
   apply (rule_tac x="range(f)" in rexI) 
   apply blast
  apply simp_all
-txt{*Unicity requirement of Replacement*}
+txt\<open>Unicity requirement of Replacement\<close>
 apply clarify
 apply (drule is_recfun_functional, assumption)
     apply (blast intro: wellfounded_trancl)
@@ -897,12 +897,12 @@
 apply (frule is_recfun_restrict [of concl: "r^+" a])
     apply (rule trans_trancl, assumption)
    apply (simp_all add: r_into_trancl trancl_subset_times)
-txt{*Still the same goal, but with new @{text is_recfun} assumptions.*}
+txt\<open>Still the same goal, but with new @{text is_recfun} assumptions.\<close>
 apply (simp add: wellfoundedrank_eq)
 apply (frule_tac a=a in wellfoundedrank_eq, assumption+)
    apply (simp_all add: transM [of a])
-txt{*We have used equations for wellfoundedrank and now must use some
-    for  @{text is_recfun}. *}
+txt\<open>We have used equations for wellfoundedrank and now must use some
+    for  @{text is_recfun}.\<close>
 apply (rule_tac a=a in rangeI)
 apply (simp add: is_recfun_type [THEN apply_iff] vimage_singleton_iff
                  r_into_trancl apply_recfun r_into_trancl)
--- a/src/ZF/Constructible/Rank_Separation.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/Rank_Separation.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,19 +2,19 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section {*Separation for Facts About Order Types, Rank Functions and 
-      Well-Founded Relations*}
+section \<open>Separation for Facts About Order Types, Rank Functions and 
+      Well-Founded Relations\<close>
 
 theory Rank_Separation imports Rank Rec_Separation begin
 
 
-text{*This theory proves all instances needed for locales
+text\<open>This theory proves all instances needed for locales
  @{text "M_ordertype"} and  @{text "M_wfrank"}.  But the material is not
- needed for proving the relative consistency of AC.*}
+ needed for proving the relative consistency of AC.\<close>
 
-subsection{*The Locale @{text "M_ordertype"}*}
+subsection\<open>The Locale @{text "M_ordertype"}\<close>
 
-subsubsection{*Separation for Order-Isomorphisms*}
+subsubsection\<open>Separation for Order-Isomorphisms\<close>
 
 lemma well_ord_iso_Reflects:
   "REFLECTS[\<lambda>x. x\<in>A \<longrightarrow>
@@ -34,7 +34,7 @@
 done
 
 
-subsubsection{*Separation for @{term "obase"}*}
+subsubsection\<open>Separation for @{term "obase"}\<close>
 
 lemma obase_reflects:
   "REFLECTS[\<lambda>a. \<exists>x[L]. \<exists>g[L]. \<exists>mx[L]. \<exists>par[L].
@@ -46,7 +46,7 @@
 by (intro FOL_reflections function_reflections fun_plus_reflections)
 
 lemma obase_separation:
-     --{*part of the order type formalization*}
+     --\<open>part of the order type formalization\<close>
      "[| L(A); L(r) |]
       ==> separation(L, \<lambda>a. \<exists>x[L]. \<exists>g[L]. \<exists>mx[L]. \<exists>par[L].
              ordinal(L,x) & membership(L,x,mx) & pred_set(L,A,a,r,par) &
@@ -57,7 +57,7 @@
 done
 
 
-subsubsection{*Separation for a Theorem about @{term "obase"}*}
+subsubsection\<open>Separation for a Theorem about @{term "obase"}\<close>
 
 lemma obase_equals_reflects:
   "REFLECTS[\<lambda>x. x\<in>A \<longrightarrow> ~(\<exists>y[L]. \<exists>g[L].
@@ -82,7 +82,7 @@
 done
 
 
-subsubsection{*Replacement for @{term "omap"}*}
+subsubsection\<open>Replacement for @{term "omap"}\<close>
 
 lemma omap_reflects:
  "REFLECTS[\<lambda>z. \<exists>a[L]. a\<in>B & (\<exists>x[L]. \<exists>g[L]. \<exists>mx[L]. \<exists>par[L].
@@ -109,9 +109,9 @@
 
 
 
-subsection{*Instantiating the locale @{text M_ordertype}*}
-text{*Separation (and Strong Replacement) for basic set-theoretic constructions
-such as intersection, Cartesian Product and image.*}
+subsection\<open>Instantiating the locale @{text M_ordertype}\<close>
+text\<open>Separation (and Strong Replacement) for basic set-theoretic constructions
+such as intersection, Cartesian Product and image.\<close>
 
 lemma M_ordertype_axioms_L: "M_ordertype_axioms(L)"
   apply (rule M_ordertype_axioms.intro)
@@ -127,9 +127,9 @@
   done
 
 
-subsection{*The Locale @{text "M_wfrank"}*}
+subsection\<open>The Locale @{text "M_wfrank"}\<close>
 
-subsubsection{*Separation for @{term "wfrank"}*}
+subsubsection\<open>Separation for @{term "wfrank"}\<close>
 
 lemma wfrank_Reflects:
  "REFLECTS[\<lambda>x. \<forall>rplus[L]. tran_closure(L,r,rplus) \<longrightarrow>
@@ -150,7 +150,7 @@
 done
 
 
-subsubsection{*Replacement for @{term "wfrank"}*}
+subsubsection\<open>Replacement for @{term "wfrank"}\<close>
 
 lemma wfrank_replacement_Reflects:
  "REFLECTS[\<lambda>z. \<exists>x[L]. x \<in> A &
@@ -182,7 +182,7 @@
 done
 
 
-subsubsection{*Separation for Proving @{text Ord_wfrank_range}*}
+subsubsection\<open>Separation for Proving @{text Ord_wfrank_range}\<close>
 
 lemma Ord_wfrank_Reflects:
  "REFLECTS[\<lambda>x. \<forall>rplus[L]. tran_closure(L,r,rplus) \<longrightarrow>
@@ -213,7 +213,7 @@
 done
 
 
-subsubsection{*Instantiating the locale @{text M_wfrank}*}
+subsubsection\<open>Instantiating the locale @{text M_wfrank}\<close>
 
 lemma M_wfrank_axioms_L: "M_wfrank_axioms(L)"
   apply (rule M_wfrank_axioms.intro)
--- a/src/ZF/Constructible/Rec_Separation.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/Rec_Separation.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,22 +2,22 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section {*Separation for Facts About Recursion*}
+section \<open>Separation for Facts About Recursion\<close>
 
 theory Rec_Separation imports Separation Internalize begin
 
-text{*This theory proves all instances needed for locales @{text
-"M_trancl"} and @{text "M_datatypes"}*}
+text\<open>This theory proves all instances needed for locales @{text
+"M_trancl"} and @{text "M_datatypes"}\<close>
 
 lemma eq_succ_imp_lt: "[|i = succ(j); Ord(i)|] ==> j<i"
 by simp
 
 
-subsection{*The Locale @{text "M_trancl"}*}
+subsection\<open>The Locale @{text "M_trancl"}\<close>
 
-subsubsection{*Separation for Reflexive/Transitive Closure*}
+subsubsection\<open>Separation for Reflexive/Transitive Closure\<close>
 
-text{*First, The Defining Formula*}
+text\<open>First, The Defining Formula\<close>
 
 (* "rtran_closure_mem(M,A,r,p) ==
       \<exists>nnat[M]. \<exists>n[M]. \<exists>n'[M].
@@ -72,7 +72,7 @@
 apply (intro FOL_reflections function_reflections fun_plus_reflections)
 done
 
-text{*Separation for @{term "rtrancl(r)"}.*}
+text\<open>Separation for @{term "rtrancl(r)"}.\<close>
 lemma rtrancl_separation:
      "[| L(r); L(A) |] ==> separation (L, rtran_closure_mem(L,A,r))"
 apply (rule gen_separation_multi [OF rtran_closure_mem_reflection, of "{r,A}"],
@@ -82,7 +82,7 @@
 done
 
 
-subsubsection{*Reflexive/Transitive Closure, Internalized*}
+subsubsection\<open>Reflexive/Transitive Closure, Internalized\<close>
 
 (*  "rtran_closure(M,r,s) ==
         \<forall>A[M]. is_field(M,r,A) \<longrightarrow>
@@ -118,7 +118,7 @@
 done
 
 
-subsubsection{*Transitive Closure of a Relation, Internalized*}
+subsubsection\<open>Transitive Closure of a Relation, Internalized\<close>
 
 (*  "tran_closure(M,r,t) ==
          \<exists>s[M]. rtran_closure(M,r,s) & composition(M,r,s,t)" *)
@@ -152,7 +152,7 @@
 done
 
 
-subsubsection{*Separation for the Proof of @{text "wellfounded_on_trancl"}*}
+subsubsection\<open>Separation for the Proof of @{text "wellfounded_on_trancl"}\<close>
 
 lemma wellfounded_trancl_reflects:
   "REFLECTS[\<lambda>x. \<exists>w[L]. \<exists>wx[L]. \<exists>rp[L].
@@ -175,7 +175,7 @@
 done
 
 
-subsubsection{*Instantiating the locale @{text M_trancl}*}
+subsubsection\<open>Instantiating the locale @{text M_trancl}\<close>
 
 lemma M_trancl_axioms_L: "M_trancl_axioms(L)"
   apply (rule M_trancl_axioms.intro)
@@ -188,9 +188,9 @@
 interpretation L?: M_trancl L by (rule M_trancl_L)
 
 
-subsection{*@{term L} is Closed Under the Operator @{term list}*}
+subsection\<open>@{term L} is Closed Under the Operator @{term list}\<close>
 
-subsubsection{*Instances of Replacement for Lists*}
+subsubsection\<open>Instances of Replacement for Lists\<close>
 
 lemma list_replacement1_Reflects:
  "REFLECTS
@@ -238,9 +238,9 @@
 done
 
 
-subsection{*@{term L} is Closed Under the Operator @{term formula}*}
+subsection\<open>@{term L} is Closed Under the Operator @{term formula}\<close>
 
-subsubsection{*Instances of Replacement for Formulas*}
+subsubsection\<open>Instances of Replacement for Formulas\<close>
 
 (*FIXME: could prove a lemma iterates_replacementI to eliminate the 
 need to expand iterates_replacement and wfrec_replacement*)
@@ -287,11 +287,11 @@
 apply (rule sep_rules formula_functor_iff_sats is_iterates_iff_sats | simp)+
 done
 
-text{*NB The proofs for type @{term formula} are virtually identical to those
-for @{term "list(A)"}.  It was a cut-and-paste job! *}
+text\<open>NB The proofs for type @{term formula} are virtually identical to those
+for @{term "list(A)"}.  It was a cut-and-paste job!\<close>
 
 
-subsubsection{*The Formula @{term is_nth}, Internalized*}
+subsubsection\<open>The Formula @{term is_nth}, Internalized\<close>
 
 (* "is_nth(M,n,l,Z) ==
       \<exists>X[M]. is_iterates(M, is_tl(M), l, n, X) & is_hd(M,X,Z)" *)
@@ -328,7 +328,7 @@
 done
 
 
-subsubsection{*An Instance of Replacement for @{term nth}*}
+subsubsection\<open>An Instance of Replacement for @{term nth}\<close>
 
 (*FIXME: could prove a lemma iterates_replacementI to eliminate the 
 need to expand iterates_replacement and wfrec_replacement*)
@@ -356,7 +356,7 @@
 done
 
 
-subsubsection{*Instantiating the locale @{text M_datatypes}*}
+subsubsection\<open>Instantiating the locale @{text M_datatypes}\<close>
 
 lemma M_datatypes_axioms_L: "M_datatypes_axioms(L)"
   apply (rule M_datatypes_axioms.intro)
@@ -375,9 +375,9 @@
 interpretation L?: M_datatypes L by (rule M_datatypes_L)
 
 
-subsection{*@{term L} is Closed Under the Operator @{term eclose}*}
+subsection\<open>@{term L} is Closed Under the Operator @{term eclose}\<close>
 
-subsubsection{*Instances of Replacement for @{term eclose}*}
+subsubsection\<open>Instances of Replacement for @{term eclose}\<close>
 
 lemma eclose_replacement1_Reflects:
  "REFLECTS
@@ -422,7 +422,7 @@
 done
 
 
-subsubsection{*Instantiating the locale @{text M_eclose}*}
+subsubsection\<open>Instantiating the locale @{text M_eclose}\<close>
 
 lemma M_eclose_axioms_L: "M_eclose_axioms(L)"
   apply (rule M_eclose_axioms.intro)
--- a/src/ZF/Constructible/Reflection.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/Reflection.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,7 +2,7 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section {* The Reflection Theorem*}
+section \<open>The Reflection Theorem\<close>
 
 theory Reflection imports Normal begin
 
@@ -12,14 +12,14 @@
 lemma ball_iff_not_bex_not: "(\<forall>x\<in>A. P(x)) \<longleftrightarrow> (~ (\<exists>x\<in>A. ~ P(x)))"
 by blast
 
-text{*From the notes of A. S. Kechris, page 6, and from
+text\<open>From the notes of A. S. Kechris, page 6, and from
       Andrzej Mostowski, \emph{Constructible Sets with Applications},
-      North-Holland, 1969, page 23.*}
+      North-Holland, 1969, page 23.\<close>
 
 
-subsection{*Basic Definitions*}
+subsection\<open>Basic Definitions\<close>
 
-text{*First part: the cumulative hierarchy defining the class @{text M}.
+text\<open>First part: the cumulative hierarchy defining the class @{text M}.
 To avoid handling multiple arguments, we assume that @{text "Mset(l)"} is
 closed under ordered pairing provided @{text l} is limit.  Possibly this
 could be avoided: the induction hypothesis @{term Cl_reflects}
@@ -28,7 +28,7 @@
 uses of @{term Pair_in_Mset}.  But there isn't much point in doing so, since
 ultimately the @{text ex_reflection} proof is packaged up using the
 predicate @{text Reflects}.
-*}
+\<close>
 locale reflection =
   fixes Mset and M and Reflects
   assumes Mset_mono_le : "mono_le_subset(Mset)"
@@ -38,9 +38,9 @@
   defines "M(x) == \<exists>a. Ord(a) & x \<in> Mset(a)"
       and "Reflects(Cl,P,Q) == Closed_Unbounded(Cl) &
                               (\<forall>a. Cl(a) \<longrightarrow> (\<forall>x\<in>Mset(a). P(x) \<longleftrightarrow> Q(a,x)))"
-  fixes F0 --{*ordinal for a specific value @{term y}*}
-  fixes FF --{*sup over the whole level, @{term "y\<in>Mset(a)"}*}
-  fixes ClEx --{*Reflecting ordinals for the formula @{term "\<exists>z. P"}*}
+  fixes F0 --\<open>ordinal for a specific value @{term y}\<close>
+  fixes FF --\<open>sup over the whole level, @{term "y\<in>Mset(a)"}\<close>
+  fixes ClEx --\<open>Reflecting ordinals for the formula @{term "\<exists>z. P"}\<close>
   defines "F0(P,y) == \<mu> b. (\<exists>z. M(z) & P(<y,z>)) \<longrightarrow>
                                (\<exists>z\<in>Mset(b). P(<y,z>))"
       and "FF(P)   == \<lambda>a. \<Union>y\<in>Mset(a). F0(P,y)"
@@ -51,14 +51,14 @@
 apply (simp add: mono_le_subset_def leI)
 done
 
-text{*Awkward: we need a version of @{text ClEx_def} as an equality
-      at the level of classes, which do not really exist*}
+text\<open>Awkward: we need a version of @{text ClEx_def} as an equality
+      at the level of classes, which do not really exist\<close>
 lemma (in reflection) ClEx_eq:
      "ClEx(P) == \<lambda>a. Limit(a) & normalize(FF(P),a) = a"
 by (simp add: ClEx_def [symmetric])
 
 
-subsection{*Easy Cases of the Reflection Theorem*}
+subsection\<open>Easy Cases of the Reflection Theorem\<close>
 
 theorem (in reflection) Triv_reflection [intro]:
      "Reflects(Ord, P, \<lambda>a x. P(x))"
@@ -94,7 +94,7 @@
                    \<lambda>a x. Q(a,x) \<longleftrightarrow> Q'(a,x))"
 by (simp add: Reflects_def Closed_Unbounded_Int, blast)
 
-subsection{*Reflection for Existential Quantifiers*}
+subsection\<open>Reflection for Existential Quantifiers\<close>
 
 lemma (in reflection) F0_works:
      "[| y\<in>Mset(a); Ord(a); M(z); P(<y,z>) |] ==> \<exists>z\<in>Mset(F0(P,y)). P(<y,z>)"
@@ -115,8 +115,8 @@
 apply (simp add: cont_Ord_def FF_def, blast)
 done
 
-text{*Recall that @{term F0} depends upon @{term "y\<in>Mset(a)"},
-while @{term FF} depends only upon @{term a}. *}
+text\<open>Recall that @{term F0} depends upon @{term "y\<in>Mset(a)"},
+while @{term FF} depends only upon @{term a}.\<close>
 lemma (in reflection) FF_works:
      "[| M(z); y\<in>Mset(a); P(<y,z>); Ord(a) |] ==> \<exists>z\<in>Mset(FF(P,a)). P(<y,z>)"
 apply (simp add: FF_def)
@@ -133,7 +133,7 @@
 done
 
 
-text{*Locale for the induction hypothesis*}
+text\<open>Locale for the induction hypothesis\<close>
 
 locale ex_reflection = reflection +
   fixes P  --"the original formula"
@@ -159,13 +159,13 @@
              intro: Limit_is_Ord Pair_in_Mset)
 done
 
-text{*Class @{text ClEx} indeed consists of reflecting ordinals...*}
+text\<open>Class @{text ClEx} indeed consists of reflecting ordinals...\<close>
 lemma (in ex_reflection) ZF_ClEx_iff:
      "[| y\<in>Mset(a); Cl(a); ClEx(P,a) |]
       ==> (\<exists>z. M(z) & P(<y,z>)) \<longleftrightarrow> (\<exists>z\<in>Mset(a). Q(a,<y,z>))"
 by (blast intro: dest: ClEx_downward ClEx_upward)
 
-text{*...and it is closed and unbounded*}
+text\<open>...and it is closed and unbounded\<close>
 lemma (in ex_reflection) ZF_Closed_Unbounded_ClEx:
      "Closed_Unbounded(ClEx(P))"
 apply (simp add: ClEx_eq)
@@ -173,9 +173,9 @@
                    Closed_Unbounded_Limit Normal_normalize)
 done
 
-text{*The same two theorems, exported to locale @{text reflection}.*}
+text\<open>The same two theorems, exported to locale @{text reflection}.\<close>
 
-text{*Class @{text ClEx} indeed consists of reflecting ordinals...*}
+text\<open>Class @{text ClEx} indeed consists of reflecting ordinals...\<close>
 lemma (in reflection) ClEx_iff:
      "[| y\<in>Mset(a); Cl(a); ClEx(P,a);
         !!a. [| Cl(a); Ord(a) |] ==> \<forall>x\<in>Mset(a). P(x) \<longleftrightarrow> Q(a,x) |]
@@ -203,7 +203,7 @@
 apply (blast intro: ex_reflection_axioms.intro)
 done
 
-subsection{*Packaging the Quantifier Reflection Rules*}
+subsection\<open>Packaging the Quantifier Reflection Rules\<close>
 
 lemma (in reflection) Ex_reflection_0:
      "Reflects(Cl,P0,Q0)
@@ -243,7 +243,7 @@
 by (rule All_reflection_0 [of _ "\<lambda>x. P(fst(x),snd(x))"
                                 "\<lambda>a x. Q(a,fst(x),snd(x))", simplified])
 
-text{*And again, this time using class-bounded quantifiers*}
+text\<open>And again, this time using class-bounded quantifiers\<close>
 
 theorem (in reflection) Rex_reflection [intro]:
      "Reflects(Cl, \<lambda>x. P(fst(x),snd(x)), \<lambda>a x. Q(a,fst(x),snd(x)))
@@ -260,23 +260,23 @@
 by (unfold rall_def, blast)
 
 
-text{*No point considering bounded quantifiers, where reflection is trivial.*}
+text\<open>No point considering bounded quantifiers, where reflection is trivial.\<close>
 
 
-subsection{*Simple Examples of Reflection*}
+subsection\<open>Simple Examples of Reflection\<close>
 
-text{*Example 1: reflecting a simple formula.  The reflecting class is first
+text\<open>Example 1: reflecting a simple formula.  The reflecting class is first
 given as the variable @{text ?Cl} and later retrieved from the final
-proof state.*}
+proof state.\<close>
 schematic_lemma (in reflection)
      "Reflects(?Cl,
                \<lambda>x. \<exists>y. M(y) & x \<in> y,
                \<lambda>a x. \<exists>y\<in>Mset(a). x \<in> y)"
 by fast
 
-text{*Problem here: there needs to be a conjunction (class intersection)
+text\<open>Problem here: there needs to be a conjunction (class intersection)
 in the class of reflecting ordinals.  The @{term "Ord(a)"} is redundant,
-though harmless.*}
+though harmless.\<close>
 lemma (in reflection)
      "Reflects(\<lambda>a. Ord(a) & ClEx(\<lambda>x. fst(x) \<in> snd(x), a),
                \<lambda>x. \<exists>y. M(y) & x \<in> y,
@@ -284,14 +284,14 @@
 by fast
 
 
-text{*Example 2*}
+text\<open>Example 2\<close>
 schematic_lemma (in reflection)
      "Reflects(?Cl,
                \<lambda>x. \<exists>y. M(y) & (\<forall>z. M(z) \<longrightarrow> z \<subseteq> x \<longrightarrow> z \<in> y),
                \<lambda>a x. \<exists>y\<in>Mset(a). \<forall>z\<in>Mset(a). z \<subseteq> x \<longrightarrow> z \<in> y)"
 by fast
 
-text{*Example 2'.  We give the reflecting class explicitly. *}
+text\<open>Example 2'.  We give the reflecting class explicitly.\<close>
 lemma (in reflection)
   "Reflects
     (\<lambda>a. (Ord(a) &
@@ -301,56 +301,56 @@
             \<lambda>a x. \<exists>y\<in>Mset(a). \<forall>z\<in>Mset(a). z \<subseteq> x \<longrightarrow> z \<in> y)"
 by fast
 
-text{*Example 2''.  We expand the subset relation.*}
+text\<open>Example 2''.  We expand the subset relation.\<close>
 schematic_lemma (in reflection)
   "Reflects(?Cl,
         \<lambda>x. \<exists>y. M(y) & (\<forall>z. M(z) \<longrightarrow> (\<forall>w. M(w) \<longrightarrow> w\<in>z \<longrightarrow> w\<in>x) \<longrightarrow> z\<in>y),
         \<lambda>a x. \<exists>y\<in>Mset(a). \<forall>z\<in>Mset(a). (\<forall>w\<in>Mset(a). w\<in>z \<longrightarrow> w\<in>x) \<longrightarrow> z\<in>y)"
 by fast
 
-text{*Example 2'''.  Single-step version, to reveal the reflecting class.*}
+text\<open>Example 2'''.  Single-step version, to reveal the reflecting class.\<close>
 schematic_lemma (in reflection)
      "Reflects(?Cl,
                \<lambda>x. \<exists>y. M(y) & (\<forall>z. M(z) \<longrightarrow> z \<subseteq> x \<longrightarrow> z \<in> y),
                \<lambda>a x. \<exists>y\<in>Mset(a). \<forall>z\<in>Mset(a). z \<subseteq> x \<longrightarrow> z \<in> y)"
 apply (rule Ex_reflection)
-txt{*
+txt\<open>
 @{goals[display,indent=0,margin=60]}
-*}
+\<close>
 apply (rule All_reflection)
-txt{*
+txt\<open>
 @{goals[display,indent=0,margin=60]}
-*}
+\<close>
 apply (rule Triv_reflection)
-txt{*
+txt\<open>
 @{goals[display,indent=0,margin=60]}
-*}
+\<close>
 done
 
-text{*Example 3.  Warning: the following examples make sense only
-if @{term P} is quantifier-free, since it is not being relativized.*}
+text\<open>Example 3.  Warning: the following examples make sense only
+if @{term P} is quantifier-free, since it is not being relativized.\<close>
 schematic_lemma (in reflection)
      "Reflects(?Cl,
                \<lambda>x. \<exists>y. M(y) & (\<forall>z. M(z) \<longrightarrow> z \<in> y \<longleftrightarrow> z \<in> x & P(z)),
                \<lambda>a x. \<exists>y\<in>Mset(a). \<forall>z\<in>Mset(a). z \<in> y \<longleftrightarrow> z \<in> x & P(z))"
 by fast
 
-text{*Example 3'*}
+text\<open>Example 3'\<close>
 schematic_lemma (in reflection)
      "Reflects(?Cl,
                \<lambda>x. \<exists>y. M(y) & y = Collect(x,P),
                \<lambda>a x. \<exists>y\<in>Mset(a). y = Collect(x,P))"
 by fast
 
-text{*Example 3''*}
+text\<open>Example 3''\<close>
 schematic_lemma (in reflection)
      "Reflects(?Cl,
                \<lambda>x. \<exists>y. M(y) & y = Replace(x,P),
                \<lambda>a x. \<exists>y\<in>Mset(a). y = Replace(x,P))"
 by fast
 
-text{*Example 4: Axiom of Choice.  Possibly wrong, since @{text \<Pi>} needs
-to be relativized.*}
+text\<open>Example 4: Axiom of Choice.  Possibly wrong, since @{text \<Pi>} needs
+to be relativized.\<close>
 schematic_lemma (in reflection)
      "Reflects(?Cl,
                \<lambda>A. 0\<notin>A \<longrightarrow> (\<exists>f. M(f) & f \<in> (\<Pi> X \<in> A. X)),
--- a/src/ZF/Constructible/Relative.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/Relative.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,11 +2,11 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section {*Relativization and Absoluteness*}
+section \<open>Relativization and Absoluteness\<close>
 
 theory Relative imports Main begin
 
-subsection{* Relativized versions of standard set-theoretic concepts *}
+subsection\<open>Relativized versions of standard set-theoretic concepts\<close>
 
 definition
   empty :: "[i=>o,i] => o" where
@@ -123,10 +123,10 @@
 
 definition
   is_range :: "[i=>o,i,i] => o" where
-    --{*the cleaner
+    --\<open>the cleaner
       @{term "\<exists>r'[M]. is_converse(M,r,r') & is_domain(M,r',z)"}
       unfortunately needs an instance of separation in order to prove
-        @{term "M(converse(r))"}.*}
+        @{term "M(converse(r))"}.\<close>
     "is_range(M,r,z) ==
         \<forall>y[M]. y \<in> z \<longleftrightarrow> (\<exists>w[M]. w\<in>r & (\<exists>x[M]. pair(M,x,y,w)))"
 
@@ -200,32 +200,32 @@
 
 definition
   ordinal :: "[i=>o,i] => o" where
-     --{*an ordinal is a transitive set of transitive sets*}
+     --\<open>an ordinal is a transitive set of transitive sets\<close>
     "ordinal(M,a) == transitive_set(M,a) & (\<forall>x[M]. x\<in>a \<longrightarrow> transitive_set(M,x))"
 
 definition
   limit_ordinal :: "[i=>o,i] => o" where
-    --{*a limit ordinal is a non-empty, successor-closed ordinal*}
+    --\<open>a limit ordinal is a non-empty, successor-closed ordinal\<close>
     "limit_ordinal(M,a) ==
         ordinal(M,a) & ~ empty(M,a) &
         (\<forall>x[M]. x\<in>a \<longrightarrow> (\<exists>y[M]. y\<in>a & successor(M,x,y)))"
 
 definition
   successor_ordinal :: "[i=>o,i] => o" where
-    --{*a successor ordinal is any ordinal that is neither empty nor limit*}
+    --\<open>a successor ordinal is any ordinal that is neither empty nor limit\<close>
     "successor_ordinal(M,a) ==
         ordinal(M,a) & ~ empty(M,a) & ~ limit_ordinal(M,a)"
 
 definition
   finite_ordinal :: "[i=>o,i] => o" where
-    --{*an ordinal is finite if neither it nor any of its elements are limit*}
+    --\<open>an ordinal is finite if neither it nor any of its elements are limit\<close>
     "finite_ordinal(M,a) ==
         ordinal(M,a) & ~ limit_ordinal(M,a) &
         (\<forall>x[M]. x\<in>a \<longrightarrow> ~ limit_ordinal(M,x))"
 
 definition
   omega :: "[i=>o,i] => o" where
-    --{*omega is a limit ordinal none of whose elements are limit*}
+    --\<open>omega is a limit ordinal none of whose elements are limit\<close>
     "omega(M,a) == limit_ordinal(M,a) & (\<forall>x[M]. x\<in>a \<longrightarrow> ~ limit_ordinal(M,x))"
 
 definition
@@ -245,7 +245,7 @@
 
 definition
   Relation1 :: "[i=>o, i, [i,i]=>o, i=>i] => o" where
-    --{*as above, but typed*}
+    --\<open>as above, but typed\<close>
     "Relation1(M,A,is_f,f) ==
         \<forall>x[M]. \<forall>y[M]. x\<in>A \<longrightarrow> is_f(x,y) \<longleftrightarrow> y = f(x)"
 
@@ -275,7 +275,7 @@
        \<forall>u[M]. \<forall>x[M]. \<forall>y[M]. \<forall>z[M]. \<forall>a[M]. is_f(u,x,y,z,a) \<longleftrightarrow> a = f(u,x,y,z)"
 
 
-text{*Useful when absoluteness reasoning has replaced the predicates by terms*}
+text\<open>Useful when absoluteness reasoning has replaced the predicates by terms\<close>
 lemma triv_Relation1:
      "Relation1(M, A, \<lambda>x y. y = f(x), f)"
 by (simp add: Relation1_def)
@@ -285,7 +285,7 @@
 by (simp add: Relation2_def)
 
 
-subsection {*The relativized ZF axioms*}
+subsection \<open>The relativized ZF axioms\<close>
 
 definition
   extensionality :: "(i=>o) => o" where
@@ -294,10 +294,10 @@
 
 definition
   separation :: "[i=>o, i=>o] => o" where
-    --{*The formula @{text P} should only involve parameters
+    --\<open>The formula @{text P} should only involve parameters
         belonging to @{text M} and all its quantifiers must be relativized
         to @{text M}.  We do not have separation as a scheme; every instance
-        that we need must be assumed (and later proved) separately.*}
+        that we need must be assumed (and later proved) separately.\<close>
     "separation(M,P) ==
         \<forall>z[M]. \<exists>y[M]. \<forall>x[M]. x \<in> y \<longleftrightarrow> x \<in> z & P(x)"
 
@@ -336,11 +336,11 @@
         \<forall>x[M]. (\<exists>y[M]. y\<in>x) \<longrightarrow> (\<exists>y[M]. y\<in>x & ~(\<exists>z[M]. z\<in>x & z \<in> y))"
 
 
-subsection{*A trivial consistency proof for $V_\omega$ *}
+subsection\<open>A trivial consistency proof for $V_\omega$\<close>
 
-text{*We prove that $V_\omega$
+text\<open>We prove that $V_\omega$
       (or @{text univ} in Isabelle) satisfies some ZF axioms.
-     Kunen, Theorem IV 3.13, page 123.*}
+     Kunen, Theorem IV 3.13, page 123.\<close>
 
 lemma univ0_downwards_mem: "[| y \<in> x; x \<in> univ(0) |] ==> y \<in> univ(0)"
 apply (insert Transset_univ [OF Transset_0])
@@ -355,7 +355,7 @@
      "A \<in> univ(0) ==> (\<exists>x\<in>A. x \<in> univ(0) & P(x)) \<longleftrightarrow> (\<exists>x\<in>A. P(x))"
 by (blast intro: univ0_downwards_mem)
 
-text{*Congruence rule for separation: can assume the variable is in @{text M}*}
+text\<open>Congruence rule for separation: can assume the variable is in @{text M}\<close>
 lemma separation_cong [cong]:
      "(!!x. M(x) ==> P(x) \<longleftrightarrow> P'(x))
       ==> separation(M, %x. P(x)) \<longleftrightarrow> separation(M, %x. P'(x))"
@@ -374,20 +374,20 @@
      "univalent(M,A,Q) ==> univalent(M, A, \<lambda>x y. P(x,y) & Q(x,y))"
 by (simp add: univalent_def, blast)
 
-text{*Congruence rule for replacement*}
+text\<open>Congruence rule for replacement\<close>
 lemma strong_replacement_cong [cong]:
      "[| !!x y. [| M(x); M(y) |] ==> P(x,y) \<longleftrightarrow> P'(x,y) |]
       ==> strong_replacement(M, %x y. P(x,y)) \<longleftrightarrow>
           strong_replacement(M, %x y. P'(x,y))"
 by (simp add: strong_replacement_def)
 
-text{*The extensionality axiom*}
+text\<open>The extensionality axiom\<close>
 lemma "extensionality(\<lambda>x. x \<in> univ(0))"
 apply (simp add: extensionality_def)
 apply (blast intro: univ0_downwards_mem)
 done
 
-text{*The separation axiom requires some lemmas*}
+text\<open>The separation axiom requires some lemmas\<close>
 lemma Collect_in_Vfrom:
      "[| X \<in> Vfrom(A,j);  Transset(A) |] ==> Collect(X,P) \<in> Vfrom(A, succ(j))"
 apply (drule Transset_Vfrom)
@@ -412,13 +412,13 @@
 apply (blast intro: Collect_in_univ Transset_0)+
 done
 
-text{*Unordered pairing axiom*}
+text\<open>Unordered pairing axiom\<close>
 lemma "upair_ax(\<lambda>x. x \<in> univ(0))"
 apply (simp add: upair_ax_def upair_def)
 apply (blast intro: doubleton_in_univ)
 done
 
-text{*Union axiom*}
+text\<open>Union axiom\<close>
 lemma "Union_ax(\<lambda>x. x \<in> univ(0))"
 apply (simp add: Union_ax_def big_union_def, clarify)
 apply (rule_tac x="\<Union>x" in bexI)
@@ -426,7 +426,7 @@
 apply (blast intro: Union_in_univ Transset_0)
 done
 
-text{*Powerset axiom*}
+text\<open>Powerset axiom\<close>
 
 lemma Pow_in_univ:
      "[| X \<in> univ(A);  Transset(A) |] ==> Pow(X) \<in> univ(A)"
@@ -440,7 +440,7 @@
 apply (blast intro: Pow_in_univ Transset_0)
 done
 
-text{*Foundation axiom*}
+text\<open>Foundation axiom\<close>
 lemma "foundation_ax(\<lambda>x. x \<in> univ(0))"
 apply (simp add: foundation_ax_def, clarify)
 apply (cut_tac A=x in foundation)
@@ -450,12 +450,12 @@
 lemma "replacement(\<lambda>x. x \<in> univ(0), P)"
 apply (simp add: replacement_def, clarify)
 oops
-text{*no idea: maybe prove by induction on the rank of A?*}
+text\<open>no idea: maybe prove by induction on the rank of A?\<close>
 
-text{*Still missing: Replacement, Choice*}
+text\<open>Still missing: Replacement, Choice\<close>
 
-subsection{*Lemmas Needed to Reduce Some Set Constructions to Instances
-      of Separation*}
+subsection\<open>Lemmas Needed to Reduce Some Set Constructions to Instances
+      of Separation\<close>
 
 lemma image_iff_Collect: "r `` A = {y \<in> \<Union>(\<Union>(r)). \<exists>p\<in>r. \<exists>x\<in>A. p=<x,y>}"
 apply (rule equalityI, auto)
@@ -468,8 +468,8 @@
 apply (simp add: Pair_def, blast)
 done
 
-text{*These two lemmas lets us prove @{text domain_closed} and
-      @{text range_closed} without new instances of separation*}
+text\<open>These two lemmas lets us prove @{text domain_closed} and
+      @{text range_closed} without new instances of separation\<close>
 
 lemma domain_eq_vimage: "domain(r) = r -`` Union(Union(r))"
 apply (rule equalityI, auto)
@@ -498,7 +498,7 @@
 by (simp add: separation_def)
 
 
-text{*More constants, for order types*}
+text\<open>More constants, for order types\<close>
 
 definition
   order_isomorphism :: "[i=>o,i,i,i,i,i] => o" where
@@ -515,15 +515,15 @@
         \<forall>y[M]. y \<in> B \<longleftrightarrow> (\<exists>p[M]. p\<in>r & y \<in> A & pair(M,y,x,p))"
 
 definition
-  membership :: "[i=>o,i,i] => o" where --{*membership relation*}
+  membership :: "[i=>o,i,i] => o" where --\<open>membership relation\<close>
     "membership(M,A,r) ==
         \<forall>p[M]. p \<in> r \<longleftrightarrow> (\<exists>x[M]. x\<in>A & (\<exists>y[M]. y\<in>A & x\<in>y & pair(M,x,y,p)))"
 
 
-subsection{*Introducing a Transitive Class Model*}
+subsection\<open>Introducing a Transitive Class Model\<close>
 
-text{*The class M is assumed to be transitive and to satisfy some
-      relativized ZF axioms*}
+text\<open>The class M is assumed to be transitive and to satisfy some
+      relativized ZF axioms\<close>
 locale M_trivial =
   fixes M
   assumes transM:           "[| y\<in>x; M(x) |] ==> M(y)"
@@ -534,8 +534,8 @@
       and M_nat [iff]:      "M(nat)"           (*i.e. the axiom of infinity*)
 
 
-text{*Automatically discovers the proof using @{text transM}, @{text nat_0I}
-and @{text M_nat}.*}
+text\<open>Automatically discovers the proof using @{text transM}, @{text nat_0I}
+and @{text M_nat}.\<close>
 lemma (in M_trivial) nonempty [simp]: "M(0)"
 by (blast intro: transM)
 
@@ -552,16 +552,16 @@
                (\<forall>x\<in>A. P(x)) & (\<forall>x. P(x) \<longrightarrow> M(x) \<longrightarrow> x\<in>A)"
 by (blast intro: transM)
 
-text{*Simplifies proofs of equalities when there's an iff-equality
+text\<open>Simplifies proofs of equalities when there's an iff-equality
       available for rewriting, universally quantified over M.
       But it's not the only way to prove such equalities: its
-      premises @{term "M(A)"} and  @{term "M(B)"} can be too strong.*}
+      premises @{term "M(A)"} and  @{term "M(B)"} can be too strong.\<close>
 lemma (in M_trivial) M_equalityI:
      "[| !!x. M(x) ==> x\<in>A \<longleftrightarrow> x\<in>B; M(A); M(B) |] ==> A=B"
 by (blast intro!: equalityI dest: transM)
 
 
-subsubsection{*Trivial Absoluteness Proofs: Empty Set, Pairs, etc.*}
+subsubsection\<open>Trivial Absoluteness Proofs: Empty Set, Pairs, etc.\<close>
 
 lemma (in M_trivial) empty_abs [simp]:
      "M(z) ==> empty(M,z) \<longleftrightarrow> z=0"
@@ -615,7 +615,7 @@
 apply (blast dest: transM)
 done
 
-subsubsection{*Absoluteness for Unions and Intersections*}
+subsubsection\<open>Absoluteness for Unions and Intersections\<close>
 
 lemma (in M_trivial) union_abs [simp]:
      "[| M(a); M(b); M(z) |] ==> union(M,a,b,z) \<longleftrightarrow> z = a \<union> b"
@@ -667,7 +667,7 @@
 apply (blast intro: transM)
 done
 
-subsubsection{*Absoluteness for Separation and Replacement*}
+subsubsection\<open>Absoluteness for Separation and Replacement\<close>
 
 lemma (in M_trivial) separation_closed [intro,simp]:
      "[| separation(M,P); M(A) |] ==> M(Collect(A,P))"
@@ -687,7 +687,7 @@
 apply (blast intro!: equalityI dest: transM)
 done
 
-text{*Probably the premise and conclusion are equivalent*}
+text\<open>Probably the premise and conclusion are equivalent\<close>
 lemma (in M_trivial) strong_replacementI [rule_format]:
     "[| \<forall>B[M]. separation(M, %u. \<exists>x[M]. x\<in>B & P(x,u)) |]
      ==> strong_replacement(M,P)"
@@ -698,7 +698,7 @@
 apply (rule_tac x=y in rexI, force, assumption)
 done
 
-subsubsection{*The Operator @{term is_Replace}*}
+subsubsection\<open>The Operator @{term is_Replace}\<close>
 
 
 lemma is_Replace_cong [cong]:
@@ -758,8 +758,8 @@
 lemma Replace_conj_eq: "{y . x \<in> A, x\<in>A & y=f(x)} = {y . x\<in>A, y=f(x)}"
 by simp
 
-text{*Better than @{text RepFun_closed} when having the formula @{term "x\<in>A"}
-      makes relativization easier.*}
+text\<open>Better than @{text RepFun_closed} when having the formula @{term "x\<in>A"}
+      makes relativization easier.\<close>
 lemma (in M_trivial) RepFun_closed2:
      "[| strong_replacement(M, \<lambda>x y. x\<in>A & y = f(x)); M(A); \<forall>x\<in>A. M(f(x)) |]
       ==> M(RepFun(A, %x. f(x)))"
@@ -768,7 +768,7 @@
 apply (auto dest: transM  simp add: Replace_conj_eq univalent_def)
 done
 
-subsubsection {*Absoluteness for @{term Lambda}*}
+subsubsection \<open>Absoluteness for @{term Lambda}\<close>
 
 definition
  is_lambda :: "[i=>o, i, [i,i]=>o, i] => o" where
@@ -781,7 +781,7 @@
       ==> M(\<lambda>x\<in>A. b(x))"
 by (simp add: lam_def, blast intro: RepFun_closed dest: transM)
 
-text{*Better than @{text lam_closed}: has the formula @{term "x\<in>A"}*}
+text\<open>Better than @{text lam_closed}: has the formula @{term "x\<in>A"}\<close>
 lemma (in M_trivial) lam_closed2:
   "[|strong_replacement(M, \<lambda>x y. x\<in>A & y = \<langle>x, b(x)\<rangle>);
      M(A); \<forall>m[M]. m\<in>A \<longrightarrow> M(b(m))|] ==> M(Lambda(A,b))"
@@ -816,22 +816,22 @@
  apply (blast intro!: equalityI dest: transM, blast)
 done
 
-text{*What about @{text Pow_abs}?  Powerset is NOT absolute!
-      This result is one direction of absoluteness.*}
+text\<open>What about @{text Pow_abs}?  Powerset is NOT absolute!
+      This result is one direction of absoluteness.\<close>
 
 lemma (in M_trivial) powerset_Pow:
      "powerset(M, x, Pow(x))"
 by (simp add: powerset_def)
 
-text{*But we can't prove that the powerset in @{text M} includes the
-      real powerset.*}
+text\<open>But we can't prove that the powerset in @{text M} includes the
+      real powerset.\<close>
 lemma (in M_trivial) powerset_imp_subset_Pow:
      "[| powerset(M,x,y); M(y) |] ==> y \<subseteq> Pow(x)"
 apply (simp add: powerset_def)
 apply (blast dest: transM)
 done
 
-subsubsection{*Absoluteness for the Natural Numbers*}
+subsubsection\<open>Absoluteness for the Natural Numbers\<close>
 
 lemma (in M_trivial) nat_into_M [intro]:
      "n \<in> nat ==> M(n)"
@@ -870,8 +870,8 @@
 by (simp add: is_nat_case_def)
 
 
-subsection{*Absoluteness for Ordinals*}
-text{*These results constitute Theorem IV 5.1 of Kunen (page 126).*}
+subsection\<open>Absoluteness for Ordinals\<close>
+text\<open>These results constitute Theorem IV 5.1 of Kunen (page 126).\<close>
 
 lemma (in M_trivial) lt_closed:
      "[| j<i; M(i) |] ==> M(j)"
@@ -936,7 +936,7 @@
      "M(a) ==> number3(M,a) \<longleftrightarrow> a = succ(succ(1))"
 by (simp add: number3_def)
 
-text{*Kunen continued to 20...*}
+text\<open>Kunen continued to 20...\<close>
 
 (*Could not get this to work.  The \<lambda>x\<in>nat is essential because everything
   but the recursion variable must stay unchanged.  But then the recursion
@@ -959,7 +959,7 @@
        "natnumber(M,0,x) == x=0"
 *)
 
-subsection{*Some instances of separation and strong replacement*}
+subsection\<open>Some instances of separation and strong replacement\<close>
 
 locale M_basic = M_trivial +
 assumes Inter_separation:
@@ -992,7 +992,7 @@
                 pair(M,f,b,p) & pair(M,n,b,nb) & is_cons(M,nb,f,cnbf) &
                 upair(M,cnbf,cnbf,z))"
   and is_recfun_separation:
-     --{*for well-founded recursion: used to prove @{text is_recfun_equal}*}
+     --\<open>for well-founded recursion: used to prove @{text is_recfun_equal}\<close>
      "[| M(r); M(f); M(g); M(a); M(b) |]
      ==> separation(M,
             \<lambda>x. \<exists>xa[M]. \<exists>xb[M].
@@ -1022,7 +1022,7 @@
 defer 1
   apply (simp add: powerset_def)
  apply blast
-txt{*Final, difficult case: the left-to-right direction of the theorem.*}
+txt\<open>Final, difficult case: the left-to-right direction of the theorem.\<close>
 apply (insert power_ax, simp add: power_ax_def)
 apply (frule_tac x="A \<union> B" and P="\<lambda>x. rex(M,Q(x))" for Q in rspec)
 apply (blast, clarify)
@@ -1042,8 +1042,8 @@
 apply (insert cartprod_separation [of A B], simp)
 done
 
-text{*All the lemmas above are necessary because Powerset is not absolute.
-      I should have used Replacement instead!*}
+text\<open>All the lemmas above are necessary because Powerset is not absolute.
+      I should have used Replacement instead!\<close>
 lemma (in M_basic) cartprod_closed [intro,simp]:
      "[| M(A); M(B) |] ==> M(A*B)"
 by (frule cartprod_closed_lemma, assumption, force)
@@ -1073,7 +1073,7 @@
 by (simp add: is_Inr_def Inr_def)
 
 
-subsubsection {*converse of a relation*}
+subsubsection \<open>converse of a relation\<close>
 
 lemma (in M_basic) M_converse_iff:
      "M(r) ==>
@@ -1103,7 +1103,7 @@
 done
 
 
-subsubsection {*image, preimage, domain, range*}
+subsubsection \<open>image, preimage, domain, range\<close>
 
 lemma (in M_basic) image_closed [intro,simp]:
      "[| M(A); M(r) |] ==> M(r``A)"
@@ -1123,7 +1123,7 @@
 by (simp add: vimage_def)
 
 
-subsubsection{*Domain, range and field*}
+subsubsection\<open>Domain, range and field\<close>
 
 lemma (in M_basic) domain_abs [simp]:
      "[| M(r); M(z) |] ==> is_domain(M,r,z) \<longleftrightarrow> z = domain(r)"
@@ -1156,7 +1156,7 @@
 by (simp add: domain_closed range_closed Un_closed field_def)
 
 
-subsubsection{*Relations, functions and application*}
+subsubsection\<open>Relations, functions and application\<close>
 
 lemma (in M_basic) relation_abs [simp]:
      "M(r) ==> is_relation(M,r) \<longleftrightarrow> relation(r)"
@@ -1201,7 +1201,7 @@
 by (simp add: bijection_def bij_def)
 
 
-subsubsection{*Composition of relations*}
+subsubsection\<open>Composition of relations\<close>
 
 lemma (in M_basic) M_comp_iff:
      "[| M(r); M(s) |]
@@ -1224,17 +1224,17 @@
 lemma (in M_basic) composition_abs [simp]:
      "[| M(r); M(s); M(t) |] ==> composition(M,r,s,t) \<longleftrightarrow> t = r O s"
 apply safe
- txt{*Proving @{term "composition(M, r, s, r O s)"}*}
+ txt\<open>Proving @{term "composition(M, r, s, r O s)"}\<close>
  prefer 2
  apply (simp add: composition_def comp_def)
  apply (blast dest: transM)
-txt{*Opposite implication*}
+txt\<open>Opposite implication\<close>
 apply (rule M_equalityI)
   apply (simp add: composition_def comp_def)
   apply (blast del: allE dest: transM)+
 done
 
-text{*no longer needed*}
+text\<open>no longer needed\<close>
 lemma (in M_basic) restriction_is_function:
      "[| restriction(M,f,A,z); function(f); M(f); M(A); M(z) |]
       ==> function(z)"
@@ -1280,7 +1280,7 @@
      "[|M(A); M(B)|] ==> M(A-B)"
 by (insert Diff_separation, simp add: Diff_def)
 
-subsubsection{*Some Facts About Separation Axioms*}
+subsubsection\<open>Some Facts About Separation Axioms\<close>
 
 lemma (in M_basic) separation_conj:
      "[|separation(M,P); separation(M,Q)|] ==> separation(M, \<lambda>z. P(z) & Q(z))"
@@ -1318,9 +1318,9 @@
       ==> separation(M, \<lambda>z. P(z) \<longrightarrow> Q(z))"
 by (simp add: separation_neg separation_disj not_disj_iff_imp [symmetric])
 
-text{*This result is a hint of how little can be done without the Reflection
+text\<open>This result is a hint of how little can be done without the Reflection
   Theorem.  The quantifier has to be bounded by a set.  We also need another
-  instance of Separation!*}
+  instance of Separation!\<close>
 lemma (in M_basic) separation_rall:
      "[|M(Y); \<forall>y[M]. separation(M, \<lambda>x. P(x,y));
         \<forall>z[M]. strong_replacement(M, \<lambda>x y. y = {u \<in> z . P(u,x)})|]
@@ -1331,10 +1331,10 @@
 done
 
 
-subsubsection{*Functions and function space*}
+subsubsection\<open>Functions and function space\<close>
 
-text{*The assumption @{term "M(A->B)"} is unusual, but essential: in
-all but trivial cases, A->B cannot be expected to belong to @{term M}.*}
+text\<open>The assumption @{term "M(A->B)"} is unusual, but essential: in
+all but trivial cases, A->B cannot be expected to belong to @{term M}.\<close>
 lemma (in M_basic) is_funspace_abs [simp]:
      "[|M(A); M(B); M(F); M(A->B)|] ==> is_funspace(M,A,B,F) \<longleftrightarrow> F = A->B"
 apply (simp add: is_funspace_def)
@@ -1358,9 +1358,9 @@
 apply (force simp add: succ_fun_eq2 univalent_def)
 done
 
-text{*@{term M} contains all finite function spaces.  Needed to prove the
+text\<open>@{term M} contains all finite function spaces.  Needed to prove the
 absoluteness of transitive closure.  See the definition of
-@{text rtrancl_alt} in in @{text WF_absolute.thy}.*}
+@{text rtrancl_alt} in in @{text WF_absolute.thy}.\<close>
 lemma (in M_basic) finite_funspace_closed [intro,simp]:
      "[|n\<in>nat; M(B)|] ==> M(n->B)"
 apply (induct_tac n, simp)
@@ -1368,7 +1368,7 @@
 done
 
 
-subsection{*Relativization and Absoluteness for Boolean Operators*}
+subsection\<open>Relativization and Absoluteness for Boolean Operators\<close>
 
 definition
   is_bool_of_o :: "[i=>o, o, i] => o" where
@@ -1424,16 +1424,16 @@
 by (simp add: Bool.not_def cond_def)
 
 
-subsection{*Relativization and Absoluteness for List Operators*}
+subsection\<open>Relativization and Absoluteness for List Operators\<close>
 
 definition
   is_Nil :: "[i=>o, i] => o" where
-     --{* because @{prop "[] \<equiv> Inl(0)"}*}
+     --\<open>because @{prop "[] \<equiv> Inl(0)"}\<close>
     "is_Nil(M,xs) == \<exists>zero[M]. empty(M,zero) & is_Inl(M,zero,xs)"
 
 definition
   is_Cons :: "[i=>o,i,i,i] => o" where
-     --{* because @{prop "Cons(a, l) \<equiv> Inr(\<langle>a,l\<rangle>)"}*}
+     --\<open>because @{prop "Cons(a, l) \<equiv> Inr(\<langle>a,l\<rangle>)"}\<close>
     "is_Cons(M,a,l,Z) == \<exists>p[M]. pair(M,a,l,p) & is_Inr(M,p,Z)"
 
 
@@ -1461,13 +1461,13 @@
 
 definition
   list_case' :: "[i, [i,i]=>i, i] => i" where
-    --{*A version of @{term list_case} that's always defined.*}
+    --\<open>A version of @{term list_case} that's always defined.\<close>
     "list_case'(a,b,xs) ==
        if quasilist(xs) then list_case(a,b,xs) else 0"
 
 definition
   is_list_case :: "[i=>o, i, [i,i,i]=>o, i, i] => o" where
-    --{*Returns 0 for non-lists*}
+    --\<open>Returns 0 for non-lists\<close>
     "is_list_case(M, a, is_b, xs, z) ==
        (is_Nil(M,xs) \<longrightarrow> z=a) &
        (\<forall>x[M]. \<forall>l[M]. is_Cons(M,x,l,xs) \<longrightarrow> is_b(x,l,z)) &
@@ -1475,18 +1475,18 @@
 
 definition
   hd' :: "i => i" where
-    --{*A version of @{term hd} that's always defined.*}
+    --\<open>A version of @{term hd} that's always defined.\<close>
     "hd'(xs) == if quasilist(xs) then hd(xs) else 0"
 
 definition
   tl' :: "i => i" where
-    --{*A version of @{term tl} that's always defined.*}
+    --\<open>A version of @{term tl} that's always defined.\<close>
     "tl'(xs) == if quasilist(xs) then tl(xs) else 0"
 
 definition
   is_hd :: "[i=>o,i,i] => o" where
-     --{* @{term "hd([]) = 0"} no constraints if not a list.
-          Avoiding implication prevents the simplifier's looping.*}
+     --\<open>@{term "hd([]) = 0"} no constraints if not a list.
+          Avoiding implication prevents the simplifier's looping.\<close>
     "is_hd(M,xs,H) ==
        (is_Nil(M,xs) \<longrightarrow> empty(M,H)) &
        (\<forall>x[M]. \<forall>l[M]. ~ is_Cons(M,x,l,xs) | H=x) &
@@ -1494,13 +1494,13 @@
 
 definition
   is_tl :: "[i=>o,i,i] => o" where
-     --{* @{term "tl([]) = []"}; see comments about @{term is_hd}*}
+     --\<open>@{term "tl([]) = []"}; see comments about @{term is_hd}\<close>
     "is_tl(M,xs,T) ==
        (is_Nil(M,xs) \<longrightarrow> T=xs) &
        (\<forall>x[M]. \<forall>l[M]. ~ is_Cons(M,x,l,xs) | T=l) &
        (is_quasilist(M,xs) | empty(M,T))"
 
-subsubsection{*@{term quasilist}: For Case-Splitting with @{term list_case'}*}
+subsubsection\<open>@{term quasilist}: For Case-Splitting with @{term list_case'}\<close>
 
 lemma [iff]: "quasilist(Nil)"
 by (simp add: quasilist_def)
@@ -1511,7 +1511,7 @@
 lemma list_imp_quasilist: "l \<in> list(A) ==> quasilist(l)"
 by (erule list.cases, simp_all)
 
-subsubsection{*@{term list_case'}, the Modified Version of @{term list_case}*}
+subsubsection\<open>@{term list_case'}, the Modified Version of @{term list_case}\<close>
 
 lemma list_case'_Nil [simp]: "list_case'(a,b,Nil) = a"
 by (simp add: list_case'_def quasilist_def)
@@ -1550,7 +1550,7 @@
 done
 
 
-subsubsection{*The Modified Operators @{term hd'} and @{term tl'}*}
+subsubsection\<open>The Modified Operators @{term hd'} and @{term tl'}\<close>
 
 lemma (in M_trivial) is_hd_Nil: "is_hd(M,[],Z) \<longleftrightarrow> empty(M,Z)"
 by (simp add: is_hd_def)
--- a/src/ZF/Constructible/Satisfies_absolute.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/Satisfies_absolute.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,14 +2,14 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section {*Absoluteness for the Satisfies Relation on Formulas*}
+section \<open>Absoluteness for the Satisfies Relation on Formulas\<close>
 
 theory Satisfies_absolute imports Datatype_absolute Rec_Separation begin 
 
 
-subsection {*More Internalization*}
+subsection \<open>More Internalization\<close>
 
-subsubsection{*The Formula @{term is_depth}, Internalized*}
+subsubsection\<open>The Formula @{term is_depth}, Internalized\<close>
 
 (*    "is_depth(M,p,n) == 
        \<exists>sn[M]. \<exists>formula_n[M]. \<exists>formula_sn[M]. 
@@ -52,10 +52,10 @@
 
 
 
-subsubsection{*The Operator @{term is_formula_case}*}
+subsubsection\<open>The Operator @{term is_formula_case}\<close>
 
-text{*The arguments of @{term is_a} are always 2, 1, 0, and the formula
-      will be enclosed by three quantifiers.*}
+text\<open>The arguments of @{term is_a} are always 2, 1, 0, and the formula
+      will be enclosed by three quantifiers.\<close>
 
 (* is_formula_case :: 
     "[i=>o, [i,i,i]=>o, [i,i,i]=>o, [i,i,i]=>o, [i,i]=>o, i, i] => o"
@@ -145,9 +145,9 @@
                                        is_c_iff_sats is_d_iff_sats])
 
 
-text{*The second argument of @{term is_a} gives it direct access to @{term x},
+text\<open>The second argument of @{term is_a} gives it direct access to @{term x},
   which is essential for handling free variable references.  Treatment is
-  based on that of @{text is_nat_case_reflection}.*}
+  based on that of @{text is_nat_case_reflection}.\<close>
 theorem is_formula_case_reflection:
   assumes is_a_reflection:
     "!!h f g g'. REFLECTS[\<lambda>x. is_a(L, h(x), f(x), g(x), g'(x)),
@@ -172,11 +172,11 @@
 
 
 
-subsection {*Absoluteness for the Function @{term satisfies}*}
+subsection \<open>Absoluteness for the Function @{term satisfies}\<close>
 
 definition
   is_depth_apply :: "[i=>o,i,i,i] => o" where
-   --{*Merely a useful abbreviation for the sequel.*}
+   --\<open>Merely a useful abbreviation for the sequel.\<close>
   "is_depth_apply(M,h,p,z) ==
     \<exists>dp[M]. \<exists>sdp[M]. \<exists>hsdp[M]. 
         finite_ordinal(M,dp) & is_depth(M,p,dp) & successor(M,dp,sdp) &
@@ -189,11 +189,11 @@
 
 
 
-text{*There is at present some redundancy between the relativizations in
- e.g. @{text satisfies_is_a} and those in e.g. @{text Member_replacement}.*}
+text\<open>There is at present some redundancy between the relativizations in
+ e.g. @{text satisfies_is_a} and those in e.g. @{text Member_replacement}.\<close>
 
-text{*These constants let us instantiate the parameters @{term a}, @{term b},
-      @{term c}, @{term d}, etc., of the locale @{text Formula_Rec}.*}
+text\<open>These constants let us instantiate the parameters @{term a}, @{term b},
+      @{term c}, @{term d}, etc., of the locale @{text Formula_Rec}.\<close>
 definition
   satisfies_a :: "[i,i,i]=>i" where
    "satisfies_a(A) == 
@@ -216,8 +216,8 @@
 
 definition
   satisfies_is_b :: "[i=>o,i,i,i,i]=>o" where
-   --{*We simplify the formula to have just @{term nx} rather than 
-       introducing @{term ny} with  @{term "nx=ny"} *}
+   --\<open>We simplify the formula to have just @{term nx} rather than 
+       introducing @{term ny} with  @{term "nx=ny"}\<close>
   "satisfies_is_b(M,A) == 
     \<lambda>x y zz. \<forall>lA[M]. is_list(M,A,lA) \<longrightarrow>
              is_lambda(M, lA, 
@@ -259,8 +259,8 @@
 
 definition
   satisfies_MH :: "[i=>o,i,i,i,i]=>o" where
-    --{*The variable @{term u} is unused, but gives @{term satisfies_MH} 
-        the correct arity.*}
+    --\<open>The variable @{term u} is unused, but gives @{term satisfies_MH} 
+        the correct arity.\<close>
   "satisfies_MH == 
     \<lambda>M A u f z. 
          \<forall>fml[M]. is_formula(M,fml) \<longrightarrow>
@@ -275,9 +275,9 @@
   "is_satisfies(M,A) == is_formula_rec (M, satisfies_MH(M,A))"
 
 
-text{*This lemma relates the fragments defined above to the original primitive
+text\<open>This lemma relates the fragments defined above to the original primitive
       recursion in @{term satisfies}.
-      Induction is not required: the definitions are directly equal!*}
+      Induction is not required: the definitions are directly equal!\<close>
 lemma satisfies_eq:
   "satisfies(A,p) = 
    formula_rec (satisfies_a(A), satisfies_b(A), 
@@ -285,9 +285,9 @@
 by (simp add: satisfies_formula_def satisfies_a_def satisfies_b_def 
               satisfies_c_def satisfies_d_def) 
 
-text{*Further constraints on the class @{term M} in order to prove
+text\<open>Further constraints on the class @{term M} in order to prove
       absoluteness for the constants defined above.  The ultimate goal
-      is the absoluteness of the function @{term satisfies}. *}
+      is the absoluteness of the function @{term satisfies}.\<close>
 locale M_satisfies = M_eclose +
  assumes 
    Member_replacement:
@@ -327,11 +327,11 @@
               pair(M,env,bo,z))"
  and
   formula_rec_replacement: 
-      --{*For the @{term transrec}*}
+      --\<open>For the @{term transrec}\<close>
    "[|n \<in> nat; M(A)|] ==> transrec_replacement(M, satisfies_MH(M,A), n)"
  and
   formula_rec_lambda_replacement:  
-      --{*For the @{text "\<lambda>-abstraction"} in the @{term transrec} body*}
+      --\<open>For the @{text "\<lambda>-abstraction"} in the @{term transrec} body\<close>
    "[|M(g); M(A)|] ==>
     strong_replacement (M, 
        \<lambda>x y. mem_formula(M,x) &
@@ -460,8 +460,8 @@
 
 
 
-text{*Instantiate locale @{text Formula_Rec} for the 
-      Function @{term satisfies}*}
+text\<open>Instantiate locale @{text Formula_Rec} for the 
+      Function @{term satisfies}\<close>
 
 lemma (in M_satisfies) Formula_Rec_axioms_M:
    "M(A) ==>
@@ -505,9 +505,9 @@
                satisfies_eq is_satisfies_def satisfies_MH_def)
 
 
-subsection{*Internalizations Needed to Instantiate @{text "M_satisfies"}*}
+subsection\<open>Internalizations Needed to Instantiate @{text "M_satisfies"}\<close>
 
-subsubsection{*The Operator @{term is_depth_apply}, Internalized*}
+subsubsection\<open>The Operator @{term is_depth_apply}, Internalized\<close>
 
 (* is_depth_apply(M,h,p,z) ==
     \<exists>dp[M]. \<exists>sdp[M]. \<exists>hsdp[M]. 
@@ -548,7 +548,7 @@
 done
 
 
-subsubsection{*The Operator @{term satisfies_is_a}, Internalized*}
+subsubsection\<open>The Operator @{term satisfies_is_a}, Internalized\<close>
 
 (* satisfies_is_a(M,A) == 
     \<lambda>x y zz. \<forall>lA[M]. is_list(M,A,lA) \<longrightarrow>
@@ -601,7 +601,7 @@
 done
 
 
-subsubsection{*The Operator @{term satisfies_is_b}, Internalized*}
+subsubsection\<open>The Operator @{term satisfies_is_b}, Internalized\<close>
 
 (* satisfies_is_b(M,A) == 
     \<lambda>x y zz. \<forall>lA[M]. is_list(M,A,lA) \<longrightarrow>
@@ -650,7 +650,7 @@
 done
 
 
-subsubsection{*The Operator @{term satisfies_is_c}, Internalized*}
+subsubsection\<open>The Operator @{term satisfies_is_c}, Internalized\<close>
 
 (* satisfies_is_c(M,A,h) == 
     \<lambda>p q zz. \<forall>lA[M]. is_list(M,A,lA) \<longrightarrow>
@@ -701,7 +701,7 @@
              is_list_reflection)
 done
 
-subsubsection{*The Operator @{term satisfies_is_d}, Internalized*}
+subsubsection\<open>The Operator @{term satisfies_is_d}, Internalized\<close>
 
 (* satisfies_is_d(M,A,h) == 
     \<lambda>p zz. \<forall>lA[M]. is_list(M,A,lA) \<longrightarrow>
@@ -758,7 +758,7 @@
 done
 
 
-subsubsection{*The Operator @{term satisfies_MH}, Internalized*}
+subsubsection\<open>The Operator @{term satisfies_MH}, Internalized\<close>
 
 (* satisfies_MH == 
     \<lambda>M A u f zz. 
@@ -815,10 +815,10 @@
 done
 
 
-subsection{*Lemmas for Instantiating the Locale @{text "M_satisfies"}*}
+subsection\<open>Lemmas for Instantiating the Locale @{text "M_satisfies"}\<close>
 
 
-subsubsection{*The @{term "Member"} Case*}
+subsubsection\<open>The @{term "Member"} Case\<close>
 
 lemma Member_Reflects:
  "REFLECTS[\<lambda>u. \<exists>v[L]. v \<in> B \<and> (\<exists>bo[L]. \<exists>nx[L]. \<exists>ny[L].
@@ -848,7 +848,7 @@
 done
 
 
-subsubsection{*The @{term "Equal"} Case*}
+subsubsection\<open>The @{term "Equal"} Case\<close>
 
 lemma Equal_Reflects:
  "REFLECTS[\<lambda>u. \<exists>v[L]. v \<in> B \<and> (\<exists>bo[L]. \<exists>nx[L]. \<exists>ny[L].
@@ -877,7 +877,7 @@
 apply (rule sep_rules nth_iff_sats is_bool_of_o_iff_sats | simp)+
 done
 
-subsubsection{*The @{term "Nand"} Case*}
+subsubsection\<open>The @{term "Nand"} Case\<close>
 
 lemma Nand_Reflects:
     "REFLECTS [\<lambda>x. \<exists>u[L]. u \<in> B \<and>
@@ -910,7 +910,7 @@
 done
 
 
-subsubsection{*The @{term "Forall"} Case*}
+subsubsection\<open>The @{term "Forall"} Case\<close>
 
 lemma Forall_Reflects:
  "REFLECTS [\<lambda>x. \<exists>u[L]. u \<in> B \<and> (\<exists>bo[L]. u \<in> list(A) \<and>
@@ -948,7 +948,7 @@
 apply (rule sep_rules is_bool_of_o_iff_sats Cons_iff_sats | simp)+
 done
 
-subsubsection{*The @{term "transrec_replacement"} Case*}
+subsubsection\<open>The @{term "transrec_replacement"} Case\<close>
 
 lemma formula_rec_replacement_Reflects:
  "REFLECTS [\<lambda>x. \<exists>u[L]. u \<in> B \<and> (\<exists>y[L]. pair(L, u, y, x) \<and>
@@ -959,7 +959,7 @@
           is_wfrec_reflection) 
 
 lemma formula_rec_replacement: 
-      --{*For the @{term transrec}*}
+      --\<open>For the @{term transrec}\<close>
    "[|n \<in> nat; L(A)|] ==> transrec_replacement(L, satisfies_MH(L,A), n)"
 apply (rule transrec_replacementI, simp add: nat_into_M) 
 apply (rule strong_replacementI)
@@ -971,7 +971,7 @@
 done
 
 
-subsubsection{*The Lambda Replacement Case*}
+subsubsection\<open>The Lambda Replacement Case\<close>
 
 lemma formula_rec_lambda_replacement_Reflects:
  "REFLECTS [\<lambda>x. \<exists>u[L]. u \<in> B &
@@ -995,7 +995,7 @@
           satisfies_is_d_reflection)  
 
 lemma formula_rec_lambda_replacement: 
-      --{*For the @{term transrec}*}
+      --\<open>For the @{term transrec}\<close>
    "[|L(g); L(A)|] ==>
     strong_replacement (L, 
        \<lambda>x y. mem_formula(L,x) &
@@ -1016,7 +1016,7 @@
 done
 
 
-subsection{*Instantiating @{text M_satisfies}*}
+subsection\<open>Instantiating @{text M_satisfies}\<close>
 
 lemma M_satisfies_axioms_L: "M_satisfies_axioms(L)"
   apply (rule M_satisfies_axioms.intro)
@@ -1032,7 +1032,7 @@
   apply (rule M_satisfies_axioms_L)
   done
 
-text{*Finally: the point of the whole theory!*}
+text\<open>Finally: the point of the whole theory!\<close>
 lemmas satisfies_closed = M_satisfies.satisfies_closed [OF M_satisfies_L]
    and satisfies_abs = M_satisfies.satisfies_abs [OF M_satisfies_L]
 
--- a/src/ZF/Constructible/Separation.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/Separation.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,13 +2,13 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section{*Early Instances of Separation and Strong Replacement*}
+section\<open>Early Instances of Separation and Strong Replacement\<close>
 
 theory Separation imports L_axioms WF_absolute begin
 
-text{*This theory proves all instances needed for locale @{text "M_basic"}*}
+text\<open>This theory proves all instances needed for locale @{text "M_basic"}\<close>
 
-text{*Helps us solve for de Bruijn indices!*}
+text\<open>Helps us solve for de Bruijn indices!\<close>
 lemma nth_ConsI: "[|nth(n,l) = x; n \<in> nat|] ==> nth(succ(n), Cons(a,l)) = x"
 by simp
 
@@ -36,7 +36,7 @@
 apply simp_all
 done
 
-text{*Reduces the original comprehension to the reflected one*}
+text\<open>Reduces the original comprehension to the reflected one\<close>
 lemma reflection_imp_L_separation:
       "[| \<forall>x\<in>Lset(j). P(x) \<longleftrightarrow> Q(x);
           {x \<in> Lset(j) . Q(x)} \<in> DPow(Lset(j));
@@ -49,8 +49,8 @@
 apply (simp add: Lset_succ Collect_conj_in_DPow_Lset)
 done
 
-text{*Encapsulates the standard proof script for proving instances of 
-      Separation.*}
+text\<open>Encapsulates the standard proof script for proving instances of 
+      Separation.\<close>
 lemma gen_separation:
  assumes reflection: "REFLECTS [P,Q]"
      and Lu:         "L(u)"
@@ -66,10 +66,10 @@
 apply (rule collI, assumption)
 done
 
-text{*As above, but typically @{term u} is a finite enumeration such as
+text\<open>As above, but typically @{term u} is a finite enumeration such as
   @{term "{a,b}"}; thus the new subgoal gets the assumption
   @{term "{a,b} \<subseteq> Lset(i)"}, which is logically equivalent to 
-  @{term "a \<in> Lset(i)"} and @{term "b \<in> Lset(i)"}.*}
+  @{term "a \<in> Lset(i)"} and @{term "b \<in> Lset(i)"}.\<close>
 lemma gen_separation_multi:
  assumes reflection: "REFLECTS [P,Q]"
      and Lu:         "L(u)"
@@ -82,7 +82,7 @@
 done
 
 
-subsection{*Separation for Intersection*}
+subsection\<open>Separation for Intersection\<close>
 
 lemma Inter_Reflects:
      "REFLECTS[\<lambda>x. \<forall>y[L]. y\<in>A \<longrightarrow> x \<in> y,
@@ -93,8 +93,8 @@
      "L(A) ==> separation(L, \<lambda>x. \<forall>y[L]. y\<in>A \<longrightarrow> x\<in>y)"
 apply (rule gen_separation [OF Inter_Reflects], simp)
 apply (rule DPow_LsetI)
- txt{*I leave this one example of a manual proof.  The tedium of manually
-      instantiating @{term i}, @{term j} and @{term env} is obvious. *}
+ txt\<open>I leave this one example of a manual proof.  The tedium of manually
+      instantiating @{term i}, @{term j} and @{term env} is obvious.\<close>
 apply (rule ball_iff_sats)
 apply (rule imp_iff_sats)
 apply (rule_tac [2] i=1 and j=0 and env="[y,x,A]" in mem_iff_sats)
@@ -102,7 +102,7 @@
 apply (simp_all add: succ_Un_distrib [symmetric])
 done
 
-subsection{*Separation for Set Difference*}
+subsection\<open>Separation for Set Difference\<close>
 
 lemma Diff_Reflects:
      "REFLECTS[\<lambda>x. x \<notin> B, \<lambda>i x. x \<notin> B]"
@@ -115,7 +115,7 @@
 apply (rule sep_rules | simp)+
 done
 
-subsection{*Separation for Cartesian Product*}
+subsection\<open>Separation for Cartesian Product\<close>
 
 lemma cartprod_Reflects:
      "REFLECTS[\<lambda>z. \<exists>x[L]. x\<in>A & (\<exists>y[L]. y\<in>B & pair(L,x,y,z)),
@@ -131,7 +131,7 @@
 apply (rule sep_rules | simp)+
 done
 
-subsection{*Separation for Image*}
+subsection\<open>Separation for Image\<close>
 
 lemma image_Reflects:
      "REFLECTS[\<lambda>y. \<exists>p[L]. p\<in>r & (\<exists>x[L]. x\<in>A & pair(L,x,y,p)),
@@ -147,7 +147,7 @@
 done
 
 
-subsection{*Separation for Converse*}
+subsection\<open>Separation for Converse\<close>
 
 lemma converse_Reflects:
   "REFLECTS[\<lambda>z. \<exists>p[L]. p\<in>r & (\<exists>x[L]. \<exists>y[L]. pair(L,x,y,p) & pair(L,y,x,z)),
@@ -164,7 +164,7 @@
 done
 
 
-subsection{*Separation for Restriction*}
+subsection\<open>Separation for Restriction\<close>
 
 lemma restrict_Reflects:
      "REFLECTS[\<lambda>z. \<exists>x[L]. x\<in>A & (\<exists>y[L]. pair(L,x,y,z)),
@@ -179,7 +179,7 @@
 done
 
 
-subsection{*Separation for Composition*}
+subsection\<open>Separation for Composition\<close>
 
 lemma comp_Reflects:
      "REFLECTS[\<lambda>xz. \<exists>x[L]. \<exists>y[L]. \<exists>z[L]. \<exists>xy[L]. \<exists>yz[L].
@@ -196,16 +196,16 @@
                   pair(L,x,z,xz) & pair(L,x,y,xy) & pair(L,y,z,yz) &
                   xy\<in>s & yz\<in>r)"
 apply (rule gen_separation_multi [OF comp_Reflects, of "{r,s}"], auto)
-txt{*Subgoals after applying general ``separation'' rule:
-     @{subgoals[display,indent=0,margin=65]}*}
+txt\<open>Subgoals after applying general ``separation'' rule:
+     @{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule_tac env="[r,s]" in DPow_LsetI)
-txt{*Subgoals ready for automatic synthesis of a formula:
-     @{subgoals[display,indent=0,margin=65]}*}
+txt\<open>Subgoals ready for automatic synthesis of a formula:
+     @{subgoals[display,indent=0,margin=65]}\<close>
 apply (rule sep_rules | simp)+
 done
 
 
-subsection{*Separation for Predecessors in an Order*}
+subsection\<open>Separation for Predecessors in an Order\<close>
 
 lemma pred_Reflects:
      "REFLECTS[\<lambda>y. \<exists>p[L]. p\<in>r & pair(L,y,x,p),
@@ -220,7 +220,7 @@
 done
 
 
-subsection{*Separation for the Membership Relation*}
+subsection\<open>Separation for the Membership Relation\<close>
 
 lemma Memrel_Reflects:
      "REFLECTS[\<lambda>z. \<exists>x[L]. \<exists>y[L]. pair(L,x,y,z) & x \<in> y,
@@ -235,7 +235,7 @@
 done
 
 
-subsection{*Replacement for FunSpace*}
+subsection\<open>Replacement for FunSpace\<close>
 
 lemma funspace_succ_Reflects:
  "REFLECTS[\<lambda>z. \<exists>p[L]. p\<in>A & (\<exists>f[L]. \<exists>b[L]. \<exists>nb[L]. \<exists>cnbf[L].
@@ -260,7 +260,7 @@
 done
 
 
-subsection{*Separation for a Theorem about @{term "is_recfun"}*}
+subsection\<open>Separation for a Theorem about @{term "is_recfun"}\<close>
 
 lemma is_recfun_reflects:
   "REFLECTS[\<lambda>x. \<exists>xa[L]. \<exists>xb[L].
@@ -274,7 +274,7 @@
 by (intro FOL_reflections function_reflections fun_plus_reflections)
 
 lemma is_recfun_separation:
-     --{*for well-founded recursion*}
+     --\<open>for well-founded recursion\<close>
      "[| L(r); L(f); L(g); L(a); L(b) |]
      ==> separation(L,
             \<lambda>x. \<exists>xa[L]. \<exists>xb[L].
@@ -288,9 +288,9 @@
 done
 
 
-subsection{*Instantiating the locale @{text M_basic}*}
-text{*Separation (and Strong Replacement) for basic set-theoretic constructions
-such as intersection, Cartesian Product and image.*}
+subsection\<open>Instantiating the locale @{text M_basic}\<close>
+text\<open>Separation (and Strong Replacement) for basic set-theoretic constructions
+such as intersection, Cartesian Product and image.\<close>
 
 lemma M_basic_axioms_L: "M_basic_axioms(L)"
   apply (rule M_basic_axioms.intro)
--- a/src/ZF/Constructible/WF_absolute.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/WF_absolute.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,11 +2,11 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section {*Absoluteness of Well-Founded Recursion*}
+section \<open>Absoluteness of Well-Founded Recursion\<close>
 
 theory WF_absolute imports WFrec begin
 
-subsection{*Transitive closure without fixedpoints*}
+subsection\<open>Transitive closure without fixedpoints\<close>
 
 definition
   rtrancl_alt :: "[i,i]=>i" where
@@ -38,11 +38,11 @@
 apply (simp add: rtrancl_alt_def, clarify)
 apply (frule rtrancl_type [THEN subsetD], clarify, simp)
 apply (erule rtrancl_induct)
- txt{*Base case, trivial*}
+ txt\<open>Base case, trivial\<close>
  apply (rule_tac x=0 in bexI)
   apply (rule_tac x="\<lambda>x\<in>1. xa" in bexI)
    apply simp_all
-txt{*Inductive step*}
+txt\<open>Inductive step\<close>
 apply clarify
 apply (rename_tac n f)
 apply (rule_tac x="succ(n)" in bexI)
@@ -60,7 +60,7 @@
 
 definition
   rtran_closure_mem :: "[i=>o,i,i,i] => o" where
-    --{*The property of belonging to @{text "rtran_closure(r)"}*}
+    --\<open>The property of belonging to @{text "rtran_closure(r)"}\<close>
     "rtran_closure_mem(M,A,r,p) ==
               \<exists>nnat[M]. \<exists>n[M]. \<exists>n'[M]. 
                omega(M,nnat) & n\<in>nnat & successor(M,n,n') &
@@ -120,7 +120,7 @@
 lemma (in M_trancl) rtrancl_abs [simp]:
      "[| M(r); M(z) |] ==> rtran_closure(M,r,z) \<longleftrightarrow> z = rtrancl(r)"
 apply (rule iffI)
- txt{*Proving the right-to-left implication*}
+ txt\<open>Proving the right-to-left implication\<close>
  prefer 2 apply (blast intro: rtran_closure_rtrancl)
 apply (rule M_equalityI)
 apply (simp add: rtran_closure_def rtrancl_alt_eq_rtrancl [symmetric]
@@ -140,8 +140,8 @@
      "[| M(r); M(Z) |] ==> separation (M, \<lambda>x. \<exists>w[M]. w \<in> Z & <w,x> \<in> r^+)"
 by (insert wellfounded_trancl_separation [of r Z], simp) 
 
-text{*Alternative proof of @{text wf_on_trancl}; inspiration for the
-      relativized version.  Original version is on theory WF.*}
+text\<open>Alternative proof of @{text wf_on_trancl}; inspiration for the
+      relativized version.  Original version is on theory WF.\<close>
 lemma "[| wf[A](r);  r-``A \<subseteq> A |] ==> wf[A](r^+)"
 apply (simp add: wf_on_def wf_def)
 apply (safe intro!: equalityI)
@@ -176,7 +176,7 @@
 done
 
 
-text{*Absoluteness for wfrec-defined functions.*}
+text\<open>Absoluteness for wfrec-defined functions.\<close>
 
 (*first use is_recfun, then M_is_recfun*)
 
@@ -200,9 +200,9 @@
 done
 
 
-text{*Assuming @{term r} is transitive simplifies the occurrences of @{text H}.
+text\<open>Assuming @{term r} is transitive simplifies the occurrences of @{text H}.
       The premise @{term "relation(r)"} is necessary 
-      before we can replace @{term "r^+"} by @{term r}. *}
+      before we can replace @{term "r^+"} by @{term r}.\<close>
 theorem (in M_trancl) trans_wfrec_relativize:
   "[|wf(r);  trans(r);  relation(r);  M(r);  M(a);
      wfrec_replacement(M,MH,r);  relation2(M,MH,H);
@@ -230,15 +230,15 @@
        (\<exists>f[M]. is_recfun(r,x,H,f) & y = <x, H(x,f)>)"
 apply safe 
  apply (simp add: trans_wfrec_relativize [THEN iff_sym, of concl: _ x]) 
-txt{*converse direction*}
+txt\<open>converse direction\<close>
 apply (rule sym)
 apply (simp add: trans_wfrec_relativize, blast) 
 done
 
 
-subsection{*M is closed under well-founded recursion*}
+subsection\<open>M is closed under well-founded recursion\<close>
 
-text{*Lemma with the awkward premise mentioning @{text wfrec}.*}
+text\<open>Lemma with the awkward premise mentioning @{text wfrec}.\<close>
 lemma (in M_trancl) wfrec_closed_lemma [rule_format]:
      "[|wf(r); M(r); 
         strong_replacement(M, \<lambda>x y. y = \<langle>x, wfrec(r, x, H)\<rangle>);
@@ -252,7 +252,7 @@
 apply (blast intro: lam_closed dest: pair_components_in_M) 
 done
 
-text{*Eliminates one instance of replacement.*}
+text\<open>Eliminates one instance of replacement.\<close>
 lemma (in M_trancl) wfrec_replacement_iff:
      "strong_replacement(M, \<lambda>x z. 
           \<exists>y[M]. pair(M,x,y,z) & (\<exists>g[M]. is_recfun(r,x,H,g) & y = H(x,g))) \<longleftrightarrow>
@@ -262,7 +262,7 @@
 apply (rule strong_replacement_cong, blast) 
 done
 
-text{*Useful version for transitive relations*}
+text\<open>Useful version for transitive relations\<close>
 theorem (in M_trancl) trans_wfrec_closed:
      "[|wf(r); trans(r); relation(r); M(r); M(a);
        wfrec_replacement(M,MH,r);  relation2(M,MH,H);
@@ -274,7 +274,7 @@
 apply (simp_all add: wfrec_replacement_iff trans_eq_pair_wfrec_iff) 
 done
 
-subsection{*Absoluteness without assuming transitivity*}
+subsection\<open>Absoluteness without assuming transitivity\<close>
 lemma (in M_trancl) eq_pair_wfrec_iff:
   "[|wf(r);  M(r);  M(y); 
      strong_replacement(M, \<lambda>x z. \<exists>y[M]. \<exists>g[M].
@@ -287,12 +287,12 @@
             y = <x, H(x,restrict(f,r-``{x}))>)"
 apply safe  
  apply (simp add: wfrec_relativize [THEN iff_sym, of concl: _ x]) 
-txt{*converse direction*}
+txt\<open>converse direction\<close>
 apply (rule sym)
 apply (simp add: wfrec_relativize, blast) 
 done
 
-text{*Full version not assuming transitivity, but maybe not very useful.*}
+text\<open>Full version not assuming transitivity, but maybe not very useful.\<close>
 theorem (in M_trancl) wfrec_closed:
      "[|wf(r); M(r); M(a);
         wfrec_replacement(M,MH,r^+);  
--- a/src/ZF/Constructible/WFrec.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/WFrec.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,12 +2,12 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section{*Relativized Well-Founded Recursion*}
+section\<open>Relativized Well-Founded Recursion\<close>
 
 theory WFrec imports Wellorderings begin
 
 
-subsection{*General Lemmas*}
+subsection\<open>General Lemmas\<close>
 
 (*Many of these might be useful in WF.thy*)
 
@@ -18,7 +18,7 @@
 apply (simp add: function_apply_equality [OF _ is_recfun_imp_function])
 done
 
-text{*Expresses @{text is_recfun} as a recursion equation*}
+text\<open>Expresses @{text is_recfun} as a recursion equation\<close>
 lemma is_recfun_iff_equation:
      "is_recfun(r,a,H,f) \<longleftrightarrow>
            f \<in> r -`` {a} \<rightarrow> range(f) &
@@ -56,21 +56,21 @@
 apply (simp_all add: vimage_singleton_iff Int_lower2)  
 done
 
-text{*For @{text is_recfun} we need only pay attention to functions
-      whose domains are initial segments of @{term r}.*}
+text\<open>For @{text is_recfun} we need only pay attention to functions
+      whose domains are initial segments of @{term r}.\<close>
 lemma is_recfun_cong:
   "[| r = r'; a = a'; f = f'; 
       !!x g. [| <x,a'> \<in> r'; relation(g); domain(g) \<subseteq> r' -``{x} |] 
              ==> H(x,g) = H'(x,g) |]
    ==> is_recfun(r,a,H,f) \<longleftrightarrow> is_recfun(r',a',H',f')"
 apply (rule iffI)
-txt{*Messy: fast and blast don't work for some reason*}
+txt\<open>Messy: fast and blast don't work for some reason\<close>
 apply (erule is_recfun_cong_lemma, auto) 
 apply (erule is_recfun_cong_lemma)
 apply (blast intro: sym)+
 done
 
-subsection{*Reworking of the Recursion Theory Within @{term M}*}
+subsection\<open>Reworking of the Recursion Theory Within @{term M}\<close>
 
 lemma (in M_basic) is_recfun_separation':
     "[| f \<in> r -`` {a} \<rightarrow> range(f); g \<in> r -`` {b} \<rightarrow> range(g);
@@ -80,13 +80,13 @@
 apply (simp add: vimage_singleton_iff)
 done
 
-text{*Stated using @{term "trans(r)"} rather than
+text\<open>Stated using @{term "trans(r)"} rather than
       @{term "transitive_rel(M,A,r)"} because the latter rewrites to
       the former anyway, by @{text transitive_rel_abs}.
       As always, theorems should be expressed in simplified form.
       The last three M-premises are redundant because of @{term "M(r)"}, 
       but without them we'd have to undertake
-      more work to set up the induction formula.*}
+      more work to set up the induction formula.\<close>
 lemma (in M_basic) is_recfun_equal [rule_format]: 
     "[|is_recfun(r,a,H,f);  is_recfun(r,b,H,g);  
        wellfounded(M,r);  trans(r);
@@ -96,9 +96,9 @@
 apply (frule_tac f=g in is_recfun_type) 
 apply (simp add: is_recfun_def)
 apply (erule_tac a=x in wellfounded_induct, assumption+)
-txt{*Separation to justify the induction*}
+txt\<open>Separation to justify the induction\<close>
  apply (blast intro: is_recfun_separation') 
-txt{*Now the inductive argument itself*}
+txt\<open>Now the inductive argument itself\<close>
 apply clarify 
 apply (erule ssubst)+
 apply (simp (no_asm_simp) add: vimage_singleton_iff restrict_def)
@@ -130,7 +130,7 @@
 apply (blast intro!: is_recfun_equal dest: transM) 
 done 
 
-text{*Tells us that @{text is_recfun} can (in principle) be relativized.*}
+text\<open>Tells us that @{text is_recfun} can (in principle) be relativized.\<close>
 lemma (in M_basic) is_recfun_relativize:
   "[| M(r); M(f); \<forall>x[M]. \<forall>g[M]. function(g) \<longrightarrow> M(H(x,g)) |] 
    ==> is_recfun(r,a,H,f) \<longleftrightarrow>
@@ -145,8 +145,8 @@
  apply simp  
  apply blast
 apply (subgoal_tac "is_function(M,f)")
- txt{*We use @{term "is_function"} rather than @{term "function"} because
-      the subgoal's easier to prove with relativized quantifiers!*}
+ txt\<open>We use @{term "is_function"} rather than @{term "function"} because
+      the subgoal's easier to prove with relativized quantifiers!\<close>
  prefer 2 apply (simp add: is_function_def) 
 apply (frule pair_components_in_M, assumption) 
 apply (simp add: is_recfun_imp_function function_restrictI) 
@@ -180,14 +180,14 @@
        ==> restrict(Y, r -`` {x}) = f"
 apply (subgoal_tac "\<forall>y \<in> r-``{x}. \<forall>z. <y,z>:Y \<longleftrightarrow> <y,z>:f") 
  apply (simp (no_asm_simp) add: restrict_def) 
- apply (thin_tac "rall(M,P)" for P)+  --{*essential for efficiency*}
+ apply (thin_tac "rall(M,P)" for P)+  --\<open>essential for efficiency\<close>
  apply (frule is_recfun_type [THEN fun_is_rel], blast)
 apply (frule pair_components_in_M, assumption, clarify) 
 apply (rule iffI)
  apply (frule_tac y="<y,z>" in transM, assumption)
  apply (clarsimp simp add: vimage_singleton_iff is_recfun_type [THEN apply_iff]
                            apply_recfun is_recfun_cut) 
-txt{*Opposite inclusion: something in f, show in Y*}
+txt\<open>Opposite inclusion: something in f, show in Y\<close>
 apply (frule_tac y="<y,z>" in transM, assumption)  
 apply (simp add: vimage_singleton_iff) 
 apply (rule conjI) 
@@ -197,7 +197,7 @@
                      apply_recfun is_recfun_type [THEN apply_iff]) 
 done
 
-text{*For typical applications of Replacement for recursive definitions*}
+text\<open>For typical applications of Replacement for recursive definitions\<close>
 lemma (in M_basic) univalent_is_recfun:
      "[|wellfounded(M,r); trans(r); M(r)|]
       ==> univalent (M, A, \<lambda>x p. 
@@ -207,8 +207,8 @@
 done
 
 
-text{*Proof of the inductive step for @{text exists_is_recfun}, since
-      we must prove two versions.*}
+text\<open>Proof of the inductive step for @{text exists_is_recfun}, since
+      we must prove two versions.\<close>
 lemma (in M_basic) exists_is_recfun_indstep:
     "[|\<forall>y. \<langle>y, a1\<rangle> \<in> r \<longrightarrow> (\<exists>f[M]. is_recfun(r, y, H, f)); 
        wellfounded(M,r); trans(r); M(r); M(a1);
@@ -218,30 +218,30 @@
       ==> \<exists>f[M]. is_recfun(r,a1,H,f)"
 apply (drule_tac A="r-``{a1}" in strong_replacementD)
   apply blast 
- txt{*Discharge the "univalent" obligation of Replacement*}
+ txt\<open>Discharge the "univalent" obligation of Replacement\<close>
  apply (simp add: univalent_is_recfun) 
-txt{*Show that the constructed object satisfies @{text is_recfun}*} 
+txt\<open>Show that the constructed object satisfies @{text is_recfun}\<close> 
 apply clarify 
 apply (rule_tac x=Y in rexI)  
-txt{*Unfold only the top-level occurrence of @{term is_recfun}*}
+txt\<open>Unfold only the top-level occurrence of @{term is_recfun}\<close>
 apply (simp (no_asm_simp) add: is_recfun_relativize [of concl: _ a1])
-txt{*The big iff-formula defining @{term Y} is now redundant*}
+txt\<open>The big iff-formula defining @{term Y} is now redundant\<close>
 apply safe 
  apply (simp add: vimage_singleton_iff restrict_Y_lemma [of r H _ a1]) 
-txt{*one more case*}
+txt\<open>one more case\<close>
 apply (simp (no_asm_simp) add: Bex_def vimage_singleton_iff)
 apply (drule_tac x1=x in spec [THEN mp], assumption, clarify) 
 apply (rename_tac f) 
 apply (rule_tac x=f in rexI) 
 apply (simp_all add: restrict_Y_lemma [of r H])
-txt{*FIXME: should not be needed!*}
+txt\<open>FIXME: should not be needed!\<close>
 apply (subst restrict_Y_lemma [of r H])
 apply (simp add: vimage_singleton_iff)+
 apply blast+
 done
 
-text{*Relativized version, when we have the (currently weaker) premise
-      @{term "wellfounded(M,r)"}*}
+text\<open>Relativized version, when we have the (currently weaker) premise
+      @{term "wellfounded(M,r)"}\<close>
 lemma (in M_basic) wellfounded_exists_is_recfun:
     "[|wellfounded(M,r);  trans(r);  
        separation(M, \<lambda>x. ~ (\<exists>f[M]. is_recfun(r, x, H, f)));
@@ -268,7 +268,7 @@
 done
 
 
-subsection{*Relativization of the ZF Predicate @{term is_recfun}*}
+subsection\<open>Relativization of the ZF Predicate @{term is_recfun}\<close>
 
 definition
   M_is_recfun :: "[i=>o, [i,i,i]=>o, i, i, i] => o" where
@@ -312,7 +312,7 @@
           (\<exists>g[M]. is_recfun(r,a,H,g) & z = H(a,g))"
 by (simp add: is_wfrec_def relation2_def is_recfun_abs)
 
-text{*Relating @{term wfrec_replacement} to native constructs*}
+text\<open>Relating @{term wfrec_replacement} to native constructs\<close>
 lemma (in M_basic) wfrec_replacement':
   "[|wfrec_replacement(M,MH,r);
      \<forall>x[M]. \<forall>g[M]. function(g) \<longrightarrow> M(H(x,g)); 
--- a/src/ZF/Constructible/Wellorderings.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Constructible/Wellorderings.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,18 +2,18 @@
     Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
 *)
 
-section {*Relativized Wellorderings*}
+section \<open>Relativized Wellorderings\<close>
 
 theory Wellorderings imports Relative begin
 
-text{*We define functions analogous to @{term ordermap} @{term ordertype} 
+text\<open>We define functions analogous to @{term ordermap} @{term ordertype} 
       but without using recursion.  Instead, there is a direct appeal
       to Replacement.  This will be the basis for a version relativized
       to some class @{text M}.  The main result is Theorem I 7.6 in Kunen,
-      page 17.*}
+      page 17.\<close>
 
 
-subsection{*Wellorderings*}
+subsection\<open>Wellorderings\<close>
 
 definition
   irreflexive :: "[i=>o,i,i]=>o" where
@@ -32,23 +32,23 @@
 
 definition
   wellfounded :: "[i=>o,i]=>o" where
-    --{*EVERY non-empty set has an @{text r}-minimal element*}
+    --\<open>EVERY non-empty set has an @{text r}-minimal element\<close>
     "wellfounded(M,r) == 
         \<forall>x[M]. x\<noteq>0 \<longrightarrow> (\<exists>y[M]. y\<in>x & ~(\<exists>z[M]. z\<in>x & <z,y> \<in> r))"
 definition
   wellfounded_on :: "[i=>o,i,i]=>o" where
-    --{*every non-empty SUBSET OF @{text A} has an @{text r}-minimal element*}
+    --\<open>every non-empty SUBSET OF @{text A} has an @{text r}-minimal element\<close>
     "wellfounded_on(M,A,r) == 
         \<forall>x[M]. x\<noteq>0 \<longrightarrow> x\<subseteq>A \<longrightarrow> (\<exists>y[M]. y\<in>x & ~(\<exists>z[M]. z\<in>x & <z,y> \<in> r))"
 
 definition
   wellordered :: "[i=>o,i,i]=>o" where
-    --{*linear and wellfounded on @{text A}*}
+    --\<open>linear and wellfounded on @{text A}\<close>
     "wellordered(M,A,r) == 
         transitive_rel(M,A,r) & linear_rel(M,A,r) & wellfounded_on(M,A,r)"
 
 
-subsubsection {*Trivial absoluteness proofs*}
+subsubsection \<open>Trivial absoluteness proofs\<close>
 
 lemma (in M_basic) irreflexive_abs [simp]: 
      "M(A) ==> irreflexive(M,A,r) \<longleftrightarrow> irrefl(A,r)"
@@ -83,7 +83,7 @@
 by (simp add: wellfounded_on_def, blast)
 
 
-subsubsection {*Well-founded relations*}
+subsubsection \<open>Well-founded relations\<close>
 
 lemma  (in M_basic) wellfounded_on_iff_wellfounded:
      "wellfounded_on(M,A,r) \<longleftrightarrow> wellfounded(M, r \<inter> A*A)"
@@ -126,7 +126,7 @@
 done
 
 
-subsubsection {*Kunen's lemma IV 3.14, page 123*}
+subsubsection \<open>Kunen's lemma IV 3.14, page 123\<close>
 
 lemma (in M_basic) linear_imp_relativized: 
      "linear(A,r) ==> linear_rel(M,A,r)" 
@@ -153,11 +153,11 @@
 by (simp add: wellordered_def well_ord_def tot_ord_def part_ord_def
        linear_imp_relativized trans_on_imp_relativized wf_on_imp_relativized)
 
-text{*The property being well founded (and hence of being well ordered) is not absolute: 
+text\<open>The property being well founded (and hence of being well ordered) is not absolute: 
 the set that doesn't contain a minimal element may not exist in the class M. 
-However, every set that is well founded in a transitive model M is well founded (page 124).*}
+However, every set that is well founded in a transitive model M is well founded (page 124).\<close>
 
-subsection{* Relativized versions of order-isomorphisms and order types *}
+subsection\<open>Relativized versions of order-isomorphisms and order types\<close>
 
 lemma (in M_basic) order_isomorphism_abs [simp]: 
      "[| M(A); M(B); M(f) |] 
@@ -203,9 +203,9 @@
 done
 
 
-subsection {* Main results of Kunen, Chapter 1 section 6 *}
+subsection \<open>Main results of Kunen, Chapter 1 section 6\<close>
 
-text{*Subset properties-- proved outside the locale*}
+text\<open>Subset properties-- proved outside the locale\<close>
 
 lemma linear_rel_subset: 
     "[| linear_rel(M,A,r);  B<=A |] ==> linear_rel(M,B,r)"
--- a/src/ZF/Datatype_ZF.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Datatype_ZF.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1997  University of Cambridge
 *)
 
-section{*Datatype and CoDatatype Definitions*}
+section\<open>Datatype and CoDatatype Definitions\<close>
 
 theory Datatype_ZF
 imports Inductive_ZF Univ QUniv
@@ -12,7 +12,7 @@
 
 ML_file "Tools/datatype_package.ML"
 
-ML {*
+ML \<open>
 (*Typechecking rules for most datatypes involving univ*)
 structure Data_Arg =
   struct
@@ -107,10 +107,10 @@
  val conv = Simplifier.simproc_global @{theory} "data_free" ["(x::i) = y"] proc;
 
 end;
-*}
+\<close>
 
-setup {*
+setup \<open>
   Simplifier.map_theory_simpset (fn ctxt => ctxt addsimprocs [DataFree.conv])
-*}
+\<close>
 
 end
--- a/src/ZF/Epsilon.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Epsilon.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1993  University of Cambridge
 *)
 
-section{*Epsilon Induction and Recursion*}
+section\<open>Epsilon Induction and Recursion\<close>
 
 theory Epsilon imports Nat_ZF begin
 
@@ -37,7 +37,7 @@
     "rec(k,a,b) == recursor(a,b,k)"
 
 
-subsection{*Basic Closure Properties*}
+subsection\<open>Basic Closure Properties\<close>
 
 lemma arg_subset_eclose: "A \<subseteq> eclose(A)"
 apply (unfold eclose_def)
@@ -80,7 +80,7 @@
 by (rule arg_in_eclose_sing [THEN eclose_induct], blast)
 
 
-subsection{*Leastness of @{term eclose}*}
+subsection\<open>Leastness of @{term eclose}\<close>
 
 (** eclose(A) is the least transitive set including A as a subset. **)
 
@@ -116,7 +116,7 @@
 apply (rule subset_refl)
 done
 
-text{*A transitive set either is empty or contains the empty set.*}
+text\<open>A transitive set either is empty or contains the empty set.\<close>
 lemma Transset_0_lemma [rule_format]: "Transset(A) ==> x\<in>A \<longrightarrow> 0\<in>A"
 apply (simp add: Transset_def)
 apply (rule_tac a=x in eps_induct, clarify)
@@ -128,7 +128,7 @@
 by (blast dest: Transset_0_lemma)
 
 
-subsection{*Epsilon Recursion*}
+subsection\<open>Epsilon Recursion\<close>
 
 (*Unused...*)
 lemma mem_eclose_trans: "[| A \<in> eclose(B);  B \<in> eclose(C) |] ==> A \<in> eclose(C)"
@@ -216,7 +216,7 @@
              dest: Ord_in_Ord [THEN eclose_sing_Ord, THEN subsetD])
 done
 
-subsection{*Rank*}
+subsection\<open>Rank\<close>
 
 (*NOT SUITABLE FOR REWRITING -- RECURSIVE!*)
 lemma rank: "rank(a) = (\<Union>y\<in>a. succ(rank(y)))"
@@ -319,7 +319,7 @@
 done
 
 
-subsection{*Corollaries of Leastness*}
+subsection\<open>Corollaries of Leastness\<close>
 
 lemma mem_eclose_subset: "A \<in> B ==> eclose(A)<=eclose(B)"
 apply (rule Transset_eclose [THEN eclose_least])
--- a/src/ZF/EquivClass.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/EquivClass.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1994  University of Cambridge
 *)
 
-section{*Equivalence Relations*}
+section\<open>Equivalence Relations\<close>
 
 theory EquivClass imports Trancl Perm begin
 
@@ -27,11 +27,11 @@
 abbreviation
   RESPECTS2 ::"[i=>i=>i, i] => o"  (infixr "respects2 " 80) where
   "f respects2 r == congruent2(r,r,f)"
-    --{*Abbreviation for the common case where the relations are identical*}
+    --\<open>Abbreviation for the common case where the relations are identical\<close>
 
 
-subsection{*Suppes, Theorem 70:
-    @{term r} is an equiv relation iff @{term "converse(r) O r = r"}*}
+subsection\<open>Suppes, Theorem 70:
+    @{term r} is an equiv relation iff @{term "converse(r) O r = r"}\<close>
 
 (** first half: equiv(A,r) ==> converse(r) O r = r **)
 
@@ -123,7 +123,7 @@
 apply (unfold equiv_def trans_def sym_def, blast)
 done
 
-subsection{*Defining Unary Operations upon Equivalence Classes*}
+subsection\<open>Defining Unary Operations upon Equivalence Classes\<close>
 
 (** Could have a locale with the premises equiv(A,r)  and  congruent(r,b)
 **)
@@ -159,7 +159,7 @@
 done
 
 
-subsection{*Defining Binary Operations upon Equivalence Classes*}
+subsection\<open>Defining Binary Operations upon Equivalence Classes\<close>
 
 lemma congruent2_implies_congruent:
     "[| equiv(A,r1);  congruent2(r1,r2,b);  a \<in> A |] ==> congruent(r2,b(a))"
--- a/src/ZF/Finite.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Finite.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -5,7 +5,7 @@
 prove:  b \<in> Fin(A) ==> inj(b,b) \<subseteq> surj(b,b)
 *)
 
-section{*Finite Powerset Operator and Finite Function Space*}
+section\<open>Finite Powerset Operator and Finite Function Space\<close>
 
 theory Finite imports Inductive_ZF Epsilon Nat_ZF begin
 
@@ -38,7 +38,7 @@
   type_intros Fin.intros
 
 
-subsection {* Finite Powerset Operator *}
+subsection \<open>Finite Powerset Operator\<close>
 
 lemma Fin_mono: "A<=B ==> Fin(A) \<subseteq> Fin(B)"
 apply (unfold Fin.defs)
@@ -126,7 +126,7 @@
 done
 
 
-subsection{*Finite Function Space*}
+subsection\<open>Finite Function Space\<close>
 
 lemma FiniteFun_mono:
     "[| A<=C;  B<=D |] ==> A -||> B  \<subseteq>  C -||> D"
@@ -197,7 +197,7 @@
 done
 
 
-subsection{*The Contents of a Singleton Set*}
+subsection\<open>The Contents of a Singleton Set\<close>
 
 definition
   contents :: "i=>i"  where
--- a/src/ZF/Fixedpt.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Fixedpt.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1992  University of Cambridge
 *)
 
-section{*Least and Greatest Fixed Points; the Knaster-Tarski Theorem*}
+section\<open>Least and Greatest Fixed Points; the Knaster-Tarski Theorem\<close>
 
 theory Fixedpt imports equalities begin
 
@@ -20,10 +20,10 @@
   gfp      :: "[i,i=>i]=>i"  where
      "gfp(D,h) == \<Union>({X: Pow(D). X \<subseteq> h(X)})"
 
-text{*The theorem is proved in the lattice of subsets of @{term D}, 
-      namely @{term "Pow(D)"}, with Inter as the greatest lower bound.*}
+text\<open>The theorem is proved in the lattice of subsets of @{term D}, 
+      namely @{term "Pow(D)"}, with Inter as the greatest lower bound.\<close>
 
-subsection{*Monotone Operators*}
+subsection\<open>Monotone Operators\<close>
 
 lemma bnd_monoI:
     "[| h(D)<=D;   
@@ -69,7 +69,7 @@
 apply (erule bnd_monoD2, rule Int_lower2, assumption) 
 done
 
-subsection{*Proof of Knaster-Tarski Theorem using @{term lfp}*}
+subsection\<open>Proof of Knaster-Tarski Theorem using @{term lfp}\<close>
 
 (*lfp is contained in each pre-fixedpoint*)
 lemma lfp_lowerbound: 
@@ -124,7 +124,7 @@
 apply (erule lfp_unfold)
 done
 
-subsection{*General Induction Rule for Least Fixedpoints*}
+subsection\<open>General Induction Rule for Least Fixedpoints\<close>
 
 lemma Collect_is_pre_fixedpt:
     "[| bnd_mono(D,h);  !!x. x \<in> h(Collect(lfp(D,h),P)) ==> P(x) |]
@@ -189,7 +189,7 @@
 done 
 
 
-subsection{*Proof of Knaster-Tarski Theorem using @{term gfp}*}
+subsection\<open>Proof of Knaster-Tarski Theorem using @{term gfp}\<close>
 
 (*gfp contains each post-fixedpoint that is contained in D*)
 lemma gfp_upperbound: "[| A \<subseteq> h(A);  A<=D |] ==> A \<subseteq> gfp(D,h)"
@@ -250,7 +250,7 @@
 done
 
 
-subsection{*Coinduction Rules for Greatest Fixed Points*}
+subsection\<open>Coinduction Rules for Greatest Fixed Points\<close>
 
 (*weak version*)
 lemma weak_coinduct: "[| a: X;  X \<subseteq> h(X);  X \<subseteq> D |] ==> a \<in> gfp(D,h)"
--- a/src/ZF/IMP/Com.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/IMP/Com.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,12 +2,12 @@
     Author:     Heiko Loetzbeyer and Robert Sandner, TU München
 *)
 
-section {* Arithmetic expressions, boolean expressions, commands *}
+section \<open>Arithmetic expressions, boolean expressions, commands\<close>
 
 theory Com imports Main begin
 
 
-subsection {* Arithmetic expressions *}
+subsection \<open>Arithmetic expressions\<close>
 
 consts
   loc :: i
@@ -37,7 +37,7 @@
   type_intros aexp.intros apply_funtype
 
 
-subsection {* Boolean expressions *}
+subsection \<open>Boolean expressions\<close>
 
 consts bexp :: i
 
@@ -73,7 +73,7 @@
   type_elims   evala.dom_subset [THEN subsetD, elim_format]
 
 
-subsection {* Commands *}
+subsection \<open>Commands\<close>
 
 consts com :: i
 datatype com =
@@ -121,7 +121,7 @@
                evalb.dom_subset [THEN subsetD, elim_format]
 
 
-subsection {* Misc lemmas *}
+subsection \<open>Misc lemmas\<close>
 
 lemmas evala_1 [simp] = evala.dom_subset [THEN subsetD, THEN SigmaD1, THEN SigmaD1]
   and evala_2 [simp] = evala.dom_subset [THEN subsetD, THEN SigmaD1, THEN SigmaD2]
--- a/src/ZF/IMP/Denotation.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/IMP/Denotation.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,11 +2,11 @@
     Author:     Heiko Loetzbeyer and Robert Sandner, TU München
 *)
 
-section {* Denotational semantics of expressions and commands *}
+section \<open>Denotational semantics of expressions and commands\<close>
 
 theory Denotation imports Com begin
 
-subsection {* Definitions *}
+subsection \<open>Definitions\<close>
 
 consts
   A     :: "i => i => i"
@@ -43,7 +43,7 @@
   "C(\<WHILE> b \<DO> c) = lfp((loc->nat) \<times> (loc->nat), \<Gamma>(b,C(c)))"
 
 
-subsection {* Misc lemmas *}
+subsection \<open>Misc lemmas\<close>
 
 lemma A_type [TC]: "[|a \<in> aexp; sigma \<in> loc->nat|] ==> A(a,sigma) \<in> nat"
   by (erule aexp.induct) simp_all
--- a/src/ZF/IMP/Equiv.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/IMP/Equiv.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,7 +2,7 @@
     Author:     Heiko Loetzbeyer and Robert Sandner, TU München
 *)
 
-section {* Equivalence *}
+section \<open>Equivalence\<close>
 
 theory Equiv imports Denotation Com begin
 
@@ -38,14 +38,14 @@
 lemma com1: "<c,sigma> -c-> sigma' ==> <sigma,sigma'> \<in> C(c)"
   apply (erule evalc.induct)
         apply (simp_all (no_asm_simp))
-     txt {* @{text assign} *}
+     txt \<open>@{text assign}\<close>
      apply (simp add: update_type)
-    txt {* @{text comp} *}
+    txt \<open>@{text comp}\<close>
     apply fast
-   txt {* @{text while} *}
+   txt \<open>@{text while}\<close>
    apply (erule Gamma_bnd_mono [THEN lfp_unfold, THEN ssubst, OF C_subset])
    apply (simp add: Gamma_def)
-  txt {* recursive case of @{text while} *}
+  txt \<open>recursive case of @{text while}\<close>
   apply (erule Gamma_bnd_mono [THEN lfp_unfold, THEN ssubst, OF C_subset])
   apply (auto simp add: Gamma_def)
   done
@@ -56,24 +56,24 @@
 
 lemma com2 [rule_format]: "c \<in> com ==> \<forall>x \<in> C(c). <c,fst(x)> -c-> snd(x)"
   apply (erule com.induct)
-      txt {* @{text skip} *}
+      txt \<open>@{text skip}\<close>
       apply force
-     txt {* @{text assign} *}
+     txt \<open>@{text assign}\<close>
      apply force
-    txt {* @{text comp} *}
+    txt \<open>@{text comp}\<close>
     apply force
-   txt {* @{text while} *}
+   txt \<open>@{text while}\<close>
    apply safe
    apply simp_all
    apply (frule Gamma_bnd_mono [OF C_subset], erule Fixedpt.induct, assumption)
    apply (unfold Gamma_def)
    apply force
-  txt {* @{text "if"} *}
+  txt \<open>@{text "if"}\<close>
   apply auto
   done
 
 
-subsection {* Main theorem *}
+subsection \<open>Main theorem\<close>
 
 theorem com_equivalence:
     "c \<in> com ==> C(c) = {io \<in> (loc->nat) \<times> (loc->nat). <c,fst(io)> -c-> snd(io)}"
--- a/src/ZF/Induct/Acc.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/Acc.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,13 +3,13 @@
     Copyright   1994  University of Cambridge
 *)
 
-section {* The accessible part of a relation *}
+section \<open>The accessible part of a relation\<close>
 
 theory Acc imports Main begin
 
-text {*
+text \<open>
   Inductive definition of @{text "acc(r)"}; see @{cite "paulin-tlca"}.
-*}
+\<close>
 
 consts
   acc :: "i => i"
@@ -20,13 +20,13 @@
     vimage:  "[| r-``{a}: Pow(acc(r)); a \<in> field(r) |] ==> a \<in> acc(r)"
   monos      Pow_mono
 
-text {*
+text \<open>
   The introduction rule must require @{prop "a \<in> field(r)"},
   otherwise @{text "acc(r)"} would be a proper class!
 
   \medskip
   The intended introduction rule:
-*}
+\<close>
 
 lemma accI: "[| !!b. <b,a>:r ==> b \<in> acc(r);  a \<in> field(r) |] ==> a \<in> acc(r)"
   by (blast intro: acc.intros)
--- a/src/ZF/Induct/Binary_Trees.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/Binary_Trees.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,11 +3,11 @@
     Copyright   1992  University of Cambridge
 *)
 
-section {* Binary trees *}
+section \<open>Binary trees\<close>
 
 theory Binary_Trees imports Main begin
 
-subsection {* Datatype definition *}
+subsection \<open>Datatype definition\<close>
 
 consts
   bt :: "i => i"
@@ -27,10 +27,10 @@
 inductive_cases BrE: "Br(a, l, r) \<in> bt(A)"
   -- "An elimination rule, for type-checking."
 
-text {*
+text \<open>
   \medskip Lemmas to justify using @{term bt} in other recursive type
   definitions.
-*}
+\<close>
 
 lemma bt_mono: "A \<subseteq> B ==> bt(A) \<subseteq> bt(B)"
   apply (unfold bt.defs)
@@ -58,13 +58,13 @@
     !!x y z r s. [| x \<in> A;  y \<in> bt(A);  z \<in> bt(A);  r \<in> C(y);  s \<in> C(z) |] ==>
     h(x, y, z, r, s) \<in> C(Br(x, y, z))
   |] ==> bt_rec(c, h, t) \<in> C(t)"
-  -- {* Type checking for recursor -- example only; not really needed. *}
+  -- \<open>Type checking for recursor -- example only; not really needed.\<close>
   apply (induct_tac t)
    apply simp_all
   done
 
 
-subsection {* Number of nodes, with an example of tail-recursion *}
+subsection \<open>Number of nodes, with an example of tail-recursion\<close>
 
 consts  n_nodes :: "i => i"
 primrec
@@ -95,7 +95,7 @@
   by (simp add: n_nodes_tail_def n_nodes_aux_eq)
 
 
-subsection {* Number of leaves *}
+subsection \<open>Number of leaves\<close>
 
 consts
   n_leaves :: "i => i"
@@ -107,7 +107,7 @@
   by (induct set: bt) auto
 
 
-subsection {* Reflecting trees *}
+subsection \<open>Reflecting trees\<close>
 
 consts
   bt_reflect :: "i => i"
@@ -118,9 +118,9 @@
 lemma bt_reflect_type [simp]: "t \<in> bt(A) ==> bt_reflect(t) \<in> bt(A)"
   by (induct set: bt) auto
 
-text {*
+text \<open>
   \medskip Theorems about @{term n_leaves}.
-*}
+\<close>
 
 lemma n_leaves_reflect: "t \<in> bt(A) ==> n_leaves(bt_reflect(t)) = n_leaves(t)"
   by (induct set: bt) (simp_all add: add_commute)
@@ -128,9 +128,9 @@
 lemma n_leaves_nodes: "t \<in> bt(A) ==> n_leaves(t) = succ(n_nodes(t))"
   by (induct set: bt) simp_all
 
-text {*
+text \<open>
   Theorems about @{term bt_reflect}.
-*}
+\<close>
 
 lemma bt_reflect_bt_reflect_ident: "t \<in> bt(A) ==> bt_reflect(bt_reflect(t)) = t"
   by (induct set: bt) simp_all
--- a/src/ZF/Induct/Brouwer.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/Brouwer.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,11 +3,11 @@
     Copyright   1994  University of Cambridge
 *)
 
-section {* Infinite branching datatype definitions *}
+section \<open>Infinite branching datatype definitions\<close>
 
 theory Brouwer imports Main_ZFC begin
 
-subsection {* The Brouwer ordinals *}
+subsection \<open>The Brouwer ordinals\<close>
 
 consts
   brouwer :: i
@@ -28,7 +28,7 @@
       "!!b. [| b \<in> brouwer;  P(b) |] ==> P(Suc(b))"
       "!!h. [| h \<in> nat -> brouwer;  \<forall>i \<in> nat. P(h`i) |] ==> P(Lim(h))"
   shows "P(b)"
-  -- {* A nicer induction rule than the standard one. *}
+  -- \<open>A nicer induction rule than the standard one.\<close>
   using b
   apply induct
     apply (rule cases(1))
@@ -39,13 +39,13 @@
   done
 
 
-subsection {* The Martin-Löf wellordering type *}
+subsection \<open>The Martin-Löf wellordering type\<close>
 
 consts
   Well :: "[i, i => i] => i"
 
 datatype \<subseteq> "Vfrom(A \<union> (\<Union>x \<in> A. B(x)), csucc(nat \<union> |\<Union>x \<in> A. B(x)|))"
-    -- {* The union with @{text nat} ensures that the cardinal is infinite. *}
+    -- \<open>The union with @{text nat} ensures that the cardinal is infinite.\<close>
   "Well(A, B)" = Sup ("a \<in> A", "f \<in> B(a) -> Well(A, B)")
   monos Pi_mono
   type_intros le_trans [OF UN_upper_cardinal le_nat_Un_cardinal] inf_datatype_intros
@@ -59,7 +59,7 @@
   assumes w: "w \<in> Well(A, B)"
     and step: "!!a f. [| a \<in> A;  f \<in> B(a) -> Well(A,B);  \<forall>y \<in> B(a). P(f`y) |] ==> P(Sup(a,f))"
   shows "P(w)"
-  -- {* A nicer induction rule than the standard one. *}
+  -- \<open>A nicer induction rule than the standard one.\<close>
   using w
   apply induct
   apply (assumption | rule step)+
@@ -68,8 +68,8 @@
   done
 
 lemma Well_bool_unfold: "Well(bool, \<lambda>x. x) = 1 + (1 -> Well(bool, \<lambda>x. x))"
-  -- {* In fact it's isomorphic to @{text nat}, but we need a recursion operator *}
-  -- {* for @{text Well} to prove this. *}
+  -- \<open>In fact it's isomorphic to @{text nat}, but we need a recursion operator\<close>
+  -- \<open>for @{text Well} to prove this.\<close>
   apply (rule Well_unfold [THEN trans])
   apply (simp add: Sigma_bool succ_def)
   done
--- a/src/ZF/Induct/Comb.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/Comb.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,19 +3,19 @@
     Copyright   1994  University of Cambridge
 *)
 
-section {* Combinatory Logic example: the Church-Rosser Theorem *}
+section \<open>Combinatory Logic example: the Church-Rosser Theorem\<close>
 
 theory Comb imports Main begin
 
-text {*
+text \<open>
   Curiously, combinators do not include free variables.
 
   Example taken from @{cite camilleri92}.
-*}
+\<close>
 
-subsection {* Definitions *}
+subsection \<open>Definitions\<close>
 
-text {* Datatype definition of combinators @{text S} and @{text K}. *}
+text \<open>Datatype definition of combinators @{text S} and @{text K}.\<close>
 
 consts comb :: i
 datatype comb =
@@ -26,10 +26,10 @@
 notation (xsymbols)
   app  (infixl "\<bullet>" 90)
 
-text {*
+text \<open>
   Inductive definition of contractions, @{text "-1->"} and
   (multi-step) reductions, @{text "-\<longrightarrow>"}.
-*}
+\<close>
 
 consts
   contract  :: i
@@ -51,10 +51,10 @@
     Ap2: "[| p-1->q;  r \<in> comb |] ==> r\<bullet>p -1-> r\<bullet>q"
   type_intros comb.intros
 
-text {*
+text \<open>
   Inductive definition of parallel contractions, @{text "=1=>"} and
   (multi-step) parallel reductions, @{text "===>"}.
-*}
+\<close>
 
 consts
   parcontract :: i
@@ -76,9 +76,9 @@
     Ap:   "[| p=1=>q;  r=1=>s |] ==> p\<bullet>r =1=> q\<bullet>s"
   type_intros comb.intros
 
-text {*
+text \<open>
   Misc definitions.
-*}
+\<close>
 
 definition
   I :: i  where
@@ -90,7 +90,7 @@
     \<forall>x y. <x,y>\<in>r \<longrightarrow> (\<forall>y'. <x,y'>\<in>r \<longrightarrow> (\<exists>z. <y,z>\<in>r & <y',z> \<in> r))"
 
 
-subsection {* Transitive closure preserves the Church-Rosser property *}
+subsection \<open>Transitive closure preserves the Church-Rosser property\<close>
 
 lemma diamond_strip_lemmaD [rule_format]:
   "[| diamond(r);  <x,y>:r^+ |] ==>
@@ -115,12 +115,12 @@
 inductive_cases Ap_E [elim!]: "p\<bullet>q \<in> comb"
 
 
-subsection {* Results about Contraction *}
+subsection \<open>Results about Contraction\<close>
 
-text {*
+text \<open>
   For type checking: replaces @{term "a -1-> b"} by @{text "a, b \<in>
   comb"}.
-*}
+\<close>
 
 lemmas contract_combE2 = contract.dom_subset [THEN subsetD, THEN SigmaE2]
   and contract_combD1 = contract.dom_subset [THEN subsetD, THEN SigmaD1]
@@ -144,16 +144,16 @@
   contract.Ap2 [THEN rtrancl_into_rtrancl2]
 
 lemma "p \<in> comb ==> I\<bullet>p -\<longrightarrow> p"
-  -- {* Example only: not used *}
+  -- \<open>Example only: not used\<close>
   by (unfold I_def) (blast intro: reduction_rls)
 
 lemma comb_I: "I \<in> comb"
   by (unfold I_def) blast
 
 
-subsection {* Non-contraction results *}
+subsection \<open>Non-contraction results\<close>
 
-text {* Derive a case for each combinator constructor. *}
+text \<open>Derive a case for each combinator constructor.\<close>
 
 inductive_cases
       K_contractE [elim!]: "K -1-> r"
@@ -184,7 +184,7 @@
                       contract_combD2 reduction_rls)
   done
 
-text {* Counterexample to the diamond property for @{text "-1->"}. *}
+text \<open>Counterexample to the diamond property for @{text "-1->"}.\<close>
 
 lemma KIII_contract1: "K\<bullet>I\<bullet>(I\<bullet>I) -1-> I"
   by (blast intro: comb_I)
@@ -202,10 +202,10 @@
   done
 
 
-subsection {* Results about Parallel Contraction *}
+subsection \<open>Results about Parallel Contraction\<close>
 
-text {* For type checking: replaces @{text "a =1=> b"} by @{text "a, b
-  \<in> comb"} *}
+text \<open>For type checking: replaces @{text "a =1=> b"} by @{text "a, b
+  \<in> comb"}\<close>
 lemmas parcontract_combE2 = parcontract.dom_subset [THEN subsetD, THEN SigmaE2]
   and parcontract_combD1 = parcontract.dom_subset [THEN subsetD, THEN SigmaD1]
   and parcontract_combD2 = parcontract.dom_subset [THEN subsetD, THEN SigmaD2]
@@ -213,7 +213,7 @@
 lemma field_parcontract_eq: "field(parcontract) = comb"
   by (blast intro: parcontract.K elim!: parcontract_combE2)
 
-text {* Derive a case for each combinator constructor. *}
+text \<open>Derive a case for each combinator constructor.\<close>
 inductive_cases
       K_parcontractE [elim!]: "K =1=> r"
   and S_parcontractE [elim!]: "S =1=> r"
@@ -222,7 +222,7 @@
 declare parcontract.intros [intro]
 
 
-subsection {* Basic properties of parallel contraction *}
+subsection \<open>Basic properties of parallel contraction\<close>
 
 lemma K1_parcontractD [dest!]:
     "K\<bullet>p =1=> r ==> (\<exists>p'. r = K\<bullet>p' & p =1=> p')"
@@ -237,16 +237,16 @@
   by auto
 
 lemma diamond_parcontract: "diamond(parcontract)"
-  -- {* Church-Rosser property for parallel contraction *}
+  -- \<open>Church-Rosser property for parallel contraction\<close>
   apply (unfold diamond_def)
   apply (rule impI [THEN allI, THEN allI])
   apply (erule parcontract.induct)
      apply (blast elim!: comb.free_elims  intro: parcontract_combD2)+
   done
 
-text {*
+text \<open>
   \medskip Equivalence of @{prop "p -\<longrightarrow> q"} and @{prop "p ===> q"}.
-*}
+\<close>
 
 lemma contract_imp_parcontract: "p-1->q ==> p=1=>q"
   by (induct set: contract) auto
--- a/src/ZF/Induct/Datatypes.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/Datatypes.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,16 +3,16 @@
     Copyright   1994  University of Cambridge
 *)
 
-section {* Sample datatype definitions *}
+section \<open>Sample datatype definitions\<close>
 
 theory Datatypes imports Main begin
 
-subsection {* A type with four constructors *}
+subsection \<open>A type with four constructors\<close>
 
-text {*
+text \<open>
   It has four contructors, of arities 0--3, and two parameters @{text
   A} and @{text B}.
-*}
+\<close>
 
 consts
   data :: "[i, i] => i"
@@ -27,10 +27,10 @@
   by (fast intro!: data.intros [unfolded data.con_defs]
     elim: data.cases [unfolded data.con_defs])
 
-text {*
+text \<open>
   \medskip Lemmas to justify using @{term data} in other recursive
   type definitions.
-*}
+\<close>
 
 lemma data_mono: "[| A \<subseteq> C; B \<subseteq> D |] ==> data(A, B) \<subseteq> data(C, D)"
   apply (unfold data.defs)
@@ -51,12 +51,12 @@
   by (rule subset_trans [OF data_mono data_univ])
 
 
-subsection {* Example of a big enumeration type *}
+subsection \<open>Example of a big enumeration type\<close>
 
-text {*
+text \<open>
   Can go up to at least 100 constructors, but it takes nearly 7
   minutes \dots\ (back in 1994 that is).
-*}
+\<close>
 
 consts
   enum :: i
--- a/src/ZF/Induct/FoldSet.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/FoldSet.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -106,10 +106,10 @@
 apply (simp add: Fin_into_Finite [THEN Finite_imp_cardinal_cons])
 apply (case_tac "x=xb", auto) 
 apply (simp add: cons_lemma1, blast)
-txt{*case @{term "x\<noteq>xb"}*}
+txt\<open>case @{term "x\<noteq>xb"}\<close>
 apply (drule cons_lemma2, safe)
 apply (frule Diff_sing_imp, assumption+) 
-txt{** LEVEL 17*}
+txt\<open>LEVEL 17\<close>
 apply (subgoal_tac "|Ca| \<le> |Cb|")
  prefer 2
  apply (rule succ_le_imp_le)
@@ -117,7 +117,7 @@
                   Fin_into_Finite [THEN Finite_imp_cardinal_cons])
 apply (rule_tac C1 = "Ca-{xb}" in Fin_imp_fold_set [THEN exE])
  apply (blast intro: Diff_subset [THEN Fin_subset])
-txt{** LEVEL 24 **}
+txt\<open>LEVEL 24\<close>
 apply (frule Diff1_fold_set, blast, blast)
 apply (blast dest!: ftype fold_set.dom_subset [THEN subsetD])
 apply (subgoal_tac "ya = f(xb,xa) ")
@@ -159,7 +159,7 @@
 apply (blast elim!: empty_fold_setE intro: fold_set.intros)
 done
 
-text{*This result is the right-to-left direction of the subsequent result*}
+text\<open>This result is the right-to-left direction of the subsequent result\<close>
 lemma (in fold_typing) fold_set_imp_cons: 
      "[| <C, y> \<in> fold_set(C, B, f, e); C \<in> Fin(A); c \<in> A; c\<notin>C |]
       ==> <cons(c, C), f(c,y)> \<in> fold_set(cons(c, C), B, f, e)"
@@ -237,7 +237,7 @@
 apply (blast intro: Fin_mono [THEN subsetD])
 done
 
-subsection{*The Operator @{term setsum}*}
+subsection\<open>The Operator @{term setsum}\<close>
 
 lemma setsum_0 [simp]: "setsum(g, 0) = #0"
 by (simp add: setsum_def)
--- a/src/ZF/Induct/ListN.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/ListN.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,14 +3,14 @@
     Copyright   1994  University of Cambridge
 *)
 
-section {* Lists of n elements *}
+section \<open>Lists of n elements\<close>
 
 theory ListN imports Main begin
 
-text {*
+text \<open>
   Inductive definition of lists of @{text n} elements; see
   @{cite "paulin-tlca"}.
-*}
+\<close>
 
 consts listn :: "i=>i"
 inductive
--- a/src/ZF/Induct/Multiset.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/Multiset.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -12,7 +12,7 @@
 begin
 
 abbreviation (input)
-  -- {* Short cut for multiset space *}
+  -- \<open>Short cut for multiset space\<close>
   Mult :: "i=>i" where
   "Mult(A) == A -||> nat-{0}"
 
@@ -110,7 +110,7 @@
   "M <#= N == (omultiset(M) & M = N) | M <# N"
 
 
-subsection{*Properties of the original "restrict" from ZF.thy*}
+subsection\<open>Properties of the original "restrict" from ZF.thy\<close>
 
 lemma funrestrict_subset: "[| f \<in> Pi(C,B);  A\<subseteq>C |] ==> funrestrict(f,A) \<subseteq> f"
 by (auto simp add: funrestrict_def lam_def intro: apply_Pair)
@@ -142,7 +142,7 @@
 declare domainE [rule del]
 
 
-text{* A useful simplification rule *}
+text\<open>A useful simplification rule\<close>
 lemma multiset_fun_iff:
      "(f \<in> A -> nat-{0}) \<longleftrightarrow> f \<in> A->nat&(\<forall>a \<in> A. f`a \<in> nat & 0 < f`a)"
 apply safe
@@ -177,7 +177,7 @@
 by (auto simp add: Mult_iff_multiset)
 
 
-text{*The @{term multiset} operator*}
+text\<open>The @{term multiset} operator\<close>
 
 (* the empty multiset is 0 *)
 
@@ -185,7 +185,7 @@
 by (auto intro: FiniteFun.intros simp add: multiset_iff_Mult_mset_of)
 
 
-text{*The @{term mset_of} operator*}
+text\<open>The @{term mset_of} operator\<close>
 
 lemma multiset_set_of_Finite [simp]: "multiset(M) ==> Finite(mset_of(M))"
 by (simp add: multiset_def mset_of_def, auto)
@@ -522,7 +522,7 @@
 apply (drule add_diff_inverse2, auto)
 done
 
-text{*Specialized for use in the proof below.*}
+text\<open>Specialized for use in the proof below.\<close>
 lemma multiset_funrestict:
      "\<lbrakk>\<forall>a\<in>A. M ` a \<in> nat \<and> 0 < M ` a; Finite(A)\<rbrakk>
       \<Longrightarrow> multiset(funrestrict(M, A - {a}))"
@@ -714,7 +714,7 @@
 by (auto simp add: Mult_iff_multiset melem_diff_single munion_eq_conv_diff)
 
 
-subsection{*Multiset Orderings*}
+subsection\<open>Multiset Orderings\<close>
 
 (* multiset on a domain A are finite functions from A to nat-{0} *)
 
@@ -735,7 +735,7 @@
 by (auto simp add: multirel1_def Mult_iff_multiset Bex_def)
 
 
-text{*Monotonicity of @{term multirel1}*}
+text\<open>Monotonicity of @{term multirel1}\<close>
 
 lemma multirel1_mono1: "A\<subseteq>B ==> multirel1(A, r)\<subseteq>multirel1(B, r)"
 apply (auto simp add: multirel1_def)
@@ -762,7 +762,7 @@
 apply (rule_tac [2] multirel1_mono2, auto)
 done
 
-subsection{* Toward the proof of well-foundedness of multirel1 *}
+subsection\<open>Toward the proof of well-foundedness of multirel1\<close>
 
 lemma not_less_0 [iff]: "<M,0> \<notin> multirel1(A, r)"
 by (auto simp add: multirel1_def Mult_iff_multiset)
@@ -1134,7 +1134,7 @@
 done
 
 
-subsection{*Ordinal Multisets*}
+subsection\<open>Ordinal Multisets\<close>
 
 (* A \<subseteq> B ==>  field(Memrel(A)) \<subseteq> field(Memrel(B)) *)
 lemmas field_Memrel_mono = Memrel_mono [THEN field_mono]
--- a/src/ZF/Induct/Mutil.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/Mutil.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,14 +3,14 @@
     Copyright   1996  University of Cambridge
 *)
 
-section {* The Mutilated Chess Board Problem, formalized inductively *}
+section \<open>The Mutilated Chess Board Problem, formalized inductively\<close>
 
 theory Mutil imports Main begin
 
-text {*
+text \<open>
   Originator is Max Black, according to J A Robinson.  Popularized as
   the Mutilated Checkerboard Problem by J McCarthy.
-*}
+\<close>
 
 consts
   domino :: i
@@ -36,7 +36,7 @@
   "evnodd(A,b) == {z \<in> A. \<exists>i j. z = <i,j> \<and> (i #+ j) mod 2 = b}"
 
 
-subsection {* Basic properties of evnodd *}
+subsection \<open>Basic properties of evnodd\<close>
 
 lemma evnodd_iff: "<i,j>: evnodd(A,b) \<longleftrightarrow> <i,j>: A & (i#+j) mod 2 = b"
   by (unfold evnodd_def) blast
@@ -62,7 +62,7 @@
   by (simp add: evnodd_def)
 
 
-subsection {* Dominoes *}
+subsection \<open>Dominoes\<close>
 
 lemma domino_Finite: "d \<in> domino ==> Finite(d)"
   by (blast intro!: Finite_cons Finite_0 elim: domino.cases)
@@ -78,9 +78,9 @@
   done
 
 
-subsection {* Tilings *}
+subsection \<open>Tilings\<close>
 
-text {* The union of two disjoint tilings is a tiling *}
+text \<open>The union of two disjoint tilings is a tiling\<close>
 
 lemma tiling_UnI:
     "t \<in> tiling(A) ==> u \<in> tiling(A) ==> t \<inter> u = 0 ==> t \<union> u \<in> tiling(A)"
--- a/src/ZF/Induct/Ntree.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/Ntree.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,14 +3,14 @@
     Copyright   1994  University of Cambridge
 *)
 
-section {* Datatype definition n-ary branching trees *}
+section \<open>Datatype definition n-ary branching trees\<close>
 
 theory Ntree imports Main begin
 
-text {*
+text \<open>
   Demonstrates a simple use of function space in a datatype
   definition.  Based upon theory @{text Term}.
-*}
+\<close>
 
 consts
   ntree :: "i => i"
@@ -18,12 +18,12 @@
   maptree2 :: "[i, i] => i"
 
 datatype "ntree(A)" = Branch ("a \<in> A", "h \<in> (\<Union>n \<in> nat. n -> ntree(A))")
-  monos UN_mono [OF subset_refl Pi_mono]  -- {* MUST have this form *}
+  monos UN_mono [OF subset_refl Pi_mono]  -- \<open>MUST have this form\<close>
   type_intros nat_fun_univ [THEN subsetD]
   type_elims UN_E
 
 datatype "maptree(A)" = Sons ("a \<in> A", "h \<in> maptree(A) -||> maptree(A)")
-  monos FiniteFun_mono1  -- {* Use monotonicity in BOTH args *}
+  monos FiniteFun_mono1  -- \<open>Use monotonicity in BOTH args\<close>
   type_intros FiniteFun_univ1 [THEN subsetD]
 
 datatype "maptree2(A, B)" = Sons2 ("a \<in> A", "h \<in> B -||> maptree2(A, B)")
@@ -40,9 +40,9 @@
   "ntree_copy(z) == ntree_rec(\<lambda>x h r. Branch(x,r), z)"
 
 
-text {*
+text \<open>
   \medskip @{text ntree}
-*}
+\<close>
 
 lemma ntree_unfold: "ntree(A) = A \<times> (\<Union>n \<in> nat. n -> ntree(A))"
   by (blast intro: ntree.intros [unfolded ntree.con_defs]
@@ -53,7 +53,7 @@
     and step: "!!x n h. [| x \<in> A;  n \<in> nat;  h \<in> n -> ntree(A);  \<forall>i \<in> n. P(h`i)
       |] ==> P(Branch(x,h))"
   shows "P(t)"
-  -- {* A nicer induction rule than the standard one. *}
+  -- \<open>A nicer induction rule than the standard one.\<close>
   using t
   apply induct
   apply (erule UN_E)
@@ -69,7 +69,7 @@
     and step: "!!x n h. [| x \<in> A;  n \<in> nat;  h \<in> n -> ntree(A);  f O h = g O h |] ==>
       f ` Branch(x,h) = g ` Branch(x,h)"
   shows "f`t=g`t"
-  -- {* Induction on @{term "ntree(A)"} to prove an equation *}
+  -- \<open>Induction on @{term "ntree(A)"} to prove an equation\<close>
   using t
   apply induct
   apply (assumption | rule step)+
@@ -79,10 +79,10 @@
   apply (simp add: comp_fun_apply)
   done
 
-text {*
+text \<open>
   \medskip Lemmas to justify using @{text Ntree} in other recursive
   type definitions.
-*}
+\<close>
 
 lemma ntree_mono: "A \<subseteq> B ==> ntree(A) \<subseteq> ntree(B)"
   apply (unfold ntree.defs)
@@ -92,7 +92,7 @@
   done
 
 lemma ntree_univ: "ntree(univ(A)) \<subseteq> univ(A)"
-  -- {* Easily provable by induction also *}
+  -- \<open>Easily provable by induction also\<close>
   apply (unfold ntree.defs ntree.con_defs)
   apply (rule lfp_lowerbound)
    apply (rule_tac [2] A_subset_univ [THEN univ_mono])
@@ -103,9 +103,9 @@
   by (rule subset_trans [OF ntree_mono ntree_univ])
 
 
-text {*
+text \<open>
   \medskip @{text ntree} recursion.
-*}
+\<close>
 
 lemma ntree_rec_Branch:
     "function(h) ==>
@@ -124,9 +124,9 @@
     (auto simp add: domain_of_fun Pi_Collect_iff fun_is_function)
 
 
-text {*
+text \<open>
   \medskip @{text maptree}
-*}
+\<close>
 
 lemma maptree_unfold: "maptree(A) = A \<times> (maptree(A) -||> maptree(A))"
   by (fast intro!: maptree.intros [unfolded maptree.con_defs]
@@ -138,7 +138,7 @@
                   \<forall>y \<in> field(h). P(y)
                |] ==> P(Sons(x,h))"
   shows "P(t)"
-  -- {* A nicer induction rule than the standard one. *}
+  -- \<open>A nicer induction rule than the standard one.\<close>
   using t
   apply induct
   apply (assumption | rule step)+
@@ -149,9 +149,9 @@
   done
 
 
-text {*
+text \<open>
   \medskip @{text maptree2}
-*}
+\<close>
 
 lemma maptree2_unfold: "maptree2(A, B) = A \<times> (B -||> maptree2(A, B))"
   by (fast intro!: maptree2.intros [unfolded maptree2.con_defs]
--- a/src/ZF/Induct/Primrec.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/Primrec.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,18 +3,18 @@
     Copyright   1994  University of Cambridge
 *)
 
-section {* Primitive Recursive Functions: the inductive definition *}
+section \<open>Primitive Recursive Functions: the inductive definition\<close>
 
 theory Primrec imports Main begin
 
-text {*
+text \<open>
   Proof adopted from @{cite szasz93}.
 
   See also @{cite \<open>page 250, exercise 11\<close> mendelson}.
-*}
+\<close>
 
 
-subsection {* Basic definitions *}
+subsection \<open>Basic definitions\<close>
 
 definition
   SC :: "i"  where
@@ -37,7 +37,7 @@
   "PREC(f,g) ==
      \<lambda>l \<in> list(nat). list_case(0,
                       \<lambda>x xs. rec(x, f`xs, \<lambda>y r. g ` Cons(r, Cons(y, xs))), l)"
-  -- {* Note that @{text g} is applied first to @{term "PREC(f,g)`y"} and then to @{text y}! *}
+  -- \<open>Note that @{text g} is applied first to @{term "PREC(f,g)`y"} and then to @{text y}!\<close>
 
 consts
   ACK :: "i=>i"
@@ -50,9 +50,9 @@
   "ack(x,y) == ACK(x) ` [y]"
 
 
-text {*
+text \<open>
   \medskip Useful special cases of evaluation.
-*}
+\<close>
 
 lemma SC: "[| x \<in> nat;  l \<in> list(nat) |] ==> SC ` (Cons(x,l)) = succ(x)"
   by (simp add: SC_def)
@@ -76,7 +76,7 @@
   by (simp add: PREC_def)
 
 
-subsection {* Inductive definition of the PR functions *}
+subsection \<open>Inductive definition of the PR functions\<close>
 
 consts
   prim_rec :: i
@@ -112,19 +112,19 @@
   by auto
 
 
-subsection {* Ackermann's function cases *}
+subsection \<open>Ackermann's function cases\<close>
 
 lemma ack_0: "j \<in> nat ==> ack(0,j) = succ(j)"
-  -- {* PROPERTY A 1 *}
+  -- \<open>PROPERTY A 1\<close>
   by (simp add: SC)
 
 lemma ack_succ_0: "ack(succ(i), 0) = ack(i,1)"
-  -- {* PROPERTY A 2 *}
+  -- \<open>PROPERTY A 2\<close>
   by (simp add: CONSTANT PREC_0)
 
 lemma ack_succ_succ:
   "[| i\<in>nat;  j\<in>nat |] ==> ack(succ(i), succ(j)) = ack(i, ack(succ(i), j))"
-  -- {* PROPERTY A 3 *}
+  -- \<open>PROPERTY A 3\<close>
   by (simp add: CONSTANT PREC_succ COMP_1 PROJ_0)
 
 lemmas [simp] = ack_0 ack_succ_0 ack_succ_succ ack_type
@@ -132,7 +132,7 @@
 
 
 lemma lt_ack2: "i \<in> nat ==> j \<in> nat ==> j < ack(i,j)"
-  -- {* PROPERTY A 4 *}
+  -- \<open>PROPERTY A 4\<close>
   apply (induct i arbitrary: j set: nat)
    apply simp
   apply (induct_tac j)
@@ -142,11 +142,11 @@
   done
 
 lemma ack_lt_ack_succ2: "[|i\<in>nat; j\<in>nat|] ==> ack(i,j) < ack(i, succ(j))"
-  -- {* PROPERTY A 5-, the single-step lemma *}
+  -- \<open>PROPERTY A 5-, the single-step lemma\<close>
   by (induct set: nat) (simp_all add: lt_ack2)
 
 lemma ack_lt_mono2: "[| j<k; i \<in> nat; k \<in> nat |] ==> ack(i,j) < ack(i,k)"
-  -- {* PROPERTY A 5, monotonicity for @{text "<"} *}
+  -- \<open>PROPERTY A 5, monotonicity for @{text "<"}\<close>
   apply (frule lt_nat_in_nat, assumption)
   apply (erule succ_lt_induct)
     apply assumption
@@ -155,14 +155,14 @@
   done
 
 lemma ack_le_mono2: "[|j\<le>k;  i\<in>nat;  k\<in>nat|] ==> ack(i,j) \<le> ack(i,k)"
-  -- {* PROPERTY A 5', monotonicity for @{text \<le>} *}
+  -- \<open>PROPERTY A 5', monotonicity for @{text \<le>}\<close>
   apply (rule_tac f = "\<lambda>j. ack (i,j) " in Ord_lt_mono_imp_le_mono)
      apply (assumption | rule ack_lt_mono2 ack_type [THEN nat_into_Ord])+
   done
 
 lemma ack2_le_ack1:
   "[| i\<in>nat;  j\<in>nat |] ==> ack(i, succ(j)) \<le> ack(succ(i), j)"
-  -- {* PROPERTY A 6 *}
+  -- \<open>PROPERTY A 6\<close>
   apply (induct_tac j)
    apply simp_all
   apply (rule ack_le_mono2)
@@ -171,14 +171,14 @@
   done
 
 lemma ack_lt_ack_succ1: "[| i \<in> nat; j \<in> nat |] ==> ack(i,j) < ack(succ(i),j)"
-  -- {* PROPERTY A 7-, the single-step lemma *}
+  -- \<open>PROPERTY A 7-, the single-step lemma\<close>
   apply (rule ack_lt_mono2 [THEN lt_trans2])
      apply (rule_tac [4] ack2_le_ack1)
       apply auto
   done
 
 lemma ack_lt_mono1: "[| i<j; j \<in> nat; k \<in> nat |] ==> ack(i,k) < ack(j,k)"
-  -- {* PROPERTY A 7, monotonicity for @{text "<"} *}
+  -- \<open>PROPERTY A 7, monotonicity for @{text "<"}\<close>
   apply (frule lt_nat_in_nat, assumption)
   apply (erule succ_lt_induct)
     apply assumption
@@ -187,23 +187,23 @@
   done
 
 lemma ack_le_mono1: "[| i\<le>j; j \<in> nat; k \<in> nat |] ==> ack(i,k) \<le> ack(j,k)"
-  -- {* PROPERTY A 7', monotonicity for @{text \<le>} *}
+  -- \<open>PROPERTY A 7', monotonicity for @{text \<le>}\<close>
   apply (rule_tac f = "\<lambda>j. ack (j,k) " in Ord_lt_mono_imp_le_mono)
      apply (assumption | rule ack_lt_mono1 ack_type [THEN nat_into_Ord])+
   done
 
 lemma ack_1: "j \<in> nat ==> ack(1,j) = succ(succ(j))"
-  -- {* PROPERTY A 8 *}
+  -- \<open>PROPERTY A 8\<close>
   by (induct set: nat) simp_all
 
 lemma ack_2: "j \<in> nat ==> ack(succ(1),j) = succ(succ(succ(j#+j)))"
-  -- {* PROPERTY A 9 *}
+  -- \<open>PROPERTY A 9\<close>
   by (induct set: nat) (simp_all add: ack_1)
 
 lemma ack_nest_bound:
   "[| i1 \<in> nat; i2 \<in> nat; j \<in> nat |]
     ==> ack(i1, ack(i2,j)) < ack(succ(succ(i1#+i2)), j)"
-  -- {* PROPERTY A 10 *}
+  -- \<open>PROPERTY A 10\<close>
   apply (rule lt_trans2 [OF _ ack2_le_ack1])
     apply simp
     apply (rule add_le_self [THEN ack_le_mono1, THEN lt_trans1])
@@ -214,7 +214,7 @@
 lemma ack_add_bound:
   "[| i1 \<in> nat; i2 \<in> nat; j \<in> nat |]
     ==> ack(i1,j) #+ ack(i2,j) < ack(succ(succ(succ(succ(i1#+i2)))), j)"
-  -- {* PROPERTY A 11 *}
+  -- \<open>PROPERTY A 11\<close>
   apply (rule_tac j = "ack (succ (1), ack (i1 #+ i2, j))" in lt_trans)
    apply (simp add: ack_2)
    apply (rule_tac [2] ack_nest_bound [THEN lt_trans2])
@@ -225,9 +225,9 @@
 lemma ack_add_bound2:
      "[| i < ack(k,j);  j \<in> nat;  k \<in> nat |]
       ==> i#+j < ack(succ(succ(succ(succ(k)))), j)"
-  -- {* PROPERTY A 12. *}
-  -- {* Article uses existential quantifier but the ALF proof used @{term "k#+#4"}. *}
-  -- {* Quantified version must be nested @{text "\<exists>k'. \<forall>i,j \<dots>"}. *}
+  -- \<open>PROPERTY A 12.\<close>
+  -- \<open>Article uses existential quantifier but the ALF proof used @{term "k#+#4"}.\<close>
+  -- \<open>Quantified version must be nested @{text "\<exists>k'. \<forall>i,j \<dots>"}.\<close>
   apply (rule_tac j = "ack (k,j) #+ ack (0,j) " in lt_trans)
    apply (rule_tac [2] ack_add_bound [THEN lt_trans2])
       apply (rule add_lt_mono)
@@ -235,7 +235,7 @@
   done
 
 
-subsection {* Main result *}
+subsection \<open>Main result\<close>
 
 declare list_add_type [simp]
 
@@ -247,7 +247,7 @@
   done
 
 lemma lt_ack1: "[| i \<in> nat; j \<in> nat |] ==> i < ack(i,j)"
-  -- {* PROPERTY A 4'? Extra lemma needed for @{text CONSTANT} case, constant functions. *}
+  -- \<open>PROPERTY A 4'? Extra lemma needed for @{text CONSTANT} case, constant functions.\<close>
   apply (induct_tac i)
    apply (simp add: nat_0_le)
   apply (erule lt_trans1 [OF succ_leI ack_lt_ack_succ1])
@@ -274,9 +274,9 @@
    apply auto
   done
 
-text {*
+text \<open>
   \medskip @{text COMP} case.
-*}
+\<close>
 
 lemma COMP_map_lemma:
   "fs \<in> list({f \<in> prim_rec. \<exists>kf \<in> nat. \<forall>l \<in> list(nat). f`l < ack(kf, list_add(l))})
@@ -311,9 +311,9 @@
          apply auto
   done
 
-text {*
+text \<open>
   \medskip @{text PREC} case.
-*}
+\<close>
 
 lemma PREC_case_lemma:
  "[| \<forall>l \<in> list(nat). f`l #+ list_add(l) < ack(kf, list_add(l));
@@ -326,20 +326,20 @@
   apply (erule list.cases)
    apply (simp add: lt_trans [OF nat_le_refl lt_ack2])
   apply simp
-  apply (erule ssubst)  -- {* get rid of the needless assumption *}
+  apply (erule ssubst)  -- \<open>get rid of the needless assumption\<close>
   apply (induct_tac a)
    apply simp_all
-   txt {* base case *}
+   txt \<open>base case\<close>
    apply (rule lt_trans, erule bspec, assumption)
    apply (simp add: add_le_self [THEN ack_lt_mono1])
-  txt {* ind step *}
+  txt \<open>ind step\<close>
   apply (rule succ_leI [THEN lt_trans1])
    apply (rule_tac j = "g ` ll #+ mm" for ll mm in lt_trans1)
     apply (erule_tac [2] bspec)
     apply (rule nat_le_refl [THEN add_le_mono])
        apply typecheck
    apply (simp add: add_le_self2)
-   txt {* final part of the simplification *}
+   txt \<open>final part of the simplification\<close>
   apply simp
   apply (rule add_le_self2 [THEN ack_le_mono1, THEN lt_trans1])
      apply (erule_tac [4] ack_lt_mono2)
--- a/src/ZF/Induct/PropLog.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/PropLog.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,11 +3,11 @@
     Copyright   1993  University of Cambridge
 *)
 
-section {* Meta-theory of propositional logic *}
+section \<open>Meta-theory of propositional logic\<close>
 
 theory PropLog imports Main begin
 
-text {*
+text \<open>
   Datatype definition of propositional logic formulae and inductive
   definition of the propositional tautologies.
 
@@ -16,10 +16,10 @@
 
   Prove: If @{text "H |= p"} then @{text "G |= p"} where @{text "G \<in>
   Fin(H)"}
-*}
+\<close>
 
 
-subsection {* The datatype of propositions *}
+subsection \<open>The datatype of propositions\<close>
 
 consts
   propn :: i
@@ -30,7 +30,7 @@
   | Imp ("p \<in> propn", "q \<in> propn")    (infixr "=>" 90)
 
 
-subsection {* The proof system *}
+subsection \<open>The proof system\<close>
 
 consts thms     :: "i => i"
 
@@ -52,9 +52,9 @@
 declare propn.intros [simp]
 
 
-subsection {* The semantics *}
+subsection \<open>The semantics\<close>
 
-subsubsection {* Semantics of propositional logic. *}
+subsubsection \<open>Semantics of propositional logic.\<close>
 
 consts
   is_true_fun :: "[i,i] => i"
@@ -66,7 +66,7 @@
 definition
   is_true :: "[i,i] => o"  where
   "is_true(p,t) == is_true_fun(p,t) = 1"
-  -- {* this definition is required since predicates can't be recursive *}
+  -- \<open>this definition is required since predicates can't be recursive\<close>
 
 lemma is_true_Fls [simp]: "is_true(Fls,t) \<longleftrightarrow> False"
   by (simp add: is_true_def)
@@ -78,22 +78,22 @@
   by (simp add: is_true_def)
 
 
-subsubsection {* Logical consequence *}
+subsubsection \<open>Logical consequence\<close>
 
-text {*
+text \<open>
   For every valuation, if all elements of @{text H} are true then so
   is @{text p}.
-*}
+\<close>
 
 definition
   logcon :: "[i,i] => o"    (infixl "|=" 50)  where
   "H |= p == \<forall>t. (\<forall>q \<in> H. is_true(q,t)) \<longrightarrow> is_true(p,t)"
 
 
-text {*
+text \<open>
   A finite set of hypotheses from @{text t} and the @{text Var}s in
   @{text p}.
-*}
+\<close>
 
 consts
   hyps :: "[i,i] => i"
@@ -104,7 +104,7 @@
 
 
 
-subsection {* Proof theory of propositional logic *}
+subsection \<open>Proof theory of propositional logic\<close>
 
 lemma thms_mono: "G \<subseteq> H ==> thms(G) \<subseteq> thms(H)"
   apply (unfold thms.defs)
@@ -118,13 +118,13 @@
 inductive_cases ImpE: "p=>q \<in> propn"
 
 lemma thms_MP: "[| H |- p=>q;  H |- p |] ==> H |- q"
-  -- {* Stronger Modus Ponens rule: no typechecking! *}
+  -- \<open>Stronger Modus Ponens rule: no typechecking!\<close>
   apply (rule thms.MP)
      apply (erule asm_rl thms_in_pl thms_in_pl [THEN ImpE])+
   done
 
 lemma thms_I: "p \<in> propn ==> H |- p=>p"
-  -- {*Rule is called @{text I} for Identity Combinator, not for Introduction. *}
+  -- \<open>Rule is called @{text I} for Identity Combinator, not for Introduction.\<close>
   apply (rule thms.S [THEN thms_MP, THEN thms_MP])
       apply (rule_tac [5] thms.K)
        apply (rule_tac [4] thms.K)
@@ -132,10 +132,10 @@
   done
 
 
-subsubsection {* Weakening, left and right *}
+subsubsection \<open>Weakening, left and right\<close>
 
 lemma weaken_left: "[| G \<subseteq> H;  G|-p |] ==> H|-p"
-  -- {* Order of premises is convenient with @{text THEN} *}
+  -- \<open>Order of premises is convenient with @{text THEN}\<close>
   by (erule thms_mono [THEN subsetD])
 
 lemma weaken_left_cons: "H |- p ==> cons(a,H) |- p"
@@ -148,7 +148,7 @@
   by (simp_all add: thms.K [THEN thms_MP] thms_in_pl)
 
 
-subsubsection {* The deduction theorem *}
+subsubsection \<open>The deduction theorem\<close>
 
 theorem deduction: "[| cons(p,H) |- q;  p \<in> propn |] ==>  H |- p=>q"
   apply (erule thms.induct)
@@ -160,7 +160,7 @@
   done
 
 
-subsubsection {* The cut rule *}
+subsubsection \<open>The cut rule\<close>
 
 lemma cut: "[| H|-p;  cons(p,H) |- q |] ==>  H |- q"
   apply (rule deduction [THEN thms_MP])
@@ -177,7 +177,7 @@
   by (erule thms_MP [THEN thms_FlsE])
 
 
-subsubsection {* Soundness of the rules wrt truth-table semantics *}
+subsubsection \<open>Soundness of the rules wrt truth-table semantics\<close>
 
 theorem soundness: "H |- p ==> H |= p"
   apply (unfold logcon_def)
@@ -186,9 +186,9 @@
   done
 
 
-subsection {* Completeness *}
+subsection \<open>Completeness\<close>
 
-subsubsection {* Towards the completeness proof *}
+subsubsection \<open>Towards the completeness proof\<close>
 
 lemma Fls_Imp: "[| H |- p=>Fls; q \<in> propn |] ==> H |- p=>q"
   apply (frule thms_in_pl)
@@ -208,7 +208,7 @@
 
 lemma hyps_thms_if:
     "p \<in> propn ==> hyps(p,t) |- (if is_true(p,t) then p else p=>Fls)"
-  -- {* Typical example of strengthening the induction statement. *}
+  -- \<open>Typical example of strengthening the induction statement.\<close>
   apply simp
   apply (induct_tac p)
     apply (simp_all add: thms_I thms.H)
@@ -217,21 +217,21 @@
   done
 
 lemma logcon_thms_p: "[| p \<in> propn;  0 |= p |] ==> hyps(p,t) |- p"
-  -- {* Key lemma for completeness; yields a set of assumptions satisfying @{text p} *}
+  -- \<open>Key lemma for completeness; yields a set of assumptions satisfying @{text p}\<close>
   apply (drule hyps_thms_if)
   apply (simp add: logcon_def)
   done
 
-text {*
+text \<open>
   For proving certain theorems in our new propositional logic.
-*}
+\<close>
 
 lemmas propn_SIs = propn.intros deduction
   and propn_Is = thms_in_pl thms.H thms.H [THEN thms_MP]
 
-text {*
+text \<open>
   The excluded middle in the form of an elimination rule.
-*}
+\<close>
 
 lemma thms_excluded_middle:
     "[| p \<in> propn;  q \<in> propn |] ==> H |- (p=>q) => ((p=>Fls)=>q) => q"
@@ -242,33 +242,33 @@
 
 lemma thms_excluded_middle_rule:
   "[| cons(p,H) |- q;  cons(p=>Fls,H) |- q;  p \<in> propn |] ==> H |- q"
-  -- {* Hard to prove directly because it requires cuts *}
+  -- \<open>Hard to prove directly because it requires cuts\<close>
   apply (rule thms_excluded_middle [THEN thms_MP, THEN thms_MP])
      apply (blast intro!: propn_SIs intro: propn_Is)+
   done
 
 
-subsubsection {* Completeness -- lemmas for reducing the set of assumptions *}
+subsubsection \<open>Completeness -- lemmas for reducing the set of assumptions\<close>
 
-text {*
+text \<open>
   For the case @{prop "hyps(p,t)-cons(#v,Y) |- p"} we also have @{prop
   "hyps(p,t)-{#v} \<subseteq> hyps(p, t-{v})"}.
-*}
+\<close>
 
 lemma hyps_Diff:
     "p \<in> propn ==> hyps(p, t-{v}) \<subseteq> cons(#v=>Fls, hyps(p,t)-{#v})"
   by (induct set: propn) auto
 
-text {*
+text \<open>
   For the case @{prop "hyps(p,t)-cons(#v => Fls,Y) |- p"} we also have
   @{prop "hyps(p,t)-{#v=>Fls} \<subseteq> hyps(p, cons(v,t))"}.
-*}
+\<close>
 
 lemma hyps_cons:
     "p \<in> propn ==> hyps(p, cons(v,t)) \<subseteq> cons(#v, hyps(p,t)-{#v=>Fls})"
   by (induct set: propn) auto
 
-text {* Two lemmas for use with @{text weaken_left} *}
+text \<open>Two lemmas for use with @{text weaken_left}\<close>
 
 lemma cons_Diff_same: "B-C \<subseteq> cons(a, B-cons(a,C))"
   by blast
@@ -276,36 +276,36 @@
 lemma cons_Diff_subset2: "cons(a, B-{c}) - D \<subseteq> cons(a, B-cons(c,D))"
   by blast
 
-text {*
+text \<open>
   The set @{term "hyps(p,t)"} is finite, and elements have the form
   @{term "#v"} or @{term "#v=>Fls"}; could probably prove the stronger
   @{prop "hyps(p,t) \<in> Fin(hyps(p,0) \<union> hyps(p,nat))"}.
-*}
+\<close>
 
 lemma hyps_finite: "p \<in> propn ==> hyps(p,t) \<in> Fin(\<Union>v \<in> nat. {#v, #v=>Fls})"
   by (induct set: propn) auto
 
 lemmas Diff_weaken_left = Diff_mono [OF _ subset_refl, THEN weaken_left]
 
-text {*
+text \<open>
   Induction on the finite set of assumptions @{term "hyps(p,t0)"}.  We
   may repeatedly subtract assumptions until none are left!
-*}
+\<close>
 
 lemma completeness_0_lemma [rule_format]:
     "[| p \<in> propn;  0 |= p |] ==> \<forall>t. hyps(p,t) - hyps(p,t0) |- p"
   apply (frule hyps_finite)
   apply (erule Fin_induct)
    apply (simp add: logcon_thms_p Diff_0)
-  txt {* inductive step *}
+  txt \<open>inductive step\<close>
   apply safe
-   txt {* Case @{prop "hyps(p,t)-cons(#v,Y) |- p"} *}
+   txt \<open>Case @{prop "hyps(p,t)-cons(#v,Y) |- p"}\<close>
    apply (rule thms_excluded_middle_rule)
      apply (erule_tac [3] propn.intros)
     apply (blast intro: cons_Diff_same [THEN weaken_left])
    apply (blast intro: cons_Diff_subset2 [THEN weaken_left]
      hyps_Diff [THEN Diff_weaken_left])
-  txt {* Case @{prop "hyps(p,t)-cons(#v => Fls,Y) |- p"} *}
+  txt \<open>Case @{prop "hyps(p,t)-cons(#v => Fls,Y) |- p"}\<close>
   apply (rule thms_excluded_middle_rule)
     apply (erule_tac [3] propn.intros)
    apply (blast intro: cons_Diff_subset2 [THEN weaken_left]
@@ -314,16 +314,16 @@
   done
 
 
-subsubsection {* Completeness theorem *}
+subsubsection \<open>Completeness theorem\<close>
 
 lemma completeness_0: "[| p \<in> propn;  0 |= p |] ==> 0 |- p"
-  -- {* The base case for completeness *}
+  -- \<open>The base case for completeness\<close>
   apply (rule Diff_cancel [THEN subst])
   apply (blast intro: completeness_0_lemma)
   done
 
 lemma logcon_Imp: "[| cons(p,H) |= q |] ==> H |= p=>q"
-  -- {* A semantic analogue of the Deduction Theorem *}
+  -- \<open>A semantic analogue of the Deduction Theorem\<close>
   by (simp add: logcon_def)
 
 lemma completeness:
--- a/src/ZF/Induct/Rmap.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/Rmap.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1994  University of Cambridge
 *)
 
-section {* An operator to ``map'' a relation over a list *}
+section \<open>An operator to ``map'' a relation over a list\<close>
 
 theory Rmap imports Main begin
 
@@ -52,10 +52,10 @@
    apply blast+
   done
 
-text {*
+text \<open>
   \medskip If @{text f} is a function then @{text "rmap(f)"} behaves
   as expected.
-*}
+\<close>
 
 lemma rmap_fun_type: "f \<in> A->B ==> rmap(f): list(A)->list(B)"
   by (simp add: Pi_iff rmap_rel_type rmap_functional rmap_total)
--- a/src/ZF/Induct/Term.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/Term.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,14 +3,14 @@
     Copyright   1994  University of Cambridge
 *)
 
-section {* Terms over an alphabet *}
+section \<open>Terms over an alphabet\<close>
 
 theory Term imports Main begin
 
-text {*
+text \<open>
   Illustrates the list functor (essentially the same type as in @{text
   Trees_Forest}).
-*}
+\<close>
 
 consts
   "term" :: "i => i"
@@ -56,7 +56,7 @@
       !!x z zs. [| x \<in> A;  z \<in> term(A);  zs: list(term(A));  P(Apply(x,zs))
                 |] ==> P(Apply(x, Cons(z,zs)))
      |] ==> P(t)"
-  -- {* Induction on @{term "term(A)"} followed by induction on @{term list}. *}
+  -- \<open>Induction on @{term "term(A)"} followed by induction on @{term list}.\<close>
   apply (induct_tac t)
   apply (erule list.induct)
    apply (auto dest: list_CollectD)
@@ -67,15 +67,15 @@
       !!x zs. [| x \<in> A;  zs: list(term(A));  map(f,zs) = map(g,zs) |] ==>
               f(Apply(x,zs)) = g(Apply(x,zs))
    |] ==> f(t) = g(t)"
-  -- {* Induction on @{term "term(A)"} to prove an equation. *}
+  -- \<open>Induction on @{term "term(A)"} to prove an equation.\<close>
   apply (induct_tac t)
   apply (auto dest: map_list_Collect list_CollectD)
   done
 
-text {*
+text \<open>
   \medskip Lemmas to justify using @{term "term"} in other recursive
   type definitions.
-*}
+\<close>
 
 lemma term_mono: "A \<subseteq> B ==> term(A) \<subseteq> term(B)"
   apply (unfold term.defs)
@@ -85,7 +85,7 @@
   done
 
 lemma term_univ: "term(univ(A)) \<subseteq> univ(A)"
-  -- {* Easily provable by induction also *}
+  -- \<open>Easily provable by induction also\<close>
   apply (unfold term.defs term.con_defs)
   apply (rule lfp_lowerbound)
    apply (rule_tac [2] A_subset_univ [THEN univ_mono])
@@ -102,13 +102,13 @@
 lemma term_into_univ: "[| t \<in> term(A);  A \<subseteq> univ(B) |] ==> t \<in> univ(B)"
   by (rule term_subset_univ [THEN subsetD])
 
-text {*
+text \<open>
   \medskip @{text term_rec} -- by @{text Vset} recursion.
-*}
+\<close>
 
 lemma map_lemma: "[| l \<in> list(A);  Ord(i);  rank(l)<i |]
     ==> map(\<lambda>z. (\<lambda>x \<in> Vset(i).h(x)) ` z, l) = map(h,l)"
-  -- {* @{term map} works correctly on the underlying list of terms. *}
+  -- \<open>@{term map} works correctly on the underlying list of terms.\<close>
   apply (induct set: list)
    apply simp
   apply (subgoal_tac "rank (a) <i & rank (l) < i")
@@ -119,7 +119,7 @@
 
 lemma term_rec [simp]: "ts \<in> list(A) ==>
   term_rec(Apply(a,ts), d) = d(a, ts, map (\<lambda>z. term_rec(z,d), ts))"
-  -- {* Typing premise is necessary to invoke @{text map_lemma}. *}
+  -- \<open>Typing premise is necessary to invoke @{text map_lemma}.\<close>
   apply (rule term_rec_def [THEN def_Vrec, THEN trans])
   apply (unfold term.con_defs)
   apply (simp add: rank_pair2 map_lemma)
@@ -131,7 +131,7 @@
                    r \<in> list(\<Union>t \<in> term(A). C(t)) |]
                 ==> d(x, zs, r): C(Apply(x,zs))"
   shows "term_rec(t,d) \<in> C(t)"
-  -- {* Slightly odd typing condition on @{text r} in the second premise! *}
+  -- \<open>Slightly odd typing condition on @{text r} in the second premise!\<close>
   using t
   apply induct
   apply (frule list_CollectD)
@@ -159,9 +159,9 @@
   done
 
 
-text {*
+text \<open>
   \medskip @{term term_map}.
-*}
+\<close>
 
 lemma term_map [simp]:
   "ts \<in> list(A) ==>
@@ -181,9 +181,9 @@
   apply (erule RepFunI)
   done
 
-text {*
+text \<open>
   \medskip @{term term_size}.
-*}
+\<close>
 
 lemma term_size [simp]:
     "ts \<in> list(A) ==> term_size(Apply(a, ts)) = succ(list_add(map(term_size, ts)))"
@@ -193,9 +193,9 @@
   by (auto simp add: term_size_def)
 
 
-text {*
+text \<open>
   \medskip @{text reflect}.
-*}
+\<close>
 
 lemma reflect [simp]:
     "ts \<in> list(A) ==> reflect(Apply(a, ts)) = Apply(a, rev(map(reflect, ts)))"
@@ -205,9 +205,9 @@
   by (auto simp add: reflect_def)
 
 
-text {*
+text \<open>
   \medskip @{text preorder}.
-*}
+\<close>
 
 lemma preorder [simp]:
     "ts \<in> list(A) ==> preorder(Apply(a, ts)) = Cons(a, flat(map(preorder, ts)))"
@@ -217,9 +217,9 @@
   by (simp add: preorder_def)
 
 
-text {*
+text \<open>
   \medskip @{text postorder}.
-*}
+\<close>
 
 lemma postorder [simp]:
     "ts \<in> list(A) ==> postorder(Apply(a, ts)) = flat(map(postorder, ts)) @ [a]"
@@ -229,9 +229,9 @@
   by (simp add: postorder_def)
 
 
-text {*
+text \<open>
   \medskip Theorems about @{text term_map}.
-*}
+\<close>
 
 declare map_compose [simp]
 
@@ -247,9 +247,9 @@
   by (induct rule: term_induct_eqn) (simp add: rev_map_distrib [symmetric])
 
 
-text {*
+text \<open>
   \medskip Theorems about @{text term_size}.
-*}
+\<close>
 
 lemma term_size_term_map: "t \<in> term(A) ==> term_size(term_map(f,t)) = term_size(t)"
   by (induct rule: term_induct_eqn) simp
@@ -261,17 +261,17 @@
   by (induct rule: term_induct_eqn) (simp add: length_flat)
 
 
-text {*
+text \<open>
   \medskip Theorems about @{text reflect}.
-*}
+\<close>
 
 lemma reflect_reflect_ident: "t \<in> term(A) ==> reflect(reflect(t)) = t"
   by (induct rule: term_induct_eqn) (simp add: rev_map_distrib)
 
 
-text {*
+text \<open>
   \medskip Theorems about preorder.
-*}
+\<close>
 
 lemma preorder_term_map:
     "t \<in> term(A) ==> preorder(term_map(f,t)) = map(f, preorder(t))"
--- a/src/ZF/Induct/Tree_Forest.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Induct/Tree_Forest.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,11 +3,11 @@
     Copyright   1994  University of Cambridge
 *)
 
-section {* Trees and forests, a mutually recursive type definition *}
+section \<open>Trees and forests, a mutually recursive type definition\<close>
 
 theory Tree_Forest imports Main begin
 
-subsection {* Datatype definition *}
+subsection \<open>Datatype definition\<close>
 
 consts
   tree :: "i => i"
@@ -33,10 +33,10 @@
   by (simp only: tree_forest.defs)
 
 
-text {*
+text \<open>
   \medskip @{term "tree_forest(A)"} as the union of @{term "tree(A)"}
   and @{term "forest(A)"}.
-*}
+\<close>
 
 lemma tree_subset_TF: "tree(A) \<subseteq> tree_forest(A)"
   apply (unfold tree_forest.defs)
@@ -61,7 +61,7 @@
 
 lemma tree_forest_unfold:
   "tree_forest(A) = (A \<times> forest(A)) + ({0} + tree(A) \<times> forest(A))"
-    -- {* NOT useful, but interesting \dots *}
+    -- \<open>NOT useful, but interesting \dots\<close>
   supply rews = tree_forest.con_defs tree_def forest_def
   apply (unfold tree_def forest_def)
   apply (fast intro!: tree_forest.intros [unfolded rews, THEN PartD1]
@@ -86,9 +86,9 @@
   apply (rule tree_forest_unfold' [THEN subst_context])
   done
 
-text {*
+text \<open>
   \medskip Type checking for recursor: Not needed; possibly interesting?
-*}
+\<close>
 
 lemma TF_rec_type:
   "[| z \<in> tree_forest(A);
@@ -108,14 +108,14 @@
                     |] ==> d(t,f,r1,r2) \<in> D(Fcons(t,f))
    |] ==> (\<forall>t \<in> tree(A).    tree_forest_rec(b,c,d,t) \<in> C(t)) \<and>
           (\<forall>f \<in> forest(A). tree_forest_rec(b,c,d,f) \<in> D(f))"
-    -- {* Mutually recursive version. *}
+    -- \<open>Mutually recursive version.\<close>
   apply (unfold Ball_def)
   apply (rule tree_forest.mutual_induct)
   apply simp_all
   done
 
 
-subsection {* Operations *}
+subsection \<open>Operations\<close>
 
 consts
   map :: "[i => i, i] => i"
@@ -156,9 +156,9 @@
     of_list (list_of_TF (reflect(tf)) @ Cons(reflect(t), Nil))"
 
 
-text {*
+text \<open>
   \medskip @{text list_of_TF} and @{text of_list}.
-*}
+\<close>
 
 lemma list_of_TF_type [TC]:
     "z \<in> tree_forest(A) ==> list_of_TF(z) \<in> list(tree(A))"
@@ -167,9 +167,9 @@
 lemma of_list_type [TC]: "l \<in> list(tree(A)) ==> of_list(l) \<in> forest(A)"
   by (induct set: list) simp_all
 
-text {*
+text \<open>
   \medskip @{text map}.
-*}
+\<close>
 
 lemma
   assumes "!!x. x \<in> A ==> h(x): B"
@@ -178,32 +178,32 @@
   using assms
   by (induct rule: tree'induct forest'induct) simp_all
 
-text {*
+text \<open>
   \medskip @{text size}.
-*}
+\<close>
 
 lemma size_type [TC]: "z \<in> tree_forest(A) ==> size(z) \<in> nat"
   by (induct set: tree_forest) simp_all
 
 
-text {*
+text \<open>
   \medskip @{text preorder}.
-*}
+\<close>
 
 lemma preorder_type [TC]: "z \<in> tree_forest(A) ==> preorder(z) \<in> list(A)"
   by (induct set: tree_forest) simp_all
 
 
-text {*
+text \<open>
   \medskip Theorems about @{text list_of_TF} and @{text of_list}.
-*}
+\<close>
 
 lemma forest_induct [consumes 1, case_names Fnil Fcons]:
   "[| f \<in> forest(A);
       R(Fnil);
       !!t f. [| t \<in> tree(A);  f \<in> forest(A);  R(f) |] ==> R(Fcons(t,f))
    |] ==> R(f)"
-  -- {* Essentially the same as list induction. *}
+  -- \<open>Essentially the same as list induction.\<close>
   apply (erule tree_forest.mutual_induct
       [THEN conjunct2, THEN spec, THEN [2] rev_mp])
     apply (rule TrueI)
@@ -218,9 +218,9 @@
   by (induct set: list) simp_all
 
 
-text {*
+text \<open>
   \medskip Theorems about @{text map}.
-*}
+\<close>
 
 lemma map_ident: "z \<in> tree_forest(A) ==> map(\<lambda>u. u, z) = z"
   by (induct set: tree_forest) simp_all
@@ -230,9 +230,9 @@
   by (induct set: tree_forest) simp_all
 
 
-text {*
+text \<open>
   \medskip Theorems about @{text size}.
-*}
+\<close>
 
 lemma size_map: "z \<in> tree_forest(A) ==> size(map(h,z)) = size(z)"
   by (induct set: tree_forest) simp_all
@@ -240,9 +240,9 @@
 lemma size_length: "z \<in> tree_forest(A) ==> size(z) = length(preorder(z))"
   by (induct set: tree_forest) (simp_all add: length_app)
 
-text {*
+text \<open>
   \medskip Theorems about @{text preorder}.
-*}
+\<close>
 
 lemma preorder_map:
     "z \<in> tree_forest(A) ==> preorder(map(h,z)) = List_ZF.map(h, preorder(z))"
--- a/src/ZF/Inductive_ZF.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Inductive_ZF.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -9,7 +9,7 @@
 Products are used only to derive "streamlined" induction rules for relations
 *)
 
-section{*Inductive and Coinductive Definitions*}
+section\<open>Inductive and Coinductive Definitions\<close>
 
 theory Inductive_ZF
 imports Fixedpt QPair Nat_ZF
@@ -34,7 +34,7 @@
 ML_file "Tools/induct_tacs.ML"
 ML_file "Tools/primrec_package.ML"
 
-ML {*
+ML \<open>
 structure Lfp =
   struct
   val oper      = @{const lfp}
@@ -127,6 +127,6 @@
   Add_inductive_def_Fun(structure Fp=Gfp and Pr=Quine_Prod and CP=Quine_CP
     and Su=Quine_Sum val coind = true);
 
-*}
+\<close>
 
 end
--- a/src/ZF/InfDatatype.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/InfDatatype.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1994  University of Cambridge
 *)
 
-section{*Infinite-Branching Datatype Definitions*}
+section\<open>Infinite-Branching Datatype Definitions\<close>
 
 theory InfDatatype imports Datatype_ZF Univ Finite Cardinal_AC begin
 
@@ -70,7 +70,7 @@
        ==> f: Vfrom(A,csucc(K))"
 by (blast intro: fun_Vcsucc [THEN subsetD])
 
-text{*Remove @{text "\<subseteq>"} from the rule above*}
+text\<open>Remove @{text "\<subseteq>"} from the rule above\<close>
 lemmas fun_in_Vcsucc' = fun_in_Vcsucc [OF _ _ _ subsetI]
 
 (** Version where K itself is the index set **)
--- a/src/ZF/IntDiv_ZF.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/IntDiv_ZF.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -27,7 +27,7 @@
                           else        negateSnd (posDivAlg (~a,~b));
 *)
 
-section{*The Division Operators Div and Mod*}
+section\<open>The Division Operators Div and Mod\<close>
 
 theory IntDiv_ZF
 imports Bin OrderArith
@@ -349,7 +349,7 @@
 by (simp add: zmult_commute [of k] zmult_cancel2)
 
 
-subsection{* Uniqueness and monotonicity of quotients and remainders *}
+subsection\<open>Uniqueness and monotonicity of quotients and remainders\<close>
 
 lemma unique_quotient_lemma:
      "[| b$*q' $+ r' $<= b$*q $+ r;  #0 $<= r';  #0 $< b;  r $< b |]
@@ -400,8 +400,8 @@
 done
 
 
-subsection{*Correctness of posDivAlg,
-           the Division Algorithm for @{text "a\<ge>0"} and @{text "b>0"} *}
+subsection\<open>Correctness of posDivAlg,
+           the Division Algorithm for @{text "a\<ge>0"} and @{text "b>0"}\<close>
 
 lemma adjust_eq [simp]:
      "adjust(b, <q,r>) = (let diff = r$-b in
@@ -466,7 +466,7 @@
   by (simp add: int_eq_iff_zle)
 
 
-subsection{* Some convenient biconditionals for products of signs *}
+subsection\<open>Some convenient biconditionals for products of signs\<close>
 
 lemma zmult_pos: "[| #0 $< i; #0 $< j |] ==> #0 $< i $* j"
   by (drule zmult_zless_mono1, auto)
@@ -546,23 +546,23 @@
 apply (rule_tac u = "a" and v = "b" in posDivAlg_induct)
 apply auto
    apply (simp_all add: quorem_def)
-   txt{*base case: a<b*}
+   txt\<open>base case: a<b\<close>
    apply (simp add: posDivAlg_eqn)
   apply (simp add: not_zless_iff_zle [THEN iff_sym])
  apply (simp add: int_0_less_mult_iff)
-txt{*main argument*}
+txt\<open>main argument\<close>
 apply (subst posDivAlg_eqn)
 apply (simp_all (no_asm_simp))
 apply (erule splitE)
 apply (rule posDivAlg_type)
 apply (simp_all add: int_0_less_mult_iff)
 apply (auto simp add: zadd_zmult_distrib2 Let_def)
-txt{*now just linear arithmetic*}
+txt\<open>now just linear arithmetic\<close>
 apply (simp add: not_zle_iff_zless zdiff_zless_iff)
 done
 
 
-subsection{*Correctness of negDivAlg, the division algorithm for a<0 and b>0*}
+subsection\<open>Correctness of negDivAlg, the division algorithm for a<0 and b>0\<close>
 
 lemma negDivAlg_termination:
      "[| #0 $< b; a $+ b $< #0 |]
@@ -642,23 +642,23 @@
 apply (rule_tac u = "a" and v = "b" in negDivAlg_induct)
   apply auto
    apply (simp_all add: quorem_def)
-   txt{*base case: @{term "0$<=a$+b"}*}
+   txt\<open>base case: @{term "0$<=a$+b"}\<close>
    apply (simp add: negDivAlg_eqn)
   apply (simp add: not_zless_iff_zle [THEN iff_sym])
  apply (simp add: int_0_less_mult_iff)
-txt{*main argument*}
+txt\<open>main argument\<close>
 apply (subst negDivAlg_eqn)
 apply (simp_all (no_asm_simp))
 apply (erule splitE)
 apply (rule negDivAlg_type)
 apply (simp_all add: int_0_less_mult_iff)
 apply (auto simp add: zadd_zmult_distrib2 Let_def)
-txt{*now just linear arithmetic*}
+txt\<open>now just linear arithmetic\<close>
 apply (simp add: not_zle_iff_zless zdiff_zless_iff)
 done
 
 
-subsection{* Existence shown by proving the division algorithm to be correct *}
+subsection\<open>Existence shown by proving the division algorithm to be correct\<close>
 
 (*the case a=0*)
 lemma quorem_0: "[|b \<noteq> #0;  b \<in> int|] ==> quorem (<#0,b>, <#0,#0>)"
@@ -702,7 +702,7 @@
       ==> quorem (<a,b>, negateSnd(qr))"
 apply clarify
 apply (auto elim: zless_asym simp add: quorem_def zless_zminus)
-txt{*linear arithmetic from here on*}
+txt\<open>linear arithmetic from here on\<close>
 apply (simp_all add: zminus_equation [of a] zminus_zless)
 apply (cut_tac [2] z = "b" and w = "#0" in zless_linear)
 apply (cut_tac [1] z = "b" and w = "#0" in zless_linear)
@@ -716,7 +716,7 @@
 apply (safe intro!: quorem_neg posDivAlg_correct negDivAlg_correct
                     posDivAlg_type negDivAlg_type)
 apply (auto simp add: quorem_def neq_iff_zless)
-txt{*linear arithmetic from here on*}
+txt\<open>linear arithmetic from here on\<close>
 apply (auto simp add: zle_def)
 done
 
@@ -945,7 +945,7 @@
 done
 
 
-subsection{* division of a number by itself *}
+subsection\<open>division of a number by itself\<close>
 
 lemma self_quotient_aux1: "[| #0 $< a; a = r $+ a$*q; r $< a |] ==> #1 $<= q"
 apply (subgoal_tac "#0 $< a$*q")
@@ -1011,7 +1011,7 @@
 done
 
 
-subsection{* Computation of division and remainder *}
+subsection\<open>Computation of division and remainder\<close>
 
 lemma zdiv_zero [simp]: "#0 zdiv b = #0"
   by (simp add: zdiv_def divAlg_def)
@@ -1152,7 +1152,7 @@
 declare zdiv_minus1_right [simp]
 
 
-subsection{* Monotonicity in the first argument (divisor) *}
+subsection\<open>Monotonicity in the first argument (divisor)\<close>
 
 lemma zdiv_mono1: "[| a $<= a';  #0 $< b |] ==> a zdiv b $<= a' zdiv b"
 apply (cut_tac a = "a" and b = "b" in zmod_zdiv_equality)
@@ -1173,7 +1173,7 @@
 done
 
 
-subsection{* Monotonicity in the second argument (dividend) *}
+subsection\<open>Monotonicity in the second argument (dividend)\<close>
 
 lemma q_pos_lemma:
      "[| #0 $<= b'$*q' $+ r'; r' $< b';  #0 $< b' |] ==> #0 $<= q'"
@@ -1286,7 +1286,7 @@
 
 
 
-subsection{* More algebraic laws for zdiv and zmod *}
+subsection\<open>More algebraic laws for zdiv and zmod\<close>
 
 (** proving (a*b) zdiv c = a $* (b zdiv c) $+ a * (b zmod c) **)
 
@@ -1456,7 +1456,7 @@
 done
 
 
-subsection{* proving  a zdiv (b*c) = (a zdiv b) zdiv c *}
+subsection\<open>proving  a zdiv (b*c) = (a zdiv b) zdiv c\<close>
 
 (*The condition c>0 seems necessary.  Consider that 7 zdiv ~6 = ~2 but
   7 zdiv 2 zdiv ~3 = 3 zdiv ~3 = ~1.  The subcase (a zdiv b) zmod c = 0 seems
@@ -1550,7 +1550,7 @@
 apply auto
 done
 
-subsection{* Cancellation of common factors in "zdiv" *}
+subsection\<open>Cancellation of common factors in "zdiv"\<close>
 
 lemma zdiv_zmult_zmult1_aux1:
      "[| #0 $< b;  intify(c) \<noteq> #0 |] ==> (c$*a) zdiv (c$*b) = a zdiv b"
@@ -1584,7 +1584,7 @@
 done
 
 
-subsection{* Distribution of factors over "zmod" *}
+subsection\<open>Distribution of factors over "zmod"\<close>
 
 lemma zmod_zmult_zmult1_aux1:
      "[| #0 $< b;  intify(c) \<noteq> #0 |]
--- a/src/ZF/Int_ZF.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Int_ZF.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1993  University of Cambridge
 *)
 
-section{*The Integers as Equivalence Classes Over Pairs of Natural Numbers*}
+section\<open>The Integers as Equivalence Classes Over Pairs of Natural Numbers\<close>
 
 theory Int_ZF imports EquivClass ArithSimp begin
 
@@ -17,11 +17,11 @@
     "int == (nat*nat)//intrel"
 
 definition
-  int_of :: "i=>i" --{*coercion from nat to int*}    ("$# _" [80] 80)  where
+  int_of :: "i=>i" --\<open>coercion from nat to int\<close>    ("$# _" [80] 80)  where
     "$# m == intrel `` {<natify(m), 0>}"
 
 definition
-  intify :: "i=>i" --{*coercion from ANYTHING to int*}  where
+  intify :: "i=>i" --\<open>coercion from ANYTHING to int\<close>  where
     "intify(m) == if m \<in> int then m else $#0"
 
 definition
@@ -50,7 +50,7 @@
 
 definition
   zmagnitude  ::      "i=>i"  where
-  --{*could be replaced by an absolute value function from int to int?*}
+  --\<open>could be replaced by an absolute value function from int to int?\<close>
     "zmagnitude(z) ==
      THE m. m\<in>nat & ((~ znegative(z) & z = $# m) |
                        (znegative(z) & $- z = $# m))"
@@ -93,7 +93,7 @@
 
 notation (xsymbols)
   zmult  (infixl "$\<times>" 70) and
-  zle  (infixl "$\<le>" 50)  --{*less than or equals*}
+  zle  (infixl "$\<le>" 50)  --\<open>less than or equals\<close>
 
 notation (HTML output)
   zmult  (infixl "$\<times>" 70) and
@@ -102,7 +102,7 @@
 
 declare quotientE [elim!]
 
-subsection{*Proving that @{term intrel} is an equivalence relation*}
+subsection\<open>Proving that @{term intrel} is an equivalence relation\<close>
 
 (** Natural deduction for intrel **)
 
@@ -164,8 +164,8 @@
 by (simp add: intify_def)
 
 
-subsection{*Collapsing rules: to remove @{term intify}
-            from arithmetic expressions*}
+subsection\<open>Collapsing rules: to remove @{term intify}
+            from arithmetic expressions\<close>
 
 lemma intify_idem [simp]: "intify(intify(x)) = intify(x)"
 by simp
@@ -215,7 +215,7 @@
 by (simp add: zle_def)
 
 
-subsection{*@{term zminus}: unary negation on @{term int}*}
+subsection\<open>@{term zminus}: unary negation on @{term int}\<close>
 
 lemma zminus_congruent: "(%<x,y>. intrel``{<y,x>}) respects intrel"
 by (auto simp add: congruent_def add_ac)
@@ -266,7 +266,7 @@
 by simp
 
 
-subsection{*@{term znegative}: the test for negative integers*}
+subsection\<open>@{term znegative}: the test for negative integers\<close>
 
 lemma znegative: "[| x\<in>nat; y\<in>nat |] ==> znegative(intrel``{<x,y>}) \<longleftrightarrow> x<y"
 apply (cases "x<y")
@@ -286,7 +286,7 @@
 by (simp add: znegative int_of_def zminus Ord_0_lt_iff [THEN iff_sym])
 
 
-subsection{*@{term nat_of}: Coercion of an Integer to a Natural Number*}
+subsection\<open>@{term nat_of}: Coercion of an Integer to a Natural Number\<close>
 
 lemma nat_of_intify [simp]: "nat_of(intify(z)) = nat_of(z)"
 by (simp add: nat_of_def)
@@ -310,7 +310,7 @@
 lemma nat_of_type [iff,TC]: "nat_of(z) \<in> nat"
 by (simp add: nat_of_def raw_nat_of_type)
 
-subsection{*zmagnitude: magnitide of an integer, as a natural number*}
+subsection\<open>zmagnitude: magnitide of an integer, as a natural number\<close>
 
 lemma zmagnitude_int_of [simp]: "zmagnitude($# n) = natify(n)"
 by (auto simp add: zmagnitude_def int_of_eq)
@@ -380,9 +380,9 @@
 done
 
 
-subsection{*@{term zadd}: addition on int*}
+subsection\<open>@{term zadd}: addition on int\<close>
 
-text{*Congruence Property for Addition*}
+text\<open>Congruence Property for Addition\<close>
 lemma zadd_congruent2:
     "(%z1 z2. let <x1,y1>=z1; <x2,y2>=z2
                             in intrel``{<x1#+x2, y1#+y2>})
@@ -494,9 +494,9 @@
 by simp
 
 
-subsection{*@{term zmult}: Integer Multiplication*}
+subsection\<open>@{term zmult}: Integer Multiplication\<close>
 
-text{*Congruence property for multiplication*}
+text\<open>Congruence property for multiplication\<close>
 lemma zmult_congruent2:
     "(%p1 p2. split(%x1 y1. split(%x2 y2.
                     intrel``{<x1#*x2 #+ y1#*y2, x1#*y2 #+ y1#*x2>}, p2), p1))
@@ -625,7 +625,7 @@
 by (simp add: zdiff_def zadd_ac)
 
 
-subsection{*The "Less Than" Relation*}
+subsection\<open>The "Less Than" Relation\<close>
 
 (*"Less than" is a linear ordering*)
 lemma zless_linear_lemma:
@@ -729,7 +729,7 @@
 done
 
 
-subsection{*Less Than or Equals*}
+subsection\<open>Less Than or Equals\<close>
 
 lemma zle_refl: "z $<= z"
 by (simp add: zle_def)
@@ -779,7 +779,7 @@
 by (simp add: not_zless_iff_zle [THEN iff_sym])
 
 
-subsection{*More subtraction laws (for @{text zcompare_rls})*}
+subsection\<open>More subtraction laws (for @{text zcompare_rls})\<close>
 
 lemma zdiff_zdiff_eq: "(x $- y) $- z = x $- (y $+ z)"
 by (simp add: zdiff_def zadd_ac)
@@ -815,9 +815,9 @@
 lemma zle_zdiff_iff: "(x $<= z$-y) \<longleftrightarrow> (x $+ y $<= z)"
 by (cut_tac zle_zdiff_iff_lemma [ OF intify_in_int intify_in_int], simp)
 
-text{*This list of rewrites simplifies (in)equalities by bringing subtractions
+text\<open>This list of rewrites simplifies (in)equalities by bringing subtractions
   to the top and then moving negative terms to the other side.
-  Use with @{text zadd_ac}*}
+  Use with @{text zadd_ac}\<close>
 lemmas zcompare_rls =
      zdiff_def [symmetric]
      zadd_zdiff_eq zdiff_zadd_eq zdiff_zdiff_eq zdiff_zdiff_eq2
@@ -825,8 +825,8 @@
      zdiff_eq_iff eq_zdiff_iff
 
 
-subsection{*Monotonicity and Cancellation Results for Instantiation
-     of the CancelNumerals Simprocs*}
+subsection\<open>Monotonicity and Cancellation Results for Instantiation
+     of the CancelNumerals Simprocs\<close>
 
 lemma zadd_left_cancel:
      "[| w \<in> int; w': int |] ==> (z $+ w' = z $+ w) \<longleftrightarrow> (w' = w)"
@@ -886,7 +886,7 @@
 by (erule zadd_zless_mono1 [THEN zless_zle_trans], simp)
 
 
-subsection{*Comparison laws*}
+subsection\<open>Comparison laws\<close>
 
 lemma zminus_zless_zminus [simp]: "($- x $< $- y) \<longleftrightarrow> (y $< x)"
 by (simp add: zless_def zdiff_def zadd_ac)
@@ -894,7 +894,7 @@
 lemma zminus_zle_zminus [simp]: "($- x $<= $- y) \<longleftrightarrow> (y $<= x)"
 by (simp add: not_zless_iff_zle [THEN iff_sym])
 
-subsubsection{*More inequality lemmas*}
+subsubsection\<open>More inequality lemmas\<close>
 
 lemma equation_zminus: "[| x \<in> int;  y \<in> int |] ==> (x = $- y) \<longleftrightarrow> (y = $- x)"
 by auto
@@ -913,7 +913,7 @@
 done
 
 
-subsubsection{*The next several equations are permutative: watch out!*}
+subsubsection\<open>The next several equations are permutative: watch out!\<close>
 
 lemma zless_zminus: "(x $< $- y) \<longleftrightarrow> (y $< $- x)"
 by (simp add: zless_def zdiff_def zadd_ac)
--- a/src/ZF/List_ZF.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/List_ZF.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1994  University of Cambridge
 *)
 
-section{*Lists in Zermelo-Fraenkel Set Theory*}
+section\<open>Lists in Zermelo-Fraenkel Set Theory\<close>
 
 theory List_ZF imports Datatype_ZF ArithSimp begin
 
@@ -98,8 +98,8 @@
 
 definition
   nth :: "[i, i]=>i"  where
-  --{*returns the (n+1)th element of a list, or 0 if the
-   list is too short.*}
+  --\<open>returns the (n+1)th element of a list, or 0 if the
+   list is too short.\<close>
   "nth(n, as) == list_rec(\<lambda>n\<in>nat. 0,
                           %a l r. \<lambda>n\<in>nat. nat_case(a, %m. r`m, n), as) ` n"
 
@@ -610,7 +610,7 @@
  apply clarify
  apply (erule list.cases)
  apply simp_all
-txt{*Inductive step*}
+txt\<open>Inductive step\<close>
 apply clarify
 apply (erule_tac a=ys in list.cases, simp_all)
 done
@@ -838,9 +838,9 @@
 apply (erule natE, simp_all)
 done
 
-subsection{*The function zip*}
+subsection\<open>The function zip\<close>
 
-text{*Crafty definition to eliminate a type argument*}
+text\<open>Crafty definition to eliminate a type argument\<close>
 
 consts
   zip_aux        :: "[i,i]=>i"
@@ -1227,7 +1227,7 @@
 apply (simp_all add: sublist_Cons)
 done
 
-text{*Repetition of a List Element*}
+text\<open>Repetition of a List Element\<close>
 
 consts   repeat :: "[i,i]=>i"
 primrec
--- a/src/ZF/Main_ZF.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Main_ZF.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -1,11 +1,11 @@
-section{*Theory Main: Everything Except AC*}
+section\<open>Theory Main: Everything Except AC\<close>
 
 theory Main_ZF imports List_ZF IntDiv_ZF CardinalArith begin
 
 (*The theory of "iterates" logically belongs to Nat, but can't go there because
   primrec isn't available into after Datatype.*)
 
-subsection{* Iteration of the function @{term F} *}
+subsection\<open>Iteration of the function @{term F}\<close>
 
 consts  iterates :: "[i=>i,i,i] => i"   ("(_^_ '(_'))" [60,1000,1000] 60)
 
@@ -44,10 +44,10 @@
 by (induct_tac n, simp_all)
 
 
-subsection{* Transfinite Recursion *}
+subsection\<open>Transfinite Recursion\<close>
 
-text{*Transfinite recursion for definitions based on the
-    three cases of ordinals*}
+text\<open>Transfinite recursion for definitions based on the
+    three cases of ordinals\<close>
 
 definition
   transrec3 :: "[i, i, [i,i]=>i, [i,i]=>i] =>i" where
@@ -70,9 +70,9 @@
 by (rule transrec3_def [THEN def_transrec, THEN trans], force)
 
 
-declaration {* fn _ =>
+declaration \<open>fn _ =>
   Simplifier.map_ss (Simplifier.set_mksimps (fn ctxt =>
     map mk_eq o Ord_atomize o Drule.gen_all (Variable.maxidx_of ctxt)))
-*}
+\<close>
 
 end
--- a/src/ZF/Nat_ZF.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Nat_ZF.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1994  University of Cambridge
 *)
 
-section{*The Natural numbers As a Least Fixed Point*}
+section\<open>The Natural numbers As a Least Fixed Point\<close>
 
 theory Nat_ZF imports OrdQuant Bool begin
 
@@ -47,8 +47,8 @@
   greater_than :: "i=>i"  where
     "greater_than(n) == {i \<in> nat. n < i}"
 
-text{*No need for a less-than operator: a natural number is its list of
-predecessors!*}
+text\<open>No need for a less-than operator: a natural number is its list of
+predecessors!\<close>
 
 
 lemma nat_bnd_mono: "bnd_mono(Inf, %X. {0} \<union> {succ(i). i \<in> X})"
@@ -83,7 +83,7 @@
 lemmas bool_into_nat = bool_subset_nat [THEN subsetD]
 
 
-subsection{*Injectivity Properties and Induction*}
+subsection\<open>Injectivity Properties and Induction\<close>
 
 (*Mathematical induction*)
 lemma nat_induct [case_names 0 succ, induct set: nat]:
@@ -149,7 +149,7 @@
 by (blast dest!: lt_nat_in_nat)
 
 
-subsection{*Variations on Mathematical Induction*}
+subsection\<open>Variations on Mathematical Induction\<close>
 
 (*complete induction*)
 
@@ -209,9 +209,9 @@
      ==> P(m,n)"
 by (blast intro: succ_lt_induct_lemma lt_nat_in_nat)
 
-subsection{*quasinat: to allow a case-split rule for @{term nat_case}*}
+subsection\<open>quasinat: to allow a case-split rule for @{term nat_case}\<close>
 
-text{*True if the argument is zero or any successor*}
+text\<open>True if the argument is zero or any successor\<close>
 lemma [iff]: "quasinat(0)"
 by (simp add: quasinat_def)
 
@@ -255,7 +255,7 @@
 done
 
 
-subsection{*Recursion on the Natural Numbers*}
+subsection\<open>Recursion on the Natural Numbers\<close>
 
 (** nat_rec is used to define eclose and transrec, then becomes obsolete.
     The operator rec, from arith.thy, has fewer typing conditions **)
@@ -288,7 +288,7 @@
 lemma nat_nonempty [simp]: "nat \<noteq> 0"
 by blast
 
-text{*A natural number is the set of its predecessors*}
+text\<open>A natural number is the set of its predecessors\<close>
 lemma nat_eq_Collect_lt: "i \<in> nat ==> {j\<in>nat. j<i} = i"
 apply (rule equalityI)
 apply (blast dest: ltD)
--- a/src/ZF/OrdQuant.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/OrdQuant.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,11 +2,11 @@
     Authors:    Krzysztof Grabczewski and L C Paulson
 *)
 
-section {*Special quantifiers*}
+section \<open>Special quantifiers\<close>
 
 theory OrdQuant imports Ordinal begin
 
-subsection {*Quantifiers and union operator for ordinals*}
+subsection \<open>Quantifiers and union operator for ordinals\<close>
 
 definition
   (* Ordinal Quantifiers *)
@@ -42,7 +42,7 @@
   "_OUNION"   :: "[idt, i, i] => i"        ("(3\<Union>_<_./ _)" 10)
 
 
-subsubsection {*simplification of the new quantifiers*}
+subsubsection \<open>simplification of the new quantifiers\<close>
 
 
 (*MOST IMPORTANT that this is added to the simpset BEFORE Ord_atomize
@@ -64,7 +64,7 @@
 apply (blast intro: lt_Ord2)
 done
 
-subsubsection {*Union over ordinals*}
+subsubsection \<open>Union over ordinals\<close>
 
 lemma Ord_OUN [intro,simp]:
      "[| !!x. x<A ==> Ord(B(x)) |] ==> Ord(\<Union>x<A. B(x))"
@@ -112,7 +112,7 @@
      "(!!x. x<A ==> P(x)) == Trueprop (\<forall>x<A. P(x))"
 by (simp add: oall_def atomize_all atomize_imp)
 
-subsubsection {*universal quantifier for ordinals*}
+subsubsection \<open>universal quantifier for ordinals\<close>
 
 lemma oallI [intro!]:
     "[| !!x. x<A ==> P(x) |] ==> \<forall>x<A. P(x)"
@@ -141,7 +141,7 @@
 by (simp add: oall_def)
 
 
-subsubsection {*existential quantifier for ordinals*}
+subsubsection \<open>existential quantifier for ordinals\<close>
 
 lemma oexI [intro]:
     "[| P(x);  x<A |] ==> \<exists>x<A. P(x)"
@@ -166,7 +166,7 @@
 done
 
 
-subsubsection {*Rules for Ordinal-Indexed Unions*}
+subsubsection \<open>Rules for Ordinal-Indexed Unions\<close>
 
 lemma OUN_I [intro]: "[| a<i;  b \<in> B(a) |] ==> b: (\<Union>z<i. B(z))"
 by (unfold OUnion_def lt_def, blast)
@@ -191,7 +191,7 @@
 done
 
 
-subsection {*Quantification over a class*}
+subsection \<open>Quantification over a class\<close>
 
 definition
   "rall"     :: "[i=>o, i=>o] => o"  where
@@ -217,7 +217,7 @@
   "EX x[M]. P"   == "CONST rex(M, %x. P)"
 
 
-subsubsection{*Relativized universal quantifier*}
+subsubsection\<open>Relativized universal quantifier\<close>
 
 lemma rallI [intro!]: "[| !!x. M(x) ==> P(x) |] ==> \<forall>x[M]. P(x)"
 by (simp add: rall_def)
@@ -243,7 +243,7 @@
 by (simp add: rall_def)
 
 
-subsubsection{*Relativized existential quantifier*}
+subsubsection\<open>Relativized existential quantifier\<close>
 
 lemma rexI [intro]: "[| P(x); M(x) |] ==> \<exists>x[M]. P(x)"
 by (simp add: rex_def, blast)
@@ -317,7 +317,7 @@
 by blast
 
 
-subsubsection{*One-point rule for bounded quantifiers*}
+subsubsection\<open>One-point rule for bounded quantifiers\<close>
 
 lemma rex_triv_one_point1 [simp]: "(\<exists>x[M]. x=a) <-> ( M(a))"
 by blast
@@ -338,7 +338,7 @@
 by blast
 
 
-subsubsection{*Sets as Classes*}
+subsubsection\<open>Sets as Classes\<close>
 
 definition
   setclass :: "[i,i] => o"       ("##_" [40] 40)  where
@@ -355,30 +355,30 @@
 
 
 ML
-{*
+\<open>
 val Ord_atomize =
   atomize ([(@{const_name oall}, @{thms ospec}), (@{const_name rall}, @{thms rspec})] @
     ZF_conn_pairs, ZF_mem_pairs);
-*}
-declaration {* fn _ =>
+\<close>
+declaration \<open>fn _ =>
   Simplifier.map_ss (Simplifier.set_mksimps (fn ctxt =>
     map mk_eq o Ord_atomize o Drule.gen_all (Variable.maxidx_of ctxt)))
-*}
+\<close>
 
-text {* Setting up the one-point-rule simproc *}
+text \<open>Setting up the one-point-rule simproc\<close>
 
-simproc_setup defined_rex ("\<exists>x[M]. P(x) & Q(x)") = {*
+simproc_setup defined_rex ("\<exists>x[M]. P(x) & Q(x)") = \<open>
   fn _ => Quantifier1.rearrange_bex
     (fn ctxt =>
       unfold_tac ctxt @{thms rex_def} THEN
       Quantifier1.prove_one_point_ex_tac ctxt)
-*}
+\<close>
 
-simproc_setup defined_rall ("\<forall>x[M]. P(x) \<longrightarrow> Q(x)") = {*
+simproc_setup defined_rall ("\<forall>x[M]. P(x) \<longrightarrow> Q(x)") = \<open>
   fn _ => Quantifier1.rearrange_ball
     (fn ctxt =>
       unfold_tac ctxt @{thms rall_def} THEN
       Quantifier1.prove_one_point_all_tac ctxt)
-*}
+\<close>
 
 end
--- a/src/ZF/Order.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Order.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -7,13 +7,13 @@
 Additional definitions and lemmas for reflexive orders.
 *)
 
-section{*Partial and Total Orderings: Basic Definitions and Properties*}
+section\<open>Partial and Total Orderings: Basic Definitions and Properties\<close>
 
 theory Order imports WF Perm begin
 
-text {* We adopt the following convention: @{text ord} is used for
+text \<open>We adopt the following convention: @{text ord} is used for
   strict orders and @{text order} is used for their reflexive
-  counterparts. *}
+  counterparts.\<close>
 
 definition
   part_ord :: "[i,i]=>o"                (*Strict partial ordering*)  where
@@ -71,7 +71,7 @@
   ord_iso  ("(\<langle>_, _\<rangle> \<cong>/ \<langle>_, _\<rangle>)" 51)
 
 
-subsection{*Immediate Consequences of the Definitions*}
+subsection\<open>Immediate Consequences of the Definitions\<close>
 
 lemma part_ord_Imp_asym:
     "part_ord(A,r) ==> asym(r \<inter> A*A)"
@@ -131,7 +131,7 @@
 by (unfold trans_on_def pred_def, blast)
 
 
-subsection{*Restricting an Ordering's Domain*}
+subsection\<open>Restricting an Ordering's Domain\<close>
 
 (** The ordering's properties hold over all subsets of its domain
     [including initial segments of the form pred(A,x,r) **)
@@ -189,13 +189,13 @@
 done
 
 
-subsection{*Empty and Unit Domains*}
+subsection\<open>Empty and Unit Domains\<close>
 
 (*The empty relation is well-founded*)
 lemma wf_on_any_0: "wf[A](0)"
 by (simp add: wf_on_def wf_def, fast)
 
-subsubsection{*Relations over the Empty Set*}
+subsubsection\<open>Relations over the Empty Set\<close>
 
 lemma irrefl_0: "irrefl(0,r)"
 by (unfold irrefl_def, blast)
@@ -225,9 +225,9 @@
 done
 
 
-subsubsection{*The Empty Relation Well-Orders the Unit Set*}
+subsubsection\<open>The Empty Relation Well-Orders the Unit Set\<close>
 
-text{*by Grabczewski*}
+text\<open>by Grabczewski\<close>
 
 lemma tot_ord_unit: "tot_ord({a},0)"
 by (simp add: irrefl_def trans_on_def part_ord_def linear_def tot_ord_def)
@@ -238,9 +238,9 @@
 done
 
 
-subsection{*Order-Isomorphisms*}
+subsection\<open>Order-Isomorphisms\<close>
 
-text{*Suppes calls them "similarities"*}
+text\<open>Suppes calls them "similarities"\<close>
 
 (** Order-preserving (monotone) maps **)
 
@@ -372,7 +372,7 @@
 done
 
 
-subsection{*Main results of Kunen, Chapter 1 section 6*}
+subsection\<open>Main results of Kunen, Chapter 1 section 6\<close>
 
 (*Inductive argument for Kunen's Lemma 6.1, etc.
   Simple proof from Halmos, page 72*)
@@ -486,7 +486,7 @@
                     well_ord_is_linear well_ord_ord_iso ord_iso_sym)
 done
 
-subsection{*Towards Kunen's Theorem 6.3: Linearity of the Similarity Relation*}
+subsection\<open>Towards Kunen's Theorem 6.3: Linearity of the Similarity Relation\<close>
 
 lemma ord_iso_map_subset: "ord_iso_map(A,r,B,s) \<subseteq> A*B"
 by (unfold ord_iso_map_def, blast)
@@ -594,7 +594,7 @@
 apply (simp add: domain_ord_iso_map_cases)
 done
 
-text{*Kunen's Theorem 6.3: Fundamental Theorem for Well-Ordered Sets*}
+text\<open>Kunen's Theorem 6.3: Fundamental Theorem for Well-Ordered Sets\<close>
 theorem well_ord_trichotomy:
    "[| well_ord(A,r);  well_ord(B,s) |]
     ==> ord_iso_map(A,r,B,s) \<in> ord_iso(A, r, B, s) |
@@ -615,7 +615,7 @@
 done
 
 
-subsection{*Miscellaneous Results by Krzysztof Grabczewski*}
+subsection\<open>Miscellaneous Results by Krzysztof Grabczewski\<close>
 
 (** Properties of converse(r) **)
 
@@ -662,7 +662,7 @@
 done
 
 
-subsection {* Lemmas for the Reflexive Orders *}
+subsection \<open>Lemmas for the Reflexive Orders\<close>
 
 lemma subset_vimage_vimage_iff:
   "[| Preorder(r); A \<subseteq> field(r); B \<subseteq> field(r) |] ==>
--- a/src/ZF/OrderArith.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/OrderArith.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1994  University of Cambridge
 *)
 
-section{*Combining Orderings: Foundations of Ordinal Arithmetic*}
+section\<open>Combining Orderings: Foundations of Ordinal Arithmetic\<close>
 
 theory OrderArith imports Order Sum Ordinal begin
 
@@ -34,9 +34,9 @@
     "measure(A,f) == {<x,y>: A*A. f(x) < f(y)}"
 
 
-subsection{*Addition of Relations -- Disjoint Sum*}
+subsection\<open>Addition of Relations -- Disjoint Sum\<close>
 
-subsubsection{*Rewrite rules.  Can be used to obtain introduction rules*}
+subsubsection\<open>Rewrite rules.  Can be used to obtain introduction rules\<close>
 
 lemma radd_Inl_Inr_iff [iff]:
     "<Inl(a), Inr(b)> \<in> radd(A,r,B,s)  \<longleftrightarrow>  a \<in> A & b \<in> B"
@@ -56,7 +56,7 @@
 
 declare radd_Inr_Inl_iff [THEN iffD1, dest!]
 
-subsubsection{*Elimination Rule*}
+subsubsection\<open>Elimination Rule\<close>
 
 lemma raddE:
     "[| <p',p> \<in> radd(A,r,B,s);
@@ -66,7 +66,7 @@
      |] ==> Q"
 by (unfold radd_def, blast)
 
-subsubsection{*Type checking*}
+subsubsection\<open>Type checking\<close>
 
 lemma radd_type: "radd(A,r,B,s) \<subseteq> (A+B) * (A+B)"
 apply (unfold radd_def)
@@ -75,25 +75,25 @@
 
 lemmas field_radd = radd_type [THEN field_rel_subset]
 
-subsubsection{*Linearity*}
+subsubsection\<open>Linearity\<close>
 
 lemma linear_radd:
     "[| linear(A,r);  linear(B,s) |] ==> linear(A+B,radd(A,r,B,s))"
 by (unfold linear_def, blast)
 
 
-subsubsection{*Well-foundedness*}
+subsubsection\<open>Well-foundedness\<close>
 
 lemma wf_on_radd: "[| wf[A](r);  wf[B](s) |] ==> wf[A+B](radd(A,r,B,s))"
 apply (rule wf_onI2)
 apply (subgoal_tac "\<forall>x\<in>A. Inl (x) \<in> Ba")
- --{*Proving the lemma, which is needed twice!*}
+ --\<open>Proving the lemma, which is needed twice!\<close>
  prefer 2
  apply (erule_tac V = "y \<in> A + B" in thin_rl)
  apply (rule_tac ballI)
  apply (erule_tac r = r and a = x in wf_on_induct, assumption)
  apply blast
-txt{*Returning to main part of proof*}
+txt\<open>Returning to main part of proof\<close>
 apply safe
 apply blast
 apply (erule_tac r = s and a = ya in wf_on_induct, assumption, blast)
@@ -112,7 +112,7 @@
 apply (simp add: well_ord_def tot_ord_def linear_radd)
 done
 
-subsubsection{*An @{term ord_iso} congruence law*}
+subsubsection\<open>An @{term ord_iso} congruence law\<close>
 
 lemma sum_bij:
      "[| f \<in> bij(A,C);  g \<in> bij(B,D) |]
@@ -141,7 +141,7 @@
 apply auto
 done
 
-subsubsection{*Associativity*}
+subsubsection\<open>Associativity\<close>
 
 lemma sum_assoc_bij:
      "(\<lambda>z\<in>(A+B)+C. case(case(Inl, %y. Inr(Inl(y))), %y. Inr(Inr(y)), z))
@@ -158,9 +158,9 @@
 by (rule sum_assoc_bij [THEN ord_isoI], auto)
 
 
-subsection{*Multiplication of Relations -- Lexicographic Product*}
+subsection\<open>Multiplication of Relations -- Lexicographic Product\<close>
 
-subsubsection{*Rewrite rule.  Can be used to obtain introduction rules*}
+subsubsection\<open>Rewrite rule.  Can be used to obtain introduction rules\<close>
 
 lemma  rmult_iff [iff]:
     "<<a',b'>, <a,b>> \<in> rmult(A,r,B,s) \<longleftrightarrow>
@@ -176,20 +176,20 @@
      |] ==> Q"
 by blast
 
-subsubsection{*Type checking*}
+subsubsection\<open>Type checking\<close>
 
 lemma rmult_type: "rmult(A,r,B,s) \<subseteq> (A*B) * (A*B)"
 by (unfold rmult_def, rule Collect_subset)
 
 lemmas field_rmult = rmult_type [THEN field_rel_subset]
 
-subsubsection{*Linearity*}
+subsubsection\<open>Linearity\<close>
 
 lemma linear_rmult:
     "[| linear(A,r);  linear(B,s) |] ==> linear(A*B,rmult(A,r,B,s))"
 by (simp add: linear_def, blast)
 
-subsubsection{*Well-foundedness*}
+subsubsection\<open>Well-foundedness\<close>
 
 lemma wf_on_rmult: "[| wf[A](r);  wf[B](s) |] ==> wf[A*B](rmult(A,r,B,s))"
 apply (rule wf_onI2)
@@ -217,7 +217,7 @@
 done
 
 
-subsubsection{*An @{term ord_iso} congruence law*}
+subsubsection\<open>An @{term ord_iso} congruence law\<close>
 
 lemma prod_bij:
      "[| f \<in> bij(A,C);  g \<in> bij(B,D) |]
@@ -277,7 +277,7 @@
 apply (auto elim!: well_ord_is_wf [THEN wf_on_asym] predE)
 done
 
-subsubsection{*Distributive law*}
+subsubsection\<open>Distributive law\<close>
 
 lemma sum_prod_distrib_bij:
      "(lam <x,z>:(A+B)*C. case(%y. Inl(<y,z>), %y. Inr(<y,z>), x))
@@ -291,7 +291,7 @@
             (A*C)+(B*C), radd(A*C, rmult(A,r,C,t), B*C, rmult(B,s,C,t)))"
 by (rule sum_prod_distrib_bij [THEN ord_isoI], auto)
 
-subsubsection{*Associativity*}
+subsubsection\<open>Associativity\<close>
 
 lemma prod_assoc_bij:
      "(lam <<x,y>, z>:(A*B)*C. <x,<y,z>>) \<in> bij((A*B)*C, A*(B*C))"
@@ -303,14 +303,14 @@
             A*(B*C), rmult(A, r, B*C, rmult(B,s,C,t)))"
 by (rule prod_assoc_bij [THEN ord_isoI], auto)
 
-subsection{*Inverse Image of a Relation*}
+subsection\<open>Inverse Image of a Relation\<close>
 
-subsubsection{*Rewrite rule*}
+subsubsection\<open>Rewrite rule\<close>
 
 lemma rvimage_iff: "<a,b> \<in> rvimage(A,f,r)  \<longleftrightarrow>  <f`a,f`b>: r & a \<in> A & b \<in> A"
 by (unfold rvimage_def, blast)
 
-subsubsection{*Type checking*}
+subsubsection\<open>Type checking\<close>
 
 lemma rvimage_type: "rvimage(A,f,r) \<subseteq> A*A"
 by (unfold rvimage_def, rule Collect_subset)
@@ -321,7 +321,7 @@
 by (unfold rvimage_def, blast)
 
 
-subsubsection{*Partial Ordering Properties*}
+subsubsection\<open>Partial Ordering Properties\<close>
 
 lemma irrefl_rvimage:
     "[| f \<in> inj(A,B);  irrefl(B,r) |] ==> irrefl(A, rvimage(A,f,r))"
@@ -341,7 +341,7 @@
 apply (blast intro!: irrefl_rvimage trans_on_rvimage)
 done
 
-subsubsection{*Linearity*}
+subsubsection\<open>Linearity\<close>
 
 lemma linear_rvimage:
     "[| f \<in> inj(A,B);  linear(B,r) |] ==> linear(A,rvimage(A,f,r))"
@@ -356,7 +356,7 @@
 done
 
 
-subsubsection{*Well-foundedness*}
+subsubsection\<open>Well-foundedness\<close>
 
 lemma wf_rvimage [intro!]: "wf(r) ==> wf(rvimage(A,f,r))"
 apply (simp (no_asm_use) add: rvimage_def wf_eq_minimal)
@@ -369,8 +369,8 @@
 apply blast
 done
 
-text{*But note that the combination of @{text wf_imp_wf_on} and
- @{text wf_rvimage} gives @{prop "wf(r) ==> wf[C](rvimage(A,f,r))"}*}
+text\<open>But note that the combination of @{text wf_imp_wf_on} and
+ @{text wf_rvimage} gives @{prop "wf(r) ==> wf[C](rvimage(A,f,r))"}\<close>
 lemma wf_on_rvimage: "[| f \<in> A->B;  wf[B](r) |] ==> wf[A](rvimage(A,f,r))"
 apply (rule wf_onI2)
 apply (subgoal_tac "\<forall>z\<in>A. f`z=f`y \<longrightarrow> z \<in> Ba")
@@ -400,8 +400,8 @@
 by (unfold ord_iso_def rvimage_def, blast)
 
 
-subsection{*Every well-founded relation is a subset of some inverse image of
-      an ordinal*}
+subsection\<open>Every well-founded relation is a subset of some inverse image of
+      an ordinal\<close>
 
 lemma wf_rvimage_Ord: "Ord(i) \<Longrightarrow> wf(rvimage(A, f, Memrel(i)))"
 by (blast intro: wf_rvimage wf_Memrel)
@@ -455,12 +455,12 @@
           intro: wf_rvimage_Ord [THEN wf_subset])
 
 
-subsection{*Other Results*}
+subsection\<open>Other Results\<close>
 
 lemma wf_times: "A \<inter> B = 0 ==> wf(A*B)"
 by (simp add: wf_def, blast)
 
-text{*Could also be used to prove @{text wf_radd}*}
+text\<open>Could also be used to prove @{text wf_radd}\<close>
 lemma wf_Un:
      "[| range(r) \<inter> domain(s) = 0; wf(r);  wf(s) |] ==> wf(r \<union> s)"
 apply (simp add: wf_def, clarify)
@@ -473,7 +473,7 @@
 apply (blast intro: elim: equalityE)
 done
 
-subsubsection{*The Empty Relation*}
+subsubsection\<open>The Empty Relation\<close>
 
 lemma wf0: "wf(0)"
 by (simp add: wf_def, blast)
@@ -484,7 +484,7 @@
 lemma well_ord0: "well_ord(0,0)"
 by (blast intro: wf_imp_wf_on well_ordI wf0 linear0)
 
-subsubsection{*The "measure" relation is useful with wfrec*}
+subsubsection\<open>The "measure" relation is useful with wfrec\<close>
 
 lemma measure_eq_rvimage_Memrel:
      "measure(A,f) = rvimage(A,Lambda(A,f),Memrel(Collect(RepFun(A,f),Ord)))"
@@ -524,7 +524,7 @@
 lemma measure_type: "measure(A,f) \<subseteq> A*A"
 by (auto simp add: measure_def)
 
-subsubsection{*Well-foundedness of Unions*}
+subsubsection\<open>Well-foundedness of Unions\<close>
 
 lemma wf_on_Union:
  assumes wfA: "wf[A](r)"
@@ -544,7 +544,7 @@
 apply (frule ok, assumption+, blast)
 done
 
-subsubsection{*Bijections involving Powersets*}
+subsubsection\<open>Bijections involving Powersets\<close>
 
 lemma Pow_sum_bij:
     "(\<lambda>Z \<in> Pow(A+B). <{x \<in> A. Inl(x) \<in> Z}, {y \<in> B. Inr(y) \<in> Z}>)
@@ -554,7 +554,7 @@
 apply force+
 done
 
-text{*As a special case, we have @{term "bij(Pow(A*B), A -> Pow(B))"} *}
+text\<open>As a special case, we have @{term "bij(Pow(A*B), A -> Pow(B))"}\<close>
 lemma Pow_Sigma_bij:
     "(\<lambda>r \<in> Pow(Sigma(A,B)). \<lambda>x \<in> A. r``{x})
      \<in> bij(Pow(Sigma(A,B)), \<Pi> x \<in> A. Pow(B(x)))"
--- a/src/ZF/OrderType.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/OrderType.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,13 +3,13 @@
     Copyright   1994  University of Cambridge
 *)
 
-section{*Order Types and Ordinal Arithmetic*}
+section\<open>Order Types and Ordinal Arithmetic\<close>
 
 theory OrderType imports OrderArith OrdQuant Nat_ZF begin
 
-text{*The order type of a well-ordering is the least ordinal isomorphic to it.
+text\<open>The order type of a well-ordering is the least ordinal isomorphic to it.
 Ordinal arithmetic is traditionally defined in terms of order types, as it is
-here.  But a definition by transfinite recursion would be much simpler!*}
+here.  But a definition by transfinite recursion would be much simpler!\<close>
 
 definition
   ordermap  :: "[i,i]=>i"  where
@@ -56,7 +56,7 @@
   omult  (infixl "\<times>\<times>" 70)
 
 
-subsection{*Proofs needing the combination of Ordinal.thy and Order.thy*}
+subsection\<open>Proofs needing the combination of Ordinal.thy and Order.thy\<close>
 
 lemma le_well_ord_Memrel: "j \<le> i ==> well_ord(j, Memrel(i))"
 apply (rule well_ordI)
@@ -102,7 +102,7 @@
 done
 
 
-subsection{*Ordermap and ordertype*}
+subsection\<open>Ordermap and ordertype\<close>
 
 lemma ordermap_type:
     "ordermap(A,r) \<in> A -> ordertype(A,r)"
@@ -111,7 +111,7 @@
 apply (rule lamI [THEN imageI], assumption+)
 done
 
-subsubsection{*Unfolding of ordermap *}
+subsubsection\<open>Unfolding of ordermap\<close>
 
 (*Useful for cardinality reasoning; see CardinalArith.ML*)
 lemma ordermap_eq_image:
@@ -148,7 +148,7 @@
 *)
 
 
-subsubsection{*Showing that ordermap, ordertype yield ordinals *}
+subsubsection\<open>Showing that ordermap, ordertype yield ordinals\<close>
 
 lemma Ord_ordermap:
     "[| well_ord(A,r);  x \<in> A |] ==> Ord(ordermap(A,r) ` x)"
@@ -173,7 +173,7 @@
 done
 
 
-subsubsection{*ordermap preserves the orderings in both directions *}
+subsubsection\<open>ordermap preserves the orderings in both directions\<close>
 
 lemma ordermap_mono:
      "[| <w,x>: r;  wf[A](r);  w \<in> A; x \<in> A |]
@@ -202,7 +202,7 @@
              simp add: mem_not_refl)
 done
 
-subsubsection{*Isomorphisms involving ordertype *}
+subsubsection\<open>Isomorphisms involving ordertype\<close>
 
 lemma ordertype_ord_iso:
  "well_ord(A,r)
@@ -230,7 +230,7 @@
 apply (erule ordertype_ord_iso [THEN ord_iso_sym])
 done
 
-subsubsection{*Basic equalities for ordertype *}
+subsubsection\<open>Basic equalities for ordertype\<close>
 
 (*Ordertype of Memrel*)
 lemma le_ordertype_Memrel: "j \<le> i ==> ordertype(j,Memrel(i)) = j"
@@ -258,7 +258,7 @@
                          ordertype(A, rvimage(A,f,s)) = ordertype(B,s) *)
 lemmas bij_ordertype_vimage = ord_iso_rvimage [THEN ordertype_eq]
 
-subsubsection{*A fundamental unfolding law for ordertype. *}
+subsubsection\<open>A fundamental unfolding law for ordertype.\<close>
 
 (*Ordermap returns the same result if applied to an initial segment*)
 lemma ordermap_pred_eq_ordermap:
@@ -282,7 +282,7 @@
 apply (rule image_fun [OF ordermap_type subset_refl])
 done
 
-text{*Theorems by Krzysztof Grabczewski; proofs simplified by lcp *}
+text\<open>Theorems by Krzysztof Grabczewski; proofs simplified by lcp\<close>
 
 lemma ordertype_pred_subset: "[| well_ord(A,r);  x \<in> A |] ==>
           ordertype(pred(A,x,r),r) \<subseteq> ordertype(A,r)"
@@ -313,7 +313,7 @@
 done
 
 
-subsection{*Alternative definition of ordinal*}
+subsection\<open>Alternative definition of ordinal\<close>
 
 (*proof by Krzysztof Grabczewski*)
 lemma Ord_is_Ord_alt: "Ord(i) ==> Ord_alt(i)"
@@ -333,11 +333,11 @@
 done
 
 
-subsection{*Ordinal Addition*}
+subsection\<open>Ordinal Addition\<close>
 
-subsubsection{*Order Type calculations for radd *}
+subsubsection\<open>Order Type calculations for radd\<close>
 
-text{*Addition with 0 *}
+text\<open>Addition with 0\<close>
 
 lemma bij_sum_0: "(\<lambda>z\<in>A+0. case(%x. x, %y. y, z)) \<in> bij(A+0, A)"
 apply (rule_tac d = Inl in lam_bijective, safe)
@@ -363,7 +363,7 @@
 apply force
 done
 
-text{*Initial segments of radd.  Statements by Grabczewski *}
+text\<open>Initial segments of radd.  Statements by Grabczewski\<close>
 
 (*In fact, pred(A+B, Inl(a), radd(A,r,B,s)) = pred(A,a,r)+0 *)
 lemma pred_Inl_bij:
@@ -401,7 +401,7 @@
 done
 
 
-subsubsection{*ordify: trivial coercion to an ordinal *}
+subsubsection\<open>ordify: trivial coercion to an ordinal\<close>
 
 lemma Ord_ordify [iff, TC]: "Ord(ordify(x))"
 by (simp add: ordify_def)
@@ -411,7 +411,7 @@
 by (simp add: ordify_def)
 
 
-subsubsection{*Basic laws for ordinal addition *}
+subsubsection\<open>Basic laws for ordinal addition\<close>
 
 lemma Ord_raw_oadd: "[|Ord(i); Ord(j)|] ==> Ord(raw_oadd(i,j))"
 by (simp add: raw_oadd_def ordify_def Ord_ordertype well_ord_radd
@@ -421,7 +421,7 @@
 by (simp add: oadd_def Ord_raw_oadd)
 
 
-text{*Ordinal addition with zero *}
+text\<open>Ordinal addition with zero\<close>
 
 lemma raw_oadd_0: "Ord(i) ==> raw_oadd(i,0) = i"
 by (simp add: raw_oadd_def ordify_def ordertype_sum_0_eq
@@ -467,7 +467,7 @@
 apply (auto simp add: Ord_oadd lt_oadd1)
 done
 
-text{*Various other results *}
+text\<open>Various other results\<close>
 
 lemma id_ord_iso_Memrel: "A<=B ==> id(A) \<in> ord_iso(A, Memrel(A), A, Memrel(B))"
 apply (rule id_bij [THEN ord_isoI])
@@ -555,7 +555,7 @@
 done
 
 
-subsubsection{*Ordinal addition with successor -- via associativity! *}
+subsubsection\<open>Ordinal addition with successor -- via associativity!\<close>
 
 lemma oadd_assoc: "(i++j)++k = i++(j++k)"
 apply (simp add: oadd_eq_if_raw_oadd Ord_raw_oadd raw_oadd_0 raw_oadd_0_left, clarify)
@@ -591,7 +591,7 @@
 done
 
 
-text{*Ordinal addition with limit ordinals *}
+text\<open>Ordinal addition with limit ordinals\<close>
 
 lemma oadd_UN:
      "[| !!x. x \<in> A ==> Ord(j(x));  a \<in> A |]
@@ -628,7 +628,7 @@
 apply (simp add: Limit_def lt_def)
 done
 
-text{*Order/monotonicity properties of ordinal addition *}
+text\<open>Order/monotonicity properties of ordinal addition\<close>
 
 lemma oadd_le_self2: "Ord(i) ==> i \<le> j++i"
 proof (induct i rule: trans_induct3)
@@ -673,7 +673,7 @@
 apply (blast intro: succ_leI oadd_le_mono)
 done
 
-text{*Every ordinal is exceeded by some limit ordinal.*}
+text\<open>Every ordinal is exceeded by some limit ordinal.\<close>
 lemma Ord_imp_greater_Limit: "Ord(i) ==> \<exists>k. i<k & Limit(k)"
 apply (rule_tac x="i ++ nat" in exI)
 apply (blast intro: oadd_LimitI  oadd_lt_self  Limit_nat [THEN Limit_has_0])
@@ -685,10 +685,10 @@
 done
 
 
-subsection{*Ordinal Subtraction*}
+subsection\<open>Ordinal Subtraction\<close>
 
-text{*The difference is @{term "ordertype(j-i, Memrel(j))"}.
-    It's probably simpler to define the difference recursively!*}
+text\<open>The difference is @{term "ordertype(j-i, Memrel(j))"}.
+    It's probably simpler to define the difference recursively!\<close>
 
 lemma bij_sum_Diff:
      "A<=B ==> (\<lambda>y\<in>B. if(y \<in> A, Inl(y), Inr(y))) \<in> bij(B, A+(B-A))"
@@ -752,7 +752,7 @@
 done
 
 
-subsection{*Ordinal Multiplication*}
+subsection\<open>Ordinal Multiplication\<close>
 
 lemma Ord_omult [simp,TC]:
     "[| Ord(i);  Ord(j) |] ==> Ord(i**j)"
@@ -760,7 +760,7 @@
 apply (blast intro: Ord_ordertype well_ord_rmult well_ord_Memrel)
 done
 
-subsubsection{*A useful unfolding law *}
+subsubsection\<open>A useful unfolding law\<close>
 
 lemma pred_Pair_eq:
  "[| a \<in> A;  b \<in> B |] ==> pred(A*B, <a,b>, rmult(A,r,B,s)) =
@@ -835,9 +835,9 @@
 apply (blast intro: omult_oadd_lt [THEN ltD] ltI)
 done
 
-subsubsection{*Basic laws for ordinal multiplication *}
+subsubsection\<open>Basic laws for ordinal multiplication\<close>
 
-text{*Ordinal multiplication by zero *}
+text\<open>Ordinal multiplication by zero\<close>
 
 lemma omult_0 [simp]: "i**0 = 0"
 apply (unfold omult_def)
@@ -849,7 +849,7 @@
 apply (simp (no_asm_simp))
 done
 
-text{*Ordinal multiplication by 1 *}
+text\<open>Ordinal multiplication by 1\<close>
 
 lemma omult_1 [simp]: "Ord(i) ==> i**1 = i"
 apply (unfold omult_def)
@@ -867,7 +867,7 @@
 apply (auto elim!: fst_type well_ord_Memrel ordertype_Memrel)
 done
 
-text{*Distributive law for ordinal multiplication and addition *}
+text\<open>Distributive law for ordinal multiplication and addition\<close>
 
 lemma oadd_omult_distrib:
      "[| Ord(i);  Ord(j);  Ord(k) |] ==> i**(j++k) = (i**j)++(i**k)"
@@ -888,7 +888,7 @@
 lemma omult_succ: "[| Ord(i);  Ord(j) |] ==> i**succ(j) = (i**j)++i"
 by (simp del: oadd_succ add: oadd_1 [of j, symmetric] oadd_omult_distrib)
 
-text{*Associative law *}
+text\<open>Associative law\<close>
 
 lemma omult_assoc:
     "[| Ord(i);  Ord(j);  Ord(k) |] ==> (i**j)**k = i**(j**k)"
@@ -905,7 +905,7 @@
 done
 
 
-text{*Ordinal multiplication with limit ordinals *}
+text\<open>Ordinal multiplication with limit ordinals\<close>
 
 lemma omult_UN:
      "[| Ord(i);  !!x. x \<in> A ==> Ord(j(x)) |]
@@ -917,7 +917,7 @@
               Union_eq_UN [symmetric] Limit_Union_eq)
 
 
-subsubsection{*Ordering/monotonicity properties of ordinal multiplication *}
+subsubsection\<open>Ordering/monotonicity properties of ordinal multiplication\<close>
 
 (*As a special case we have "[| 0<i;  0<j |] ==> 0 < i**j" *)
 lemma lt_omult1: "[| k<i;  0<j |] ==> k < i**j"
@@ -992,7 +992,7 @@
 qed
 
 
-text{*Further properties of ordinal multiplication *}
+text\<open>Further properties of ordinal multiplication\<close>
 
 lemma omult_inject: "[| i**j = i**k;  0<i;  Ord(j);  Ord(k) |] ==> j=k"
 apply (rule Ord_linear_lt)
@@ -1001,7 +1001,7 @@
 apply (force dest: omult_lt_mono2 simp add: lt_not_refl)+
 done
 
-subsection{*The Relation @{term Lt}*}
+subsection\<open>The Relation @{term Lt}\<close>
 
 lemma wf_Lt: "wf(Lt)"
 apply (rule wf_subset)
--- a/src/ZF/Ordinal.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Ordinal.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1994  University of Cambridge
 *)
 
-section{*Transitive Sets and Ordinals*}
+section\<open>Transitive Sets and Ordinals\<close>
 
 theory Ordinal imports WF Bool equalities begin
 
@@ -38,9 +38,9 @@
   le  (infixl "\<le>" 50)
 
 
-subsection{*Rules for Transset*}
+subsection\<open>Rules for Transset\<close>
 
-subsubsection{*Three Neat Characterisations of Transset*}
+subsubsection\<open>Three Neat Characterisations of Transset\<close>
 
 lemma Transset_iff_Pow: "Transset(A) <-> A<=Pow(A)"
 by (unfold Transset_def, blast)
@@ -53,7 +53,7 @@
 lemma Transset_iff_Union_subset: "Transset(A) <-> \<Union>(A) \<subseteq> A"
 by (unfold Transset_def, blast)
 
-subsubsection{*Consequences of Downwards Closure*}
+subsubsection\<open>Consequences of Downwards Closure\<close>
 
 lemma Transset_doubleton_D:
     "[| Transset(C); {a,b}: C |] ==> a\<in>C & b\<in>C"
@@ -73,7 +73,7 @@
     "[| Transset(C); A*B \<subseteq> C; a \<in> A |] ==> B \<subseteq> C"
 by (blast dest: Transset_Pair_D)
 
-subsubsection{*Closure Properties*}
+subsubsection\<open>Closure Properties\<close>
 
 lemma Transset_0: "Transset(0)"
 by (unfold Transset_def, blast)
@@ -112,7 +112,7 @@
 by (rule Transset_Inter_family, auto)
 
 
-subsection{*Lemmas for Ordinals*}
+subsection\<open>Lemmas for Ordinals\<close>
 
 lemma OrdI:
     "[| Transset(i);  !!x. x\<in>i ==> Transset(x) |]  ==>  Ord(i)"
@@ -149,7 +149,7 @@
 by (blast dest: OrdmemD)
 
 
-subsection{*The Construction of Ordinals: 0, succ, Union*}
+subsection\<open>The Construction of Ordinals: 0, succ, Union\<close>
 
 lemma Ord_0 [iff,TC]: "Ord(0)"
 by (blast intro: OrdI Transset_0)
@@ -172,7 +172,7 @@
 apply (blast intro!: Transset_Int)
 done
 
-text{*There is no set of all ordinals, for then it would contain itself*}
+text\<open>There is no set of all ordinals, for then it would contain itself\<close>
 lemma ON_class: "~ (\<forall>i. i\<in>X <-> Ord(i))"
 proof (rule notI)
   assume X: "\<forall>i. i \<in> X \<longleftrightarrow> Ord(i)"
@@ -187,7 +187,7 @@
   thus "False" by (rule mem_irrefl)
 qed
 
-subsection{*< is 'less Than' for Ordinals*}
+subsection\<open>< is 'less Than' for Ordinals\<close>
 
 lemma ltI: "[| i\<in>j;  Ord(j) |] ==> i<j"
 by (unfold lt_def, blast)
@@ -236,7 +236,7 @@
 done
 
 
-text{* Recall that  @{term"i \<le> j"}  abbreviates  @{term"i<succ(j)"} !! *}
+text\<open>Recall that  @{term"i \<le> j"}  abbreviates  @{term"i<succ(j)"} !!\<close>
 
 lemma le_iff: "i \<le> j <-> i<j | (i=j & Ord(j))"
 by (unfold lt_def, blast)
@@ -270,7 +270,7 @@
 
 lemmas le0D = le0_iff [THEN iffD1, dest!]
 
-subsection{*Natural Deduction Rules for Memrel*}
+subsection\<open>Natural Deduction Rules for Memrel\<close>
 
 (*The lemmas MemrelI/E give better speed than [iff] here*)
 lemma Memrel_iff [simp]: "<a,b> \<in> Memrel(A) <-> a\<in>b & a\<in>A & b\<in>A"
@@ -307,12 +307,12 @@
 apply (rule foundation [THEN disjE, THEN allI], erule disjI1, blast)
 done
 
-text{*The premise @{term "Ord(i)"} does not suffice.*}
+text\<open>The premise @{term "Ord(i)"} does not suffice.\<close>
 lemma trans_Memrel:
     "Ord(i) ==> trans(Memrel(i))"
 by (unfold Ord_def Transset_def trans_def, blast)
 
-text{*However, the following premise is strong enough.*}
+text\<open>However, the following premise is strong enough.\<close>
 lemma Transset_trans_Memrel:
     "\<forall>j\<in>i. Transset(j) ==> trans(Memrel(i))"
 by (unfold Transset_def trans_def, blast)
@@ -323,7 +323,7 @@
 by (unfold Transset_def, blast)
 
 
-subsection{*Transfinite Induction*}
+subsection\<open>Transfinite Induction\<close>
 
 (*Epsilon induction over a transitive set*)
 lemma Transset_induct:
@@ -348,17 +348,17 @@
 done
 
 
-section{*Fundamental properties of the epsilon ordering (< on ordinals)*}
+section\<open>Fundamental properties of the epsilon ordering (< on ordinals)\<close>
 
 
-subsubsection{*Proving That < is a Linear Ordering on the Ordinals*}
+subsubsection\<open>Proving That < is a Linear Ordering on the Ordinals\<close>
 
 lemma Ord_linear:
      "Ord(i) \<Longrightarrow> Ord(j) \<Longrightarrow> i\<in>j | i=j | j\<in>i"
 proof (induct i arbitrary: j rule: trans_induct)
   case (step i)
   note step_i = step
-  show ?case using `Ord(j)`
+  show ?case using \<open>Ord(j)\<close>
     proof (induct j rule: trans_induct)
       case (step j)
       thus ?case using step_i
@@ -366,7 +366,7 @@
     qed
 qed
 
-text{*The trichotomy law for ordinals*}
+text\<open>The trichotomy law for ordinals\<close>
 lemma Ord_linear_lt:
  assumes o: "Ord(i)" "Ord(j)"
  obtains (lt) "i<j" | (eq) "i=j" | (gt) "j<i"
@@ -395,7 +395,7 @@
 lemma not_lt_imp_le: "[| ~ i<j;  Ord(i);  Ord(j) |] ==> j \<le> i"
 by (rule_tac i = i and j = j in Ord_linear2, auto)
 
-subsubsection{*Some Rewrite Rules for <, le*}
+subsubsection\<open>Some Rewrite Rules for <, le\<close>
 
 lemma Ord_mem_iff_lt: "Ord(j) ==> i\<in>j <-> i<j"
 by (unfold lt_def, blast)
@@ -419,7 +419,7 @@
 by (blast intro: Ord_0_lt)
 
 
-subsection{*Results about Less-Than or Equals*}
+subsection\<open>Results about Less-Than or Equals\<close>
 
 (** For ordinals, @{term"j\<subseteq>i"} implies @{term"j \<le> i"} (less-than or equals) **)
 
@@ -446,7 +446,7 @@
 lemma all_lt_imp_le: "[| Ord(i);  Ord(j);  !!x. x<j ==> x<i |] ==> j \<le> i"
 by (blast intro: not_lt_imp_le dest: lt_irrefl)
 
-subsubsection{*Transitivity Laws*}
+subsubsection\<open>Transitivity Laws\<close>
 
 lemma lt_trans1: "[| i \<le> j;  j<k |] ==> i<k"
 by (blast elim!: leE intro: lt_trans)
@@ -498,7 +498,7 @@
 apply (simp add: lt_def)
 done
 
-subsubsection{*Union and Intersection*}
+subsubsection\<open>Union and Intersection\<close>
 
 lemma Un_upper1_le: "[| Ord(i); Ord(j) |] ==> i \<le> i \<union> j"
 by (rule Un_upper1 [THEN subset_imp_le], auto)
@@ -560,7 +560,7 @@
 by (blast intro: Ord_trans)
 
 
-subsection{*Results about Limits*}
+subsection\<open>Results about Limits\<close>
 
 lemma Ord_Union [intro,simp,TC]: "[| !!i. i\<in>A ==> Ord(i) |] ==> Ord(\<Union>(A))"
 apply (rule Ord_is_Transset [THEN Transset_Union_family, THEN OrdI])
@@ -633,7 +633,7 @@
 by (blast intro: Ord_trans)
 
 
-subsection{*Limit Ordinals -- General Properties*}
+subsection\<open>Limit Ordinals -- General Properties\<close>
 
 lemma Limit_Union_eq: "Limit(i) ==> \<Union>(i) = i"
 apply (unfold Limit_def)
@@ -701,7 +701,7 @@
 by (blast elim!: leE)
 
 
-subsubsection{*Traditional 3-Way Case Analysis on Ordinals*}
+subsubsection\<open>Traditional 3-Way Case Analysis on Ordinals\<close>
 
 lemma Ord_cases_disj: "Ord(i) ==> i=0 | (\<exists>j. Ord(j) & i=succ(j)) | Limit(i)"
 by (blast intro!: non_succ_LimitI Ord_0_lt)
@@ -723,8 +723,8 @@
 
 lemmas trans_induct3 = trans_induct3_raw [rule_format, case_names 0 succ limit, consumes 1]
 
-text{*A set of ordinals is either empty, contains its own union, or its
-union is a limit ordinal.*}
+text\<open>A set of ordinals is either empty, contains its own union, or its
+union is a limit ordinal.\<close>
 
 lemma Union_le: "[| !!x. x\<in>I ==> x\<le>j; Ord(j) |] ==> \<Union>(I) \<le> j"
   by (auto simp add: le_subset_iff Union_least)
@@ -757,7 +757,7 @@
   assume "Limit(\<Union>I)" thus ?thesis by auto
 qed
 
-text{*If the union of a set of ordinals is a successor, then it is an element of that set.*}
+text\<open>If the union of a set of ordinals is a successor, then it is an element of that set.\<close>
 lemma Ord_Union_eq_succD: "[|\<forall>x\<in>X. Ord(x);  \<Union>X = succ(j)|] ==> succ(j) \<in> X"
   by (drule Ord_set_cases, auto)
 
--- a/src/ZF/Perm.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Perm.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -8,7 +8,7 @@
   -- Lemmas for the Schroeder-Bernstein Theorem
 *)
 
-section{*Injections, Surjections, Bijections, Composition*}
+section\<open>Injections, Surjections, Bijections, Composition\<close>
 
 theory Perm imports func begin
 
@@ -39,7 +39,7 @@
     "bij(A,B) == inj(A,B) \<inter> surj(A,B)"
 
 
-subsection{*Surjective Function Space*}
+subsection\<open>Surjective Function Space\<close>
 
 lemma surj_is_fun: "f \<in> surj(A,B) ==> f \<in> A->B"
 apply (unfold surj_def)
@@ -56,7 +56,7 @@
 apply (best intro: apply_Pair elim: range_type)
 done
 
-text{* A function with a right inverse is a surjection *}
+text\<open>A function with a right inverse is a surjection\<close>
 
 lemma f_imp_surjective:
     "[| f \<in> A->B;  !!y. y \<in> B ==> d(y): A;  !!y. y \<in> B ==> f`d(y) = y |]
@@ -72,7 +72,7 @@
 apply (simp_all add: lam_type)
 done
 
-text{*Cantor's theorem revisited*}
+text\<open>Cantor's theorem revisited\<close>
 lemma cantor_surj: "f \<notin> surj(A,Pow(A))"
 apply (unfold surj_def, safe)
 apply (cut_tac cantor)
@@ -80,14 +80,14 @@
 done
 
 
-subsection{*Injective Function Space*}
+subsection\<open>Injective Function Space\<close>
 
 lemma inj_is_fun: "f \<in> inj(A,B) ==> f \<in> A->B"
 apply (unfold inj_def)
 apply (erule CollectD1)
 done
 
-text{*Good for dealing with sets of pairs, but a bit ugly in use [used in AC]*}
+text\<open>Good for dealing with sets of pairs, but a bit ugly in use [used in AC]\<close>
 lemma inj_equality:
     "[| <a,b>:f;  <c,b>:f;  f \<in> inj(A,B) |] ==> a=c"
 apply (unfold inj_def)
@@ -97,7 +97,7 @@
 lemma inj_apply_equality: "[| f \<in> inj(A,B);  f`a=f`b;  a \<in> A;  b \<in> A |] ==> a=b"
 by (unfold inj_def, blast)
 
-text{* A function with a left inverse is an injection *}
+text\<open>A function with a left inverse is an injection\<close>
 
 lemma f_imp_injective: "[| f \<in> A->B;  \<forall>x\<in>A. d(f`x)=x |] ==> f \<in> inj(A,B)"
 apply (simp (no_asm_simp) add: inj_def)
@@ -112,7 +112,7 @@
 apply (simp_all add: lam_type)
 done
 
-subsection{*Bijections*}
+subsection\<open>Bijections\<close>
 
 lemma bij_is_inj: "f \<in> bij(A,B) ==> f \<in> inj(A,B)"
 apply (unfold bij_def)
@@ -144,7 +144,7 @@
 done
 
 
-subsection{*Identity Function*}
+subsection\<open>Identity Function\<close>
 
 lemma idI [intro!]: "a \<in> A ==> <a,a> \<in> id(A)"
 apply (unfold id_def)
@@ -191,12 +191,12 @@
 apply (force intro!: lam_type dest: apply_type)
 done
 
-text{*@{term id} as the identity relation*}
+text\<open>@{term id} as the identity relation\<close>
 lemma id_iff [simp]: "<x,y> \<in> id(A) \<longleftrightarrow> x=y & y \<in> A"
 by auto
 
 
-subsection{*Converse of a Function*}
+subsection\<open>Converse of a Function\<close>
 
 lemma inj_converse_fun: "f \<in> inj(A,B) ==> converse(f) \<in> range(f)->A"
 apply (unfold inj_def)
@@ -206,9 +206,9 @@
 apply (blast dest: fun_is_rel)
 done
 
-text{* Equations for converse(f) *}
+text\<open>Equations for converse(f)\<close>
 
-text{*The premises are equivalent to saying that f is injective...*}
+text\<open>The premises are equivalent to saying that f is injective...\<close>
 lemma left_inverse_lemma:
      "[| f \<in> A->B;  converse(f): C->A;  a \<in> A |] ==> converse(f)`(f`a) = a"
 by (blast intro: apply_Pair apply_equality converseI)
@@ -235,7 +235,7 @@
 lemma right_inverse_bij: "[| f \<in> bij(A,B);  b \<in> B |] ==> f`(converse(f)`b) = b"
 by (force simp add: bij_def surj_range)
 
-subsection{*Converses of Injections, Surjections, Bijections*}
+subsection\<open>Converses of Injections, Surjections, Bijections\<close>
 
 lemma inj_converse_inj: "f \<in> inj(A,B) ==> converse(f): inj(range(f), A)"
 apply (rule f_imp_injective)
@@ -249,7 +249,7 @@
 by (blast intro: f_imp_surjective inj_converse_fun left_inverse inj_is_fun
                  range_of_fun [THEN apply_type])
 
-text{*Adding this as an intro! rule seems to cause looping*}
+text\<open>Adding this as an intro! rule seems to cause looping\<close>
 lemma bij_converse_bij [TC]: "f \<in> bij(A,B) ==> converse(f): bij(B,A)"
 apply (unfold bij_def)
 apply (fast elim: surj_range [THEN subst] inj_converse_inj inj_converse_surj)
@@ -257,9 +257,9 @@
 
 
 
-subsection{*Composition of Two Relations*}
+subsection\<open>Composition of Two Relations\<close>
 
-text{*The inductive definition package could derive these theorems for @{term"r O s"}*}
+text\<open>The inductive definition package could derive these theorems for @{term"r O s"}\<close>
 
 lemma compI [intro]: "[| <a,b>:s; <b,c>:r |] ==> <a,c> \<in> r O s"
 by (unfold comp_def, blast)
@@ -280,9 +280,9 @@
 by blast
 
 
-subsection{*Domain and Range -- see Suppes, Section 3.1*}
+subsection\<open>Domain and Range -- see Suppes, Section 3.1\<close>
 
-text{*Boyer et al., Set Theory in First-Order Logic, JAR 2 (1986), 287-327*}
+text\<open>Boyer et al., Set Theory in First-Order Logic, JAR 2 (1986), 287-327\<close>
 lemma range_comp: "range(r O s) \<subseteq> range(r)"
 by blast
 
@@ -305,16 +305,16 @@
   by (auto simp add: bij_def intro: inj_inj_range inj_is_fun fun_is_surj)
 
 
-subsection{*Other Results*}
+subsection\<open>Other Results\<close>
 
 lemma comp_mono: "[| r'<=r; s'<=s |] ==> (r' O s') \<subseteq> (r O s)"
 by blast
 
-text{*composition preserves relations*}
+text\<open>composition preserves relations\<close>
 lemma comp_rel: "[| s<=A*B;  r<=B*C |] ==> (r O s) \<subseteq> A*C"
 by blast
 
-text{*associative law for composition*}
+text\<open>associative law for composition\<close>
 lemma comp_assoc: "(r O s) O t = r O (s O t)"
 by blast
 
@@ -331,12 +331,12 @@
 by blast
 
 
-subsection{*Composition Preserves Functions, Injections, and Surjections*}
+subsection\<open>Composition Preserves Functions, Injections, and Surjections\<close>
 
 lemma comp_function: "[| function(g);  function(f) |] ==> function(f O g)"
 by (unfold function_def, blast)
 
-text{*Don't think the premises can be weakened much*}
+text\<open>Don't think the premises can be weakened much\<close>
 lemma comp_fun: "[| g \<in> A->B;  f \<in> B->C |] ==> (f O g) \<in> A->C"
 apply (auto simp add: Pi_def comp_function Pow_iff comp_rel)
 apply (subst range_rel_subset [THEN domain_comp_eq], auto)
@@ -350,7 +350,7 @@
 apply (blast dest: apply_equality)
 done
 
-text{*Simplifies compositions of lambda-abstractions*}
+text\<open>Simplifies compositions of lambda-abstractions\<close>
 lemma comp_lam:
     "[| !!x. x \<in> A ==> b(x): B |]
      ==> (\<lambda>y\<in>B. c(y)) O (\<lambda>x\<in>A. b(x)) = (\<lambda>x\<in>A. c(b(x)))"
@@ -383,11 +383,11 @@
 done
 
 
-subsection{*Dual Properties of @{term inj} and @{term surj}*}
+subsection\<open>Dual Properties of @{term inj} and @{term surj}\<close>
 
-text{*Useful for proofs from
+text\<open>Useful for proofs from
     D Pastre.  Automatic theorem proving in set theory.
-    Artificial Intelligence, 10:1--27, 1978.*}
+    Artificial Intelligence, 10:1--27, 1978.\<close>
 
 lemma comp_mem_injD1:
     "[| (f O g): inj(A,C);  g \<in> A->B;  f \<in> B->C |] ==> g \<in> inj(A,B)"
@@ -417,18 +417,18 @@
 apply (blast intro: apply_funtype)
 done
 
-subsubsection{*Inverses of Composition*}
+subsubsection\<open>Inverses of Composition\<close>
 
-text{*left inverse of composition; one inclusion is
-        @{term "f \<in> A->B ==> id(A) \<subseteq> converse(f) O f"} *}
+text\<open>left inverse of composition; one inclusion is
+        @{term "f \<in> A->B ==> id(A) \<subseteq> converse(f) O f"}\<close>
 lemma left_comp_inverse: "f \<in> inj(A,B) ==> converse(f) O f = id(A)"
 apply (unfold inj_def, clarify)
 apply (rule equalityI)
  apply (auto simp add: apply_iff, blast)
 done
 
-text{*right inverse of composition; one inclusion is
-                @{term "f \<in> A->B ==> f O converse(f) \<subseteq> id(B)"} *}
+text\<open>right inverse of composition; one inclusion is
+                @{term "f \<in> A->B ==> f O converse(f) \<subseteq> id(B)"}\<close>
 lemma right_comp_inverse:
     "f \<in> surj(A,B) ==> f O converse(f) = id(B)"
 apply (simp add: surj_def, clarify)
@@ -438,7 +438,7 @@
 done
 
 
-subsubsection{*Proving that a Function is a Bijection*}
+subsubsection\<open>Proving that a Function is a Bijection\<close>
 
 lemma comp_eq_id_iff:
     "[| f \<in> A->B;  g \<in> B->A |] ==> f O g = id(B) \<longleftrightarrow> (\<forall>y\<in>B. f`(g`y)=y)"
@@ -465,11 +465,11 @@
 by (simp add: fg_imp_bijective comp_eq_id_iff
               left_inverse_lemma right_inverse_lemma)
 
-subsubsection{*Unions of Functions*}
+subsubsection\<open>Unions of Functions\<close>
 
-text{*See similar theorems in func.thy*}
+text\<open>See similar theorems in func.thy\<close>
 
-text{*Theorem by KG, proof by LCP*}
+text\<open>Theorem by KG, proof by LCP\<close>
 lemma inj_disjoint_Un:
      "[| f \<in> inj(A,B);  g \<in> inj(C,D);  B \<inter> D = 0 |]
       ==> (\<lambda>a\<in>A \<union> C. if a \<in> A then f`a else g`a) \<in> inj(A \<union> C, B \<union> D)"
@@ -486,8 +486,8 @@
              intro!: fun_disjoint_apply1 fun_disjoint_apply2)
 done
 
-text{*A simple, high-level proof; the version for injections follows from it,
-  using  @{term "f \<in> inj(A,B) \<longleftrightarrow> f \<in> bij(A,range(f))"}  *}
+text\<open>A simple, high-level proof; the version for injections follows from it,
+  using  @{term "f \<in> inj(A,B) \<longleftrightarrow> f \<in> bij(A,range(f))"}\<close>
 lemma bij_disjoint_Un:
      "[| f \<in> bij(A,B);  g \<in> bij(C,D);  A \<inter> C = 0;  B \<inter> D = 0 |]
       ==> (f \<union> g) \<in> bij(A \<union> C, B \<union> D)"
@@ -497,7 +497,7 @@
 done
 
 
-subsubsection{*Restrictions as Surjections and Bijections*}
+subsubsection\<open>Restrictions as Surjections and Bijections\<close>
 
 lemma surj_image:
     "f \<in> Pi(A,B) ==> f \<in> surj(A, f``A)"
@@ -529,7 +529,7 @@
 done
 
 
-subsubsection{*Lemmas for Ramsey's Theorem*}
+subsubsection\<open>Lemmas for Ramsey's Theorem\<close>
 
 lemma inj_weaken_type: "[| f \<in> inj(A,B);  B<=D |] ==> f \<in> inj(A,D)"
 apply (unfold inj_def)
--- a/src/ZF/QPair.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/QPair.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -8,17 +8,17 @@
 is not a limit ordinal?
 *)
 
-section{*Quine-Inspired Ordered Pairs and Disjoint Sums*}
+section\<open>Quine-Inspired Ordered Pairs and Disjoint Sums\<close>
 
 theory QPair imports Sum func begin
 
-text{*For non-well-founded data
+text\<open>For non-well-founded data
 structures in ZF.  Does not precisely follow Quine's construction.  Thanks
 to Thomas Forster for suggesting this approach!
 
 W. V. Quine, On Ordered Pairs and Relations, in Selected Logic Papers,
 1966.
-*}
+\<close>
 
 definition
   QPair     :: "[i, i] => i"                      ("<(_;/ _)>")  where
@@ -70,7 +70,7 @@
     "qcase(c,d)   == qsplit(%y z. cond(y, d(z), c(z)))"
 
 
-subsection{*Quine ordered pairing*}
+subsection\<open>Quine ordered pairing\<close>
 
 (** Lemmas for showing that <a;b> uniquely determines a and b **)
 
@@ -91,8 +91,8 @@
 by blast
 
 
-subsubsection{*QSigma: Disjoint union of a family of sets
-     Generalizes Cartesian product*}
+subsubsection\<open>QSigma: Disjoint union of a family of sets
+     Generalizes Cartesian product\<close>
 
 lemma QSigmaI [intro!]: "[| a \<in> A;  b \<in> B(a) |] ==> <a;b> \<in> QSigma(A,B)"
 by (simp add: QSigma_def)
@@ -128,7 +128,7 @@
 by blast
 
 
-subsubsection{*Projections: qfst, qsnd*}
+subsubsection\<open>Projections: qfst, qsnd\<close>
 
 lemma qfst_conv [simp]: "qfst(<a;b>) = a"
 by (simp add: qfst_def)
@@ -146,7 +146,7 @@
 by auto
 
 
-subsubsection{*Eliminator: qsplit*}
+subsubsection\<open>Eliminator: qsplit\<close>
 
 (*A META-equality, so that it applies to higher types as well...*)
 lemma qsplit [simp]: "qsplit(%x y. c(x,y), <a;b>) == c(a,b)"
@@ -165,7 +165,7 @@
 done
 
 
-subsubsection{*qsplit for predicates: result type o*}
+subsubsection\<open>qsplit for predicates: result type o\<close>
 
 lemma qsplitI: "R(a,b) ==> qsplit(R, <a;b>)"
 by (simp add: qsplit_def)
@@ -181,7 +181,7 @@
 by (simp add: qsplit_def)
 
 
-subsubsection{*qconverse*}
+subsubsection\<open>qconverse\<close>
 
 lemma qconverseI [intro!]: "<a;b>:r ==> <b;a>:qconverse(r)"
 by (simp add: qconverse_def, blast)
@@ -208,7 +208,7 @@
 by blast
 
 
-subsection{*The Quine-inspired notion of disjoint sum*}
+subsection\<open>The Quine-inspired notion of disjoint sum\<close>
 
 lemmas qsum_defs = qsum_def QInl_def QInr_def qcase_def
 
@@ -274,7 +274,7 @@
 apply blast
 done
 
-subsubsection{*Eliminator -- qcase*}
+subsubsection\<open>Eliminator -- qcase\<close>
 
 lemma qcase_QInl [simp]: "qcase(c, d, QInl(a)) = c(a)"
 by (simp add: qsum_defs )
@@ -305,7 +305,7 @@
 by blast
 
 
-subsubsection{*Monotonicity*}
+subsubsection\<open>Monotonicity\<close>
 
 lemma QPair_mono: "[| a<=c;  b<=d |] ==> <a;b> \<subseteq> <c;d>"
 by (simp add: QPair_def sum_mono)
--- a/src/ZF/QUniv.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/QUniv.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1993  University of Cambridge
 *)
 
-section{*A Small Universe for Lazy Recursive Types*}
+section\<open>A Small Universe for Lazy Recursive Types\<close>
 
 theory QUniv imports Univ QPair begin
 
@@ -24,7 +24,7 @@
    "quniv(A) == Pow(univ(eclose(A)))"
 
 
-subsection{*Properties involving Transset and Sum*}
+subsection\<open>Properties involving Transset and Sum\<close>
 
 lemma Transset_includes_summands:
      "[| Transset(C); A+B \<subseteq> C |] ==> A \<subseteq> C & B \<subseteq> C"
@@ -38,7 +38,7 @@
 apply (blast dest: Transset_Pair_D)
 done
 
-subsection{*Introduction and Elimination Rules*}
+subsection\<open>Introduction and Elimination Rules\<close>
 
 lemma qunivI: "X \<subseteq> univ(eclose(A)) ==> X \<in> quniv(A)"
 by (simp add: quniv_def)
@@ -51,7 +51,7 @@
 apply (erule eclose_mono [THEN univ_mono, THEN Pow_mono])
 done
 
-subsection{*Closure Properties*}
+subsection\<open>Closure Properties\<close>
 
 lemma univ_eclose_subset_quniv: "univ(eclose(A)) \<subseteq> quniv(A)"
 apply (simp add: quniv_def Transset_iff_Pow [symmetric])
@@ -89,7 +89,7 @@
     "[| a \<subseteq> univ(A);  b \<subseteq> univ(A) |] ==> <a;b> \<subseteq> univ(A)"
 by (simp add: QPair_def sum_subset_univ)
 
-subsection{*Quine Disjoint Sum*}
+subsection\<open>Quine Disjoint Sum\<close>
 
 lemma QInl_subset_univ: "a \<subseteq> univ(A) ==> QInl(a) \<subseteq> univ(A)"
 apply (unfold QInl_def)
@@ -107,7 +107,7 @@
 apply (erule nat_1I [THEN naturals_subset_univ, THEN QPair_subset_univ])
 done
 
-subsection{*Closure for Quine-Inspired Products and Sums*}
+subsection\<open>Closure for Quine-Inspired Products and Sums\<close>
 
 (*Quine ordered pairs*)
 lemma QPair_in_quniv:
@@ -134,7 +134,7 @@
 by (blast intro: QPair_in_quniv dest: quniv_QPair_D)
 
 
-subsection{*Quine Disjoint Sum*}
+subsection\<open>Quine Disjoint Sum\<close>
 
 lemma QInl_in_quniv: "a: quniv(A) ==> QInl(a) \<in> quniv(A)"
 by (simp add: QInl_def zero_in_quniv QPair_in_quniv)
@@ -148,7 +148,7 @@
 lemmas qsum_subset_quniv = subset_trans [OF qsum_mono qsum_quniv]
 
 
-subsection{*The Natural Numbers*}
+subsection\<open>The Natural Numbers\<close>
 
 lemmas nat_subset_quniv =  subset_trans [OF nat_subset_univ univ_subset_quniv]
 
@@ -169,7 +169,7 @@
               product_Int_Vfrom_subset [THEN subset_trans]
               Sigma_mono [OF Int_lower1 subset_refl])
 
-subsection{*"Take-Lemma" Rules*}
+subsection\<open>"Take-Lemma" Rules\<close>
 
 (*for proving a=b by coinduction and c: quniv(A)*)
 
--- a/src/ZF/Resid/Residuals.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Resid/Residuals.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -30,7 +30,7 @@
   "u |> v == THE w. residuals(u,v,w)"
 
 
-subsection{*Setting up rule lists*}
+subsection\<open>Setting up rule lists\<close>
 
 declare Sres.intros [intro]
 declare Sreg.intros [intro]
@@ -61,7 +61,7 @@
 
 declare Sres.intros [simp]
 
-subsection{*residuals is a  partial function*}
+subsection\<open>residuals is a partial function\<close>
 
 lemma residuals_function [rule_format]:
      "residuals(u,v,w) ==> \<forall>w1. residuals(u,v,w1) \<longrightarrow> w1 = w"
@@ -78,7 +78,7 @@
 apply (blast intro: residuals_function)+
 done
 
-subsection{*Residual function*}
+subsection\<open>Residual function\<close>
 
 lemma res_Var [simp]: "n \<in> nat ==> Var(n) |> Var(n) = Var(n)"
 by (unfold res_func_def, blast)
@@ -108,7 +108,7 @@
      "[|s~t; regular(t)|]==> regular(t) \<longrightarrow> s |> t \<in> redexes"
   by (erule Scomp.induct, auto)
 
-subsection{*Commutation theorem*}
+subsection\<open>Commutation theorem\<close>
 
 lemma sub_comp [simp]: "u<==v ==> u~v"
 by (erule Ssub.induct, simp_all)
@@ -140,7 +140,7 @@
 by (simp add: residuals_subst_rec)
 
 
-subsection{*Residuals are comp and regular*}
+subsection\<open>Residuals are comp and regular\<close>
 
 lemma residuals_preserve_comp [rule_format, simp]:
      "u~v ==> \<forall>w. u~w \<longrightarrow> v~w \<longrightarrow> regular(w) \<longrightarrow> (u|>w) ~ (v|>w)"
@@ -151,7 +151,7 @@
 apply (erule Scomp.induct, auto)
 done
 
-subsection{*Preservation lemma*}
+subsection\<open>Preservation lemma\<close>
 
 lemma union_preserve_comp: "u~v ==> v ~ (u un v)"
 by (erule Scomp.induct, simp_all)
@@ -166,7 +166,7 @@
 
 declare sub_comp [THEN comp_sym, simp]
 
-subsection{*Prism theorem*}
+subsection\<open>Prism theorem\<close>
 
 (* Having more assumptions than needed -- removed below  *)
 lemma prism_l [rule_format]:
@@ -181,7 +181,7 @@
 done
 
 
-subsection{*Levy's Cube Lemma*}
+subsection\<open>Levy's Cube Lemma\<close>
 
 lemma cube: "[|u~v; regular(v); regular(u); w~u|]==>   
            (w|>u) |> (v|>u) = (w|>v) |> (u|>v)"
@@ -196,7 +196,7 @@
 done
 
 
-subsection{*paving theorem*}
+subsection\<open>paving theorem\<close>
 
 lemma paving: "[|w~u; w~v; regular(u); regular(v)|]==>  
            \<exists>uv vu. (w|>u) |> vu = (w|>v) |> uv & (w|>u)~vu & 
--- a/src/ZF/Sum.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Sum.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,11 +3,11 @@
     Copyright   1993  University of Cambridge
 *)
 
-section{*Disjoint Sums*}
+section\<open>Disjoint Sums\<close>
 
 theory Sum imports Bool equalities begin
 
-text{*And the "Part" primitive for simultaneous recursive type definitions*}
+text\<open>And the "Part" primitive for simultaneous recursive type definitions\<close>
 
 definition sum :: "[i,i]=>i" (infixr "+" 65) where
      "A+B == {0}*A \<union> {1}*B"
@@ -25,7 +25,7 @@
 definition Part :: "[i,i=>i] => i" where
      "Part(A,h) == {x \<in> A. \<exists>z. x = h(z)}"
 
-subsection{*Rules for the @{term Part} Primitive*}
+subsection\<open>Rules for the @{term Part} Primitive\<close>
 
 lemma Part_iff:
     "a \<in> Part(A,h) \<longleftrightarrow> a \<in> A & (\<exists>y. a=h(y))"
@@ -51,7 +51,7 @@
 done
 
 
-subsection{*Rules for Disjoint Sums*}
+subsection\<open>Rules for Disjoint Sums\<close>
 
 lemmas sum_defs = sum_def Inl_def Inr_def case_def
 
@@ -125,7 +125,7 @@
 by (simp add: sum_def, blast)
 
 
-subsection{*The Eliminator: @{term case}*}
+subsection\<open>The Eliminator: @{term case}\<close>
 
 lemma case_Inl [simp]: "case(c, d, Inl(a)) = c(a)"
 by (simp add: sum_defs)
@@ -159,7 +159,7 @@
 by auto
 
 
-subsection{*More Rules for @{term "Part(A,h)"}*}
+subsection\<open>More Rules for @{term "Part(A,h)"}\<close>
 
 lemma Part_mono: "A<=B ==> Part(A,h)<=Part(B,h)"
 by blast
--- a/src/ZF/Trancl.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Trancl.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1992  University of Cambridge
 *)
 
-section{*Relations: Their General Properties and Transitive Closure*}
+section\<open>Relations: Their General Properties and Transitive Closure\<close>
 
 theory Trancl imports Fixedpt Perm begin
 
@@ -49,9 +49,9 @@
     "equiv(A,r) == r \<subseteq> A*A & refl(A,r) & sym(r) & trans(r)"
 
 
-subsection{*General properties of relations*}
+subsection\<open>General properties of relations\<close>
 
-subsubsection{*irreflexivity*}
+subsubsection\<open>irreflexivity\<close>
 
 lemma irreflI:
     "[| !!x. x \<in> A ==> <x,x> \<notin> r |] ==> irrefl(A,r)"
@@ -60,7 +60,7 @@
 lemma irreflE: "[| irrefl(A,r);  x \<in> A |] ==>  <x,x> \<notin> r"
 by (simp add: irrefl_def)
 
-subsubsection{*symmetry*}
+subsubsection\<open>symmetry\<close>
 
 lemma symI:
      "[| !!x y.<x,y>: r ==> <y,x>: r |] ==> sym(r)"
@@ -69,7 +69,7 @@
 lemma symE: "[| sym(r); <x,y>: r |]  ==>  <y,x>: r"
 by (unfold sym_def, blast)
 
-subsubsection{*antisymmetry*}
+subsubsection\<open>antisymmetry\<close>
 
 lemma antisymI:
      "[| !!x y.[| <x,y>: r;  <y,x>: r |] ==> x=y |] ==> antisym(r)"
@@ -78,7 +78,7 @@
 lemma antisymE: "[| antisym(r); <x,y>: r;  <y,x>: r |]  ==>  x=y"
 by (simp add: antisym_def, blast)
 
-subsubsection{*transitivity*}
+subsubsection\<open>transitivity\<close>
 
 lemma transD: "[| trans(r);  <a,b>:r;  <b,c>:r |] ==> <a,c>:r"
 by (unfold trans_def, blast)
@@ -94,7 +94,7 @@
 by (simp add: trans_on_def trans_def, blast)
 
 
-subsection{*Transitive closure of a relation*}
+subsection\<open>Transitive closure of a relation\<close>
 
 lemma rtrancl_bnd_mono:
      "bnd_mono(field(r)*field(r), %s. id(field(r)) \<union> (r O s))"
@@ -297,7 +297,7 @@
  prefer 2
  apply (frule rtrancl_type [THEN subsetD])
  apply (blast intro: r_into_rtrancl )
-txt{*converse direction*}
+txt\<open>converse direction\<close>
 apply (frule rtrancl_type [THEN subsetD], clarify)
 apply (erule rtrancl_induct)
 apply (simp add: rtrancl_refl rtrancl_field)
--- a/src/ZF/UNITY/AllocBase.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/AllocBase.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   2001  University of Cambridge
 *)
 
-section{*Common declarations for Chandy and Charpentier's Allocator*}
+section\<open>Common declarations for Chandy and Charpentier's Allocator\<close>
 
 theory AllocBase imports Follows MultisetSum Guar begin
 
@@ -19,7 +19,7 @@
   NbT_pos: "NbT \<in> nat-{0}" and
   Nclients_pos: "Nclients \<in> nat-{0}"
   
-text{*This function merely sums the elements of a list*}
+text\<open>This function merely sums the elements of a list\<close>
 consts tokens :: "i =>i"
        item :: i (* Items to be merged/distributed *)
 primrec 
@@ -32,8 +32,8 @@
   "bag_of(Cons(x,xs)) = {#x#} +# bag_of(xs)"
 
 
-text{*Definitions needed in Client.thy.  We define a recursive predicate
-using 0 and 1 to code the truth values.*}
+text\<open>Definitions needed in Client.thy.  We define a recursive predicate
+using 0 and 1 to code the truth values.\<close>
 consts all_distinct0 :: "i=>i"
 primrec
   "all_distinct0(Nil) = 1"
@@ -45,14 +45,14 @@
    "all_distinct(l) == all_distinct0(l)=1"
   
 definition  
-  state_of :: "i =>i" --{* coersion from anyting to state *}  where
+  state_of :: "i =>i" --\<open>coercion from anything to state\<close>  where
    "state_of(s) == if s \<in> state then s else st0"
 
 definition
-  lift :: "i =>(i=>i)" --{* simplifies the expression of programs*}  where
+  lift :: "i =>(i=>i)" --\<open>simplifies the expression of programs\<close>  where
    "lift(x) == %s. s`x"
 
-text{* function to show that the set of variables is infinite *}
+text\<open>function to show that the set of variables is infinite\<close>
 consts
   nat_list_inj :: "i=>i"
   var_inj      :: "i=>i"
@@ -69,7 +69,7 @@
   "nat_var_inj(n) == Var(nat_list_inj(n))"
 
 
-subsection{*Various simple lemmas*}
+subsection\<open>Various simple lemmas\<close>
 
 lemma Nclients_NbT_gt_0 [simp]: "0 < Nclients & 0 < NbT"
 apply (cut_tac Nclients_pos NbT_pos)
@@ -127,7 +127,7 @@
 apply (induct_tac "xs", auto)
 done
 
-subsection{*The function @{term bag_of}*}
+subsection\<open>The function @{term bag_of}\<close>
 
 lemma bag_of_type [simp,TC]: "l\<in>list(A) ==>bag_of(l)\<in>Mult(A)"
 apply (induct_tac "l")
@@ -168,7 +168,7 @@
 by (auto simp add:  mono1_def bag_of_type)
 
 
-subsection{*The function @{term msetsum}*}
+subsection\<open>The function @{term msetsum}\<close>
 
 lemmas nat_into_Fin = eqpoll_refl [THEN [2] Fin_lemma]
 
@@ -272,7 +272,7 @@
 apply (auto intro: lt_trans)
 done
 
-subsubsection{*The function @{term all_distinct}*}
+subsubsection\<open>The function @{term all_distinct}\<close>
 
 lemma all_distinct_Nil [simp]: "all_distinct(Nil)"
 by (unfold all_distinct_def, auto)
@@ -284,7 +284,7 @@
 apply (auto elim: list.cases)
 done
 
-subsubsection{*The function @{term state_of}*}
+subsubsection\<open>The function @{term state_of}\<close>
 
 lemma state_of_state: "s\<in>state ==> state_of(s)=s"
 by (unfold state_of_def, auto)
--- a/src/ZF/UNITY/AllocImpl.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/AllocImpl.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -209,9 +209,9 @@
 apply (drule_tac a = k in Increasing_imp_Stable, auto)
 done
 
-subsection{* Towards proving the liveness property, (31) *}
+subsection\<open>Towards proving the liveness property, (31)\<close>
 
-subsubsection{*First, we lead up to a proof of Lemma 49, page 28.*}
+subsubsection\<open>First, we lead up to a proof of Lemma 49, page 28.\<close>
 
 lemma alloc_prog_transient_lemma:
      "[|G \<in> program; k\<in>nat|]
@@ -319,7 +319,7 @@
 apply (force dest: add_lt_elim2)
 done
 
-subsubsection{*Towards proving lemma 50, page 29*}
+subsubsection\<open>Towards proving lemma 50, page 29\<close>
 
 lemma alloc_prog_giv_Ensures_lemma:
 "[| G \<in> program; k\<in>nat; alloc_prog ok G;
@@ -392,10 +392,10 @@
 done
 
 
-text{*Lemma 51, page 29.
+text\<open>Lemma 51, page 29.
   This theorem states as invariant that if the number of
   tokens given does not exceed the number returned, then the upper limit
-  (@{term NbT}) does not exceed the number currently available.*}
+  (@{term NbT}) does not exceed the number currently available.\<close>
 lemma alloc_prog_Always_lemma:
 "[| G \<in> program; alloc_prog ok G;
     alloc_prog \<squnion> G \<in> Incr(lift(ask));
@@ -421,7 +421,7 @@
 
 
 
-subsubsection{* Main lemmas towards proving property (31)*}
+subsubsection\<open>Main lemmas towards proving property (31)\<close>
 
 lemma LeadsTo_strength_R:
     "[|  F \<in> C LeadsTo B'; F \<in> A-C LeadsTo B; B'<=B |] ==> F \<in> A LeadsTo  B"
--- a/src/ZF/UNITY/Comp.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/Comp.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -13,7 +13,7 @@
   
 *)
 
-section{*Composition*}
+section\<open>Composition\<close>
 
 theory Comp imports Union Increasing begin
 
--- a/src/ZF/UNITY/Constrains.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/Constrains.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   2001  University of Cambridge
 *)
 
-section{*Weak Safety Properties*}
+section\<open>Weak Safety Properties\<close>
 
 theory Constrains
 imports UNITY
@@ -457,7 +457,7 @@
 named_theorems program "program definitions"
 
 ML
-{*
+\<open>
 (*Combines two invariance ASSUMPTIONS into one.  USEFUL??*)
 fun Always_Int_tac ctxt =
   dresolve_tac ctxt @{thms Always_Int_I} THEN'
@@ -494,14 +494,14 @@
   resolve_tac ctxt @{thms AlwaysI} i THEN
   force_tac ctxt i
   THEN constrains_tac ctxt i;
-*}
+\<close>
 
-method_setup safety = {*
-  Scan.succeed (SIMPLE_METHOD' o constrains_tac) *}
+method_setup safety = \<open>
+  Scan.succeed (SIMPLE_METHOD' o constrains_tac)\<close>
   "for proving safety properties"
 
-method_setup always = {*
-  Scan.succeed (SIMPLE_METHOD' o always_tac) *}
+method_setup always = \<open>
+  Scan.succeed (SIMPLE_METHOD' o always_tac)\<close>
   "for proving invariants"
 
 end
--- a/src/ZF/UNITY/Distributor.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/Distributor.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -8,9 +8,9 @@
 
 theory Distributor imports AllocBase Follows  Guar GenPrefix begin
 
-text{*Distributor specification (the number of outputs is Nclients)*}
+text\<open>Distributor specification (the number of outputs is Nclients)\<close>
 
-text{*spec (14)*}
+text\<open>spec (14)\<close>
 
 definition
   distr_follows :: "[i, i, i, i =>i] =>i"  where
@@ -37,10 +37,10 @@
      distr_follows(A, In, iIn, Out) \<inter> distr_allowed_acts(Out)"
 
 locale distr =
-  fixes In  --{*items to distribute*}
-    and iIn --{*destinations of items to distribute*}
-    and Out --{*distributed items*}
-    and A   --{*the type of items being distributed *}
+  fixes In  --\<open>items to distribute\<close>
+    and iIn --\<open>destinations of items to distribute\<close>
+    and Out --\<open>distributed items\<close>
+    and A   --\<open>the type of items being distributed\<close>
     and D
  assumes
      var_assumes [simp]:  "In \<in> var & iIn \<in> var & (\<forall>n. Out(n):var)"
--- a/src/ZF/UNITY/FP.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/FP.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -7,7 +7,7 @@
 Theory ported from HOL.
 *)
 
-section{*Fixed Point of a Program*}
+section\<open>Fixed Point of a Program\<close>
 
 theory FP imports UNITY begin
 
--- a/src/ZF/UNITY/Follows.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/Follows.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -5,7 +5,7 @@
 Theory ported from HOL.
 *)
 
-section{*The "Follows" relation of Charpentier and Sivilotte*}
+section\<open>The "Follows" relation of Charpentier and Sivilotte\<close>
 
 theory Follows imports SubstAx Increasing begin
 
--- a/src/ZF/UNITY/GenPrefix.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/GenPrefix.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -9,7 +9,7 @@
 Based on Lex/Prefix
 *)
 
-section{*Charpentier's Generalized Prefix Relation*}
+section\<open>Charpentier's Generalized Prefix Relation\<close>
 
 theory GenPrefix
 imports Main
@@ -206,7 +206,7 @@
 apply (rule impI [THEN allI, THEN allI])
 apply (erule gen_prefix.induct, blast)
 apply (simp add: antisym_def, blast)
-txt{*append case is hardest*}
+txt\<open>append case is hardest\<close>
 apply clarify
 apply (subgoal_tac "length (zs) = 0")
 apply (subgoal_tac "ys \<in> list (A) ")
--- a/src/ZF/UNITY/Guar.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/Guar.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -19,7 +19,7 @@
 *)
 
 
-section{*The Chandy-Sanders Guarantees Operator*}
+section\<open>The Chandy-Sanders Guarantees Operator\<close>
 
 theory Guar imports Comp begin 
 
--- a/src/ZF/UNITY/Increasing.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/Increasing.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -6,7 +6,7 @@
 relation r over the domain A. 
 *)
 
-section{*Charpentier's "Increasing" Relation*}
+section\<open>Charpentier's "Increasing" Relation\<close>
 
 theory Increasing imports Constrains Monotonicity begin
 
--- a/src/ZF/UNITY/Merge.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/Merge.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -64,10 +64,10 @@
 
 (** State definitions.  OUTPUT variables are locals **)
 locale merge =
-  fixes In   --{*merge's INPUT histories: streams to merge*}
-    and Out  --{*merge's OUTPUT history: merged items*}
-    and iOut --{*merge's OUTPUT history: origins of merged items*}
-    and A    --{*the type of items being merged *}
+  fixes In   --\<open>merge's INPUT histories: streams to merge\<close>
+    and Out  --\<open>merge's OUTPUT history: merged items\<close>
+    and iOut --\<open>merge's OUTPUT history: origins of merged items\<close>
+    and A    --\<open>the type of items being merged\<close>
     and M
  assumes var_assumes [simp]:
            "(\<forall>n. In(n):var) & Out \<in> var & iOut \<in> var"
--- a/src/ZF/UNITY/Monotonicity.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/Monotonicity.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -6,7 +6,7 @@
 set relations.
 *)
 
-section{*Monotonicity of an Operator WRT a Relation*}
+section\<open>Monotonicity of an Operator WRT a Relation\<close>
 
 theory Monotonicity imports GenPrefix MultisetSum
 begin
--- a/src/ZF/UNITY/MultisetSum.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/MultisetSum.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,7 +2,7 @@
     Author:     Sidi O Ehmety
 *)
 
-section {*Setsum for Multisets*}
+section \<open>Setsum for Multisets\<close>
 
 theory MultisetSum
 imports "../Induct/Multiset"
--- a/src/ZF/UNITY/Mutex.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/Mutex.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -9,17 +9,17 @@
 reduce to the empty set.
 *)
 
-section{*Mutual Exclusion*}
+section\<open>Mutual Exclusion\<close>
 
 theory Mutex
 imports SubstAx
 begin
 
-text{*Based on "A Family of 2-Process Mutual Exclusion Algorithms" by J Misra
+text\<open>Based on "A Family of 2-Process Mutual Exclusion Algorithms" by J Misra
 
 Variables' types are introduced globally so that type verification reduces to
 the usual ZF typechecking: an ill-tyed expressions reduce to the empty set.
-*}
+\<close>
 
 abbreviation "p == Var([0])"
 abbreviation "m == Var([1])"
@@ -27,7 +27,7 @@
 abbreviation "u == Var([0,1])"
 abbreviation "v == Var([1,0])"
 
-axiomatization where --{** Type declarations  **}
+axiomatization where --\<open>Type declarations\<close>
   p_type:  "type_of(p)=bool & default_val(p)=0" and
   m_type:  "type_of(m)=int  & default_val(m)=#0" and
   n_type:  "type_of(n)=int  & default_val(n)=#0" and
@@ -126,7 +126,7 @@
 
 
 
-text{*Mutex is a program*}
+text\<open>Mutex is a program\<close>
 
 lemma Mutex_in_program [simp,TC]: "Mutex \<in> program"
 by (simp add: Mutex_def)
--- a/src/ZF/UNITY/State.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/State.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -8,7 +8,7 @@
  - variables can be quantified over.
 *)
 
-section{*UNITY Program States*}
+section\<open>UNITY Program States\<close>
 
 theory State imports Main begin
 
--- a/src/ZF/UNITY/SubstAx.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/SubstAx.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -5,7 +5,7 @@
 Theory ported from HOL.
 *)
 
-section{*Weak LeadsTo relation (restricted to the set of reachable states)*}
+section\<open>Weak LeadsTo relation (restricted to the set of reachable states)\<close>
 
 theory SubstAx
 imports WFair Constrains
@@ -348,7 +348,7 @@
 apply (rule_tac [3] subset_refl, auto)
 done
 
-ML {*
+ML \<open>
 (*proves "ensures/leadsTo" properties when the program is specified*)
 fun ensures_tac ctxt sact =
   SELECT_GOAL
@@ -374,11 +374,11 @@
            ALLGOALS (asm_full_simp_tac (ctxt addsimps [@{thm st_set_def}])),
                       ALLGOALS (clarify_tac ctxt),
           ALLGOALS (asm_lr_simp_tac ctxt)]);
-*}
+\<close>
 
-method_setup ensures = {*
+method_setup ensures = \<open>
     Args.goal_spec -- Scan.lift Args.name_inner_syntax >>
     (fn (quant, s) => fn ctxt => SIMPLE_METHOD'' quant (ensures_tac ctxt s))
-*} "for proving progress properties"
+\<close> "for proving progress properties"
 
 end
--- a/src/ZF/UNITY/UNITY.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/UNITY.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,14 +3,14 @@
     Copyright   2001  University of Cambridge
 *)
 
-section {*The Basic UNITY Theory*}
+section \<open>The Basic UNITY Theory\<close>
 
 theory UNITY imports State begin
 
-text{*The basic UNITY theory (revised version, based upon the "co" operator)
+text\<open>The basic UNITY theory (revised version, based upon the "co" operator)
 From Misra, "A Logic for Concurrent Programming", 1994.
 
-This ZF theory was ported from its HOL equivalent.*}
+This ZF theory was ported from its HOL equivalent.\<close>
 
 consts
   "constrains" :: "[i, i] => i"  (infixl "co"     60)
@@ -24,8 +24,8 @@
 
 definition
   mk_program :: "[i,i,i]=>i"  where
-  --{* The definition yields a program thanks to the coercions
-       init \<inter> state, acts \<inter> Pow(state*state), etc. *}
+  --\<open>The definition yields a program thanks to the coercions
+       init \<inter> state, acts \<inter> Pow(state*state), etc.\<close>
   "mk_program(init, acts, allowed) ==
     <init \<inter> state, cons(id(state), acts \<inter> Pow(state*state)),
               cons(id(state), allowed \<inter> Pow(state*state))>"
@@ -96,19 +96,19 @@
 defs
   constrains_def:
      "A co B == {F \<in> program. (\<forall>act \<in> Acts(F). act``A\<subseteq>B) & st_set(A)}"
-    --{* the condition @{term "st_set(A)"} makes the definition slightly
-         stronger than the HOL one *}
+    --\<open>the condition @{term "st_set(A)"} makes the definition slightly
+         stronger than the HOL one\<close>
 
   unless_def:    "A unless B == (A - B) co (A \<union> B)"
 
 
-text{*SKIP*}
+text\<open>SKIP\<close>
 lemma SKIP_in_program [iff,TC]: "SKIP \<in> program"
 by (force simp add: SKIP_def program_def mk_program_def)
 
 
-subsection{*The function @{term programify}, the coercion from anything to
- program*}
+subsection\<open>The function @{term programify}, the coercion from anything to
+ program\<close>
 
 lemma programify_program [simp]: "F \<in> program ==> programify(F)=F"
 by (force simp add: programify_def) 
@@ -116,7 +116,7 @@
 lemma programify_in_program [iff,TC]: "programify(F) \<in> program"
 by (force simp add: programify_def) 
 
-text{*Collapsing rules: to remove programify from expressions*}
+text\<open>Collapsing rules: to remove programify from expressions\<close>
 lemma programify_idem [simp]: "programify(programify(F))=programify(F)"
 by (force simp add: programify_def) 
 
@@ -130,7 +130,7 @@
      "AllowedActs(programify(F)) = AllowedActs(F)"
 by (simp add: AllowedActs_def)
 
-subsection{*The Inspectors for Programs*}
+subsection\<open>The Inspectors for Programs\<close>
 
 lemma id_in_RawActs: "F \<in> program ==>id(state) \<in> RawActs(F)"
 by (auto simp add: program_def RawActs_def)
@@ -152,7 +152,7 @@
 by (simp add: cons_absorb)
 
 
-subsection{*Types of the Inspectors*}
+subsection\<open>Types of the Inspectors\<close>
 
 lemma RawInit_type: "F \<in> program ==> RawInit(F)\<subseteq>state"
 by (auto simp add: program_def RawInit_def)
@@ -180,7 +180,7 @@
 lemma AllowedActs_type: "AllowedActs(F) \<subseteq> Pow(state*state)"
 by (simp add: RawAllowedActs_type AllowedActs_def)
 
-text{*Needed in Behaviors*}
+text\<open>Needed in Behaviors\<close>
 lemma ActsD: "[| act \<in> Acts(F); <s,s'> \<in> act |] ==> s \<in> state & s' \<in> state"
 by (blast dest: Acts_type [THEN subsetD])
 
@@ -188,10 +188,10 @@
      "[| act \<in> AllowedActs(F); <s,s'> \<in> act |] ==> s \<in> state & s' \<in> state"
 by (blast dest: AllowedActs_type [THEN subsetD])
 
-subsection{*Simplification rules involving @{term state}, @{term Init}, 
-  @{term Acts}, and @{term AllowedActs}*}
+subsection\<open>Simplification rules involving @{term state}, @{term Init}, 
+  @{term Acts}, and @{term AllowedActs}\<close>
 
-text{*But are they really needed?*}
+text\<open>But are they really needed?\<close>
 
 lemma state_subset_is_Init_iff [iff]: "state \<subseteq> Init(F) \<longleftrightarrow> Init(F)=state"
 by (cut_tac F = F in Init_type, auto)
@@ -204,7 +204,7 @@
      "Pow(state*state) \<subseteq> AllowedActs(F) \<longleftrightarrow> AllowedActs(F)=Pow(state*state)"
 by (cut_tac F = F in AllowedActs_type, auto)
 
-subsubsection{*Eliminating @{text "\<inter> state"} from expressions*}
+subsubsection\<open>Eliminating @{text "\<inter> state"} from expressions\<close>
 
 lemma Init_Int_state [simp]: "Init(F) \<inter> state = Init(F)"
 by (cut_tac F = F in Init_type, blast)
@@ -229,7 +229,7 @@
 by (cut_tac F = F in AllowedActs_type, blast)
 
 
-subsubsection{*The Operator @{term mk_program}*}
+subsubsection\<open>The Operator @{term mk_program}\<close>
 
 lemma mk_program_in_program [iff,TC]:
      "mk_program(init, acts, allowed) \<in> program"
@@ -262,7 +262,7 @@
       cons(id(state), allowed \<inter> Pow(state*state))"
 by (simp add: AllowedActs_def)
 
-text{*Init, Acts, and AlowedActs  of SKIP *}
+text\<open>Init, Acts, and AlowedActs  of SKIP\<close>
 
 lemma RawInit_SKIP [simp]: "RawInit(SKIP) = state"
 by (simp add: SKIP_def)
@@ -282,7 +282,7 @@
 lemma AllowedActs_SKIP [simp]: "AllowedActs(SKIP) = Pow(state*state)"
 by (force simp add: SKIP_def)
 
-text{*Equality of UNITY programs*}
+text\<open>Equality of UNITY programs\<close>
 
 lemma raw_surjective_mk_program:
      "F \<in> program ==> mk_program(RawInit(F), RawActs(F), RawAllowedActs(F))=F"
@@ -315,7 +315,7 @@
      (Init(F) = Init(G) & Acts(F) = Acts(G) & AllowedActs(F) = AllowedActs(G))"
 by (blast intro: program_equalityI program_equalityE)
 
-subsection{*These rules allow "lazy" definition expansion*}
+subsection\<open>These rules allow "lazy" definition expansion\<close>
 
 lemma def_prg_Init:
      "F == mk_program (init,acts,allowed) ==> Init(F) = init \<inter> state"
@@ -339,18 +339,18 @@
 by auto
 
 
-text{*An action is expanded only if a pair of states is being tested against it*}
+text\<open>An action is expanded only if a pair of states is being tested against it\<close>
 lemma def_act_simp:
      "[| act == {<s,s'> \<in> A*B. P(s, s')} |]
       ==> (<s,s'> \<in> act) \<longleftrightarrow> (<s,s'> \<in> A*B & P(s, s'))"
 by auto
 
-text{*A set is expanded only if an element is being tested against it*}
+text\<open>A set is expanded only if an element is being tested against it\<close>
 lemma def_set_simp: "A == B ==> (x \<in> A) \<longleftrightarrow> (x \<in> B)"
 by auto
 
 
-subsection{*The Constrains Operator*}
+subsection\<open>The Constrains Operator\<close>
 
 lemma constrains_type: "A co B \<subseteq> program"
 by (force simp add: constrains_def)
@@ -383,13 +383,13 @@
 apply (force simp add: constrains_def st_set_def)
 done
 
-text{*monotonic in 2nd argument*}
+text\<open>monotonic in 2nd argument\<close>
 lemma constrains_weaken_R:
     "[| F \<in> A co A'; A'\<subseteq>B' |] ==> F \<in> A co B'"
 apply (unfold constrains_def, blast)
 done
 
-text{*anti-monotonic in 1st argument*}
+text\<open>anti-monotonic in 1st argument\<close>
 lemma constrains_weaken_L:
     "[| F \<in> A co A'; B\<subseteq>A |] ==> F \<in> B co A'"
 apply (unfold constrains_def st_set_def, blast)
@@ -402,7 +402,7 @@
 done
 
 
-subsection{*Constrains and Union*}
+subsection\<open>Constrains and Union\<close>
 
 lemma constrains_Un:
     "[| F \<in> A co A'; F \<in> B co B' |] ==> F \<in> (A \<union> B) co (A' \<union> B')"
@@ -422,7 +422,7 @@
 by (force simp add: constrains_def st_set_def)
 
 
-subsection{*Constrains and Intersection*}
+subsection\<open>Constrains and Intersection\<close>
 
 lemma constrains_Int_distrib: "C co (A \<inter> B) = (C co A) \<inter> (C co B)"
 by (force simp add: constrains_def st_set_def)
@@ -455,8 +455,8 @@
   "[| F \<in> A co A' |] ==> A \<subseteq> A'"
 by (unfold constrains_def st_set_def, force)
 
-text{*The reasoning is by subsets since "co" refers to single actions
-  only.  So this rule isn't that useful.*}
+text\<open>The reasoning is by subsets since "co" refers to single actions
+  only.  So this rule isn't that useful.\<close>
 
 lemma constrains_trans: "[| F \<in> A co B; F \<in> B co C |] ==> F \<in> A co C"
 by (unfold constrains_def st_set_def, auto, blast)
@@ -468,7 +468,7 @@
 done
 
 
-subsection{*The Unless Operator*}
+subsection\<open>The Unless Operator\<close>
 
 lemma unless_type: "A unless B \<subseteq> program"
 by (force simp add: unless_def constrains_def) 
@@ -482,7 +482,7 @@
 by (unfold unless_def, auto)
 
 
-subsection{*The Operator @{term initially}*}
+subsection\<open>The Operator @{term initially}\<close>
 
 lemma initially_type: "initially(A) \<subseteq> program"
 by (unfold initially_def, blast)
@@ -494,7 +494,7 @@
 by (unfold initially_def, blast)
 
 
-subsection{*The Operator @{term stable}*}
+subsection\<open>The Operator @{term stable}\<close>
 
 lemma stable_type: "stable(A)\<subseteq>program"
 by (unfold stable_def constrains_def, blast)
@@ -516,7 +516,7 @@
 by (auto simp add: unless_def stable_def)
 
 
-subsection{*Union and Intersection with @{term stable}*}
+subsection\<open>Union and Intersection with @{term stable}\<close>
 
 lemma stable_Un:
     "[| F \<in> stable(A); F \<in> stable(A') |] ==> F \<in> stable(A \<union> A')"
@@ -564,7 +564,7 @@
 (* [| F \<in> stable(C); F  \<in> (C \<inter> A) co A |] ==> F \<in> stable(C \<inter> A) *)
 lemmas stable_constrains_stable = stable_constrains_Int [THEN stableI]
 
-subsection{*The Operator @{term invariant}*}
+subsection\<open>The Operator @{term invariant}\<close>
 
 lemma invariant_type: "invariant(A) \<subseteq> program"
 apply (unfold invariant_def)
@@ -584,8 +584,8 @@
 apply (blast dest: stableD2)
 done
 
-text{*Could also say
-      @{term "invariant(A) \<inter> invariant(B) \<subseteq> invariant (A \<inter> B)"}*}
+text\<open>Could also say
+      @{term "invariant(A) \<inter> invariant(B) \<subseteq> invariant (A \<inter> B)"}\<close>
 lemma invariant_Int:
   "[| F \<in> invariant(A);  F \<in> invariant(B) |] ==> F \<in> invariant(A \<inter> B)"
 apply (unfold invariant_def initially_def)
@@ -593,25 +593,25 @@
 done
 
 
-subsection{*The Elimination Theorem*}
+subsection\<open>The Elimination Theorem\<close>
 
 (** The "free" m has become universally quantified!
  Should the premise be !!m instead of \<forall>m ? Would make it harder
  to use in forward proof. **)
 
-text{*The general case is easier to prove than the special case!*}
+text\<open>The general case is easier to prove than the special case!\<close>
 lemma "elimination":
     "[| \<forall>m \<in> M. F \<in> {s \<in> A. x(s) = m} co B(m); F \<in> program  |]
      ==> F \<in> {s \<in> A. x(s) \<in> M} co (\<Union>m \<in> M. B(m))"
 by (auto simp add: constrains_def st_set_def, blast)
 
-text{*As above, but for the special case of A=state*}
+text\<open>As above, but for the special case of A=state\<close>
 lemma elimination2:
      "[| \<forall>m \<in> M. F \<in> {s \<in> state. x(s) = m} co B(m); F \<in> program  |]
      ==> F:{s \<in> state. x(s) \<in> M} co (\<Union>m \<in> M. B(m))"
 by (rule UNITY.elimination, auto)
 
-subsection{*The Operator @{term strongest_rhs}*}
+subsection\<open>The Operator @{term strongest_rhs}\<close>
 
 lemma constrains_strongest_rhs:
     "[| F \<in> program; st_set(A) |] ==> F \<in> A co (strongest_rhs(F,A))"
@@ -622,9 +622,9 @@
      "[| F \<in> A co B; st_set(B) |] ==> strongest_rhs(F,A) \<subseteq> B"
 by (auto simp add: constrains_def strongest_rhs_def st_set_def)
 
-ML {*
+ML \<open>
 fun simp_of_act def = def RS @{thm def_act_simp};
 fun simp_of_set def = def RS @{thm def_set_simp};
-*}
+\<close>
 
 end
--- a/src/ZF/UNITY/Union.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/Union.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -57,12 +57,12 @@
   "JN x. B"     == "CONST JOIN(CONST state,(%x. B))"
 
 
-subsection{*SKIP*}
+subsection\<open>SKIP\<close>
 
 lemma reachable_SKIP [simp]: "reachable(SKIP) = state"
 by (force elim: reachable.induct intro: reachable.intros)
 
-text{*Elimination programify from ok and Join*}
+text\<open>Elimination programify from ok and Join\<close>
 
 lemma ok_programify_left [iff]: "programify(F) ok G \<longleftrightarrow> F ok G"
 by (simp add: ok_def)
@@ -76,7 +76,7 @@
 lemma Join_programify_right [simp]: "F Join programify(G) = F Join G"
 by (simp add: Join_def)
 
-subsection{*SKIP and safety properties*}
+subsection\<open>SKIP and safety properties\<close>
 
 lemma SKIP_in_constrains_iff [iff]: "(SKIP \<in> A co B) \<longleftrightarrow> (A\<subseteq>B & st_set(A))"
 by (unfold constrains_def st_set_def, auto)
@@ -90,7 +90,7 @@
 lemma SKIP_in_Stable [iff]: "SKIP \<in> Stable(A)"
 by (unfold Stable_def, auto)
 
-subsection{*Join and JOIN types*}
+subsection\<open>Join and JOIN types\<close>
 
 lemma Join_in_program [iff,TC]: "F Join G \<in> program"
 by (unfold Join_def, auto)
@@ -98,7 +98,7 @@
 lemma JOIN_in_program [iff,TC]: "JOIN(I,F) \<in> program"
 by (unfold JOIN_def, auto)
 
-subsection{*Init, Acts, and AllowedActs of Join and JOIN*}
+subsection\<open>Init, Acts, and AllowedActs of Join and JOIN\<close>
 lemma Init_Join [simp]: "Init(F Join G) = Init(F) \<inter> Init(G)"
 by (simp add: Int_assoc Join_def)
 
@@ -110,7 +110,7 @@
 apply (simp add: Int_assoc cons_absorb Join_def)
 done
 
-subsection{*Join's algebraic laws*}
+subsection\<open>Join's algebraic laws\<close>
 
 lemma Join_commute: "F Join G = G Join F"
 by (simp add: Join_def Un_commute Int_commute)
@@ -123,7 +123,7 @@
 lemma Join_assoc: "(F Join G) Join H = F Join (G Join H)"
 by (simp add: Un_ac Join_def cons_absorb Int_assoc Int_Un_distrib2)
 
-subsection{*Needed below*}
+subsection\<open>Needed below\<close>
 lemma cons_id [simp]: "cons(id(state), Pow(state * state)) = Pow(state*state)"
 by auto
 
@@ -143,10 +143,10 @@
 lemma Join_left_absorb: "F Join (F Join G) = F Join G"
 by (simp add: Join_assoc [symmetric])
 
-subsection{*Join is an AC-operator*}
+subsection\<open>Join is an AC-operator\<close>
 lemmas Join_ac = Join_assoc Join_left_absorb Join_commute Join_left_commute
 
-subsection{*Eliminating programify form JN and OK expressions*}
+subsection\<open>Eliminating programify form JN and OK expressions\<close>
 
 lemma OK_programify [iff]: "OK(I, %x. programify(F(x))) \<longleftrightarrow> OK(I, F)"
 by (simp add: OK_def)
@@ -155,7 +155,7 @@
 by (simp add: JOIN_def)
 
 
-subsection{*JN*}
+subsection\<open>JN\<close>
 
 lemma JN_empty [simp]: "JOIN(0, F) = SKIP"
 by (unfold JOIN_def, auto)
@@ -190,7 +190,7 @@
 
 
 
-subsection{*JN laws*}
+subsection\<open>JN laws\<close>
 lemma JN_absorb: "k \<in> I ==>F(k) Join (\<Squnion>i \<in> I. F(i)) = (\<Squnion>i \<in> I. F(i))"
 apply (subst JN_cons [symmetric])
 apply (auto simp add: cons_absorb)
@@ -214,13 +214,13 @@
 lemma JN_Join_miniscope: "(\<Squnion>i \<in> I. F(i) Join G) = ((\<Squnion>i \<in> I. F(i) Join G))"
 by (simp add: JN_Join_distrib JN_constant)
 
-text{*Used to prove guarantees_JN_I*}
+text\<open>Used to prove guarantees_JN_I\<close>
 lemma JN_Join_diff: "i \<in> I==>F(i) Join JOIN(I - {i}, F) = JOIN(I, F)"
 apply (rule program_equalityI)
 apply (auto elim!: not_emptyE)
 done
 
-subsection{*Safety: co, stable, FP*}
+subsection\<open>Safety: co, stable, FP\<close>
 
 
 (*Fails if I=0 because it collapses to SKIP \<in> A co B, i.e. to A\<subseteq>B.  So an
@@ -326,7 +326,7 @@
 lemma FP_JN: "i \<in> I ==> FP(\<Squnion>i \<in> I. F(i)) = (\<Inter>i \<in> I. FP (programify(F(i))))"
 by (auto simp add: FP_def Inter_def st_set_def JN_stable)
 
-subsection{*Progress: transient, ensures*}
+subsection\<open>Progress: transient, ensures\<close>
 
 lemma JN_transient:
      "i \<in> I ==>
@@ -412,7 +412,7 @@
 apply (blast intro: stable_Join_ensures1)
 done
 
-subsection{*The ok and OK relations*}
+subsection\<open>The ok and OK relations\<close>
 
 lemma ok_SKIP1 [iff]: "SKIP ok F"
 by (auto dest: Acts_type [THEN subsetD] simp add: ok_def)
@@ -469,7 +469,7 @@
 done
 
 
-subsection{*Allowed*}
+subsection\<open>Allowed\<close>
 
 lemma Allowed_SKIP [simp]: "Allowed(SKIP) = program"
 by (auto dest: Acts_type [THEN subsetD] simp add: Allowed_def)
@@ -498,7 +498,7 @@
 apply (auto simp add: OK_iff_ok ok_iff_Allowed)
 done
 
-subsection{*safety_prop, for reasoning about given instances of "ok"*}
+subsection\<open>safety_prop, for reasoning about given instances of "ok"\<close>
 
 lemma safety_prop_Acts_iff:
      "safety_prop(X) ==> (Acts(G) \<subseteq> cons(id(state), (\<Union>F \<in> X. Acts(F)))) \<longleftrightarrow> (programify(G) \<in> X)"
--- a/src/ZF/UNITY/WFair.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/UNITY/WFair.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,15 +3,15 @@
     Copyright   1998  University of Cambridge
 *)
 
-section{*Progress under Weak Fairness*}
+section\<open>Progress under Weak Fairness\<close>
 
 theory WFair
 imports UNITY Main_ZFC
 begin
 
-text{*This theory defines the operators transient, ensures and leadsTo,
+text\<open>This theory defines the operators transient, ensures and leadsTo,
 assuming weak fairness. From Misra, "A Logic for Concurrent Programming",
-1994.*}
+1994.\<close>
 
 definition
   (* This definition specifies weak fairness.  The rest of the theory
@@ -286,7 +286,7 @@
 lemma leadsTo_Union_distrib: "(F \<in> \<Union>(S) leadsTo B) \<longleftrightarrow>  (\<forall>A \<in> S. F \<in> A leadsTo B) & F \<in> program & st_set(B)"
 by (blast dest: leadsToD2 intro: leadsTo_Union leadsTo_weaken_L)
 
-text{*Set difference: maybe combine with @{text leadsTo_weaken_L}??*}
+text\<open>Set difference: maybe combine with @{text leadsTo_weaken_L}??\<close>
 lemma leadsTo_Diff:
      "[| F: (A-B) leadsTo C; F \<in> B leadsTo C; st_set(C) |]
       ==> F \<in> A leadsTo C"
@@ -387,9 +387,9 @@
       !!A B. [| F \<in> A ensures B;  P(B); st_set(A); st_set(B) |] ==> P(A);
       !!S. [| \<forall>A \<in> S. P(A); \<forall>A \<in> S. st_set(A) |] ==> P(\<Union>(S))
    |] ==> P(za)"
-txt{*by induction on this formula*}
+txt\<open>by induction on this formula\<close>
 apply (subgoal_tac "P (zb) \<longrightarrow> P (za) ")
-txt{*now solve first subgoal: this formula is sufficient*}
+txt\<open>now solve first subgoal: this formula is sufficient\<close>
 apply (blast intro: leadsTo_refl)
 apply (erule leadsTo_induct)
 apply (blast+)
@@ -421,9 +421,9 @@
 done
 declare leadsTo_empty [simp]
 
-subsection{*PSP: Progress-Safety-Progress*}
+subsection\<open>PSP: Progress-Safety-Progress\<close>
 
-text{*Special case of PSP: Misra's "stable conjunction"*}
+text\<open>Special case of PSP: Misra's "stable conjunction"\<close>
 
 lemma psp_stable:
    "[| F \<in> A leadsTo A'; F \<in> stable(B) |] ==> F:(A \<inter> B) leadsTo (A' \<inter> B)"
@@ -456,9 +456,9 @@
 prefer 2 apply (blast dest!: constrainsD2 leadsToD2)
 apply (erule leadsTo_induct)
 prefer 3 apply (blast intro: leadsTo_Union_Int)
- txt{*Basis case*}
+ txt\<open>Basis case\<close>
  apply (blast intro: psp_ensures leadsTo_Basis)
-txt{*Transitivity case has a delicate argument involving "cancellation"*}
+txt\<open>Transitivity case has a delicate argument involving "cancellation"\<close>
 apply (rule leadsTo_Un_duplicate2)
 apply (erule leadsTo_cancel_Diff1)
 apply (simp add: Int_Diff Diff_triv)
@@ -481,7 +481,7 @@
 done
 
 
-subsection{*Proving the induction rules*}
+subsection\<open>Proving the induction rules\<close>
 
 (** The most general rule \<in> r is any wf relation; f is any variant function **)
 lemma leadsTo_wf_induct_aux: "[| wf(r);
@@ -600,13 +600,13 @@
       ==> \<exists>B \<in> Pow(state). A<=B & F \<in> B leadsTo A' & F \<in> (B-A') co (B \<union> A')"
 apply (frule leadsToD2)
 apply (erule leadsTo_induct)
-  txt{*Basis*}
+  txt\<open>Basis\<close>
   apply (blast dest: ensuresD constrainsD2 st_setD)
- txt{*Trans*}
+ txt\<open>Trans\<close>
  apply clarify
  apply (rule_tac x = "Ba \<union> Bb" in bexI)
  apply (blast intro: leadsTo_123_aux leadsTo_Un_Un leadsTo_cancel1 leadsTo_Un_duplicate, blast)
-txt{*Union*}
+txt\<open>Union\<close>
 apply (clarify dest!: ball_conj_distrib [THEN iffD1])
 apply (subgoal_tac "\<exists>y. y \<in> Pi (S, %A. {Ba \<in> Pow (state) . A<=Ba & F \<in> Ba leadsTo B & F \<in> Ba - B co Ba \<union> B}) ")
 defer 1
@@ -631,7 +631,7 @@
 done
 
 
-subsection{*Completion: Binary and General Finite versions*}
+subsection\<open>Completion: Binary and General Finite versions\<close>
 
 lemma completion_aux: "[| W = wlt(F, (B' \<union> C));
        F \<in> A leadsTo (A' \<union> C);  F \<in> A' co (A' \<union> C);
@@ -657,7 +657,7 @@
 apply (subgoal_tac "A \<inter> B \<subseteq> A \<inter> W")
 prefer 2 apply (blast dest!: leadsTo_subset intro!: subset_refl [THEN Int_mono])
 apply (blast intro: leadsTo_Trans subset_imp_leadsTo)
-txt{*last subgoal*}
+txt\<open>last subgoal\<close>
 apply (rule_tac leadsTo_Un_duplicate2)
 apply (rule_tac leadsTo_Un_Un)
  prefer 2 apply (blast intro: leadsTo_refl)
--- a/src/ZF/Univ.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Univ.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -9,7 +9,7 @@
   But Ind_Syntax.univ refers to the constant "Univ.univ"
 *)
 
-section{*The Cumulative Hierarchy and a Small Universe for Recursive Types*}
+section\<open>The Cumulative Hierarchy and a Small Universe for Recursive Types\<close>
 
 theory Univ imports Epsilon Cardinal begin
 
@@ -37,13 +37,13 @@
     "univ(A) == Vfrom(A,nat)"
 
 
-subsection{*Immediate Consequences of the Definition of @{term "Vfrom(A,i)"}*}
+subsection\<open>Immediate Consequences of the Definition of @{term "Vfrom(A,i)"}\<close>
 
-text{*NOT SUITABLE FOR REWRITING -- RECURSIVE!*}
+text\<open>NOT SUITABLE FOR REWRITING -- RECURSIVE!\<close>
 lemma Vfrom: "Vfrom(A,i) = A \<union> (\<Union>j\<in>i. Pow(Vfrom(A,j)))"
 by (subst Vfrom_def [THEN def_transrec], simp)
 
-subsubsection{* Monotonicity *}
+subsubsection\<open>Monotonicity\<close>
 
 lemma Vfrom_mono [rule_format]:
      "A<=B ==> \<forall>j. i<=j \<longrightarrow> Vfrom(A,i) \<subseteq> Vfrom(B,j)"
@@ -59,7 +59,7 @@
 by (blast dest: Vfrom_mono [OF subset_refl le_imp_subset [OF leI]])
 
 
-subsubsection{* A fundamental equality: Vfrom does not require ordinals! *}
+subsubsection\<open>A fundamental equality: Vfrom does not require ordinals!\<close>
 
 
 
@@ -77,7 +77,7 @@
 apply (subst Vfrom)
 apply (subst Vfrom, rule subset_refl [THEN Un_mono])
 apply (rule UN_least)
-txt{*expand @{text "rank(x1) = (\<Union>y\<in>x1. succ(rank(y)))"} in assumptions*}
+txt\<open>expand @{text "rank(x1) = (\<Union>y\<in>x1. succ(rank(y)))"} in assumptions\<close>
 apply (erule rank [THEN equalityD1, THEN subsetD, THEN UN_E])
 apply (rule subset_trans)
 apply (erule_tac [2] UN_upper)
@@ -94,7 +94,7 @@
 done
 
 
-subsection{* Basic Closure Properties *}
+subsection\<open>Basic Closure Properties\<close>
 
 lemma zero_in_Vfrom: "y:x ==> 0 \<in> Vfrom(A,x)"
 by (subst Vfrom, blast)
@@ -114,7 +114,7 @@
 lemma subset_mem_Vfrom: "a \<subseteq> Vfrom(A,i) ==> a \<in> Vfrom(A,succ(i))"
 by (subst Vfrom, blast)
 
-subsubsection{* Finite sets and ordered pairs *}
+subsubsection\<open>Finite sets and ordered pairs\<close>
 
 lemma singleton_in_Vfrom: "a \<in> Vfrom(A,i) ==> {a} \<in> Vfrom(A,succ(i))"
 by (rule subset_mem_Vfrom, safe)
@@ -135,7 +135,7 @@
 apply (rule Vfrom_mono [OF subset_refl subset_succI])
 done
 
-subsection{* 0, Successor and Limit Equations for @{term Vfrom} *}
+subsection\<open>0, Successor and Limit Equations for @{term Vfrom}\<close>
 
 lemma Vfrom_0: "Vfrom(A,0) = A"
 by (subst Vfrom, blast)
@@ -162,7 +162,7 @@
 lemma Vfrom_Union: "y:X ==> Vfrom(A,\<Union>(X)) = (\<Union>y\<in>X. Vfrom(A,y))"
 apply (subst Vfrom)
 apply (rule equalityI)
-txt{*first inclusion*}
+txt\<open>first inclusion\<close>
 apply (rule Un_least)
 apply (rule A_subset_Vfrom [THEN subset_trans])
 apply (rule UN_upper, assumption)
@@ -171,12 +171,12 @@
 apply (rule subset_trans)
 apply (erule_tac [2] UN_upper,
        subst Vfrom, erule subset_trans [OF UN_upper Un_upper2])
-txt{*opposite inclusion*}
+txt\<open>opposite inclusion\<close>
 apply (rule UN_least)
 apply (subst Vfrom, blast)
 done
 
-subsection{* @{term Vfrom} applied to Limit Ordinals *}
+subsection\<open>@{term Vfrom} applied to Limit Ordinals\<close>
 
 (*NB. limit ordinals are non-empty:
       Vfrom(A,0) = A = A \<union> (\<Union>y\<in>0. Vfrom(A,y)) *)
@@ -209,7 +209,7 @@
 lemmas Vfrom_UnI2 =
     Un_upper2 [THEN subset_refl [THEN Vfrom_mono, THEN subsetD]]
 
-text{*Hard work is finding a single j:i such that {a,b}<=Vfrom(A,j)*}
+text\<open>Hard work is finding a single j:i such that {a,b}<=Vfrom(A,j)\<close>
 lemma doubleton_in_VLimit:
     "[| a \<in> Vfrom(A,i);  b \<in> Vfrom(A,i);  Limit(i) |] ==> {a,b} \<in> Vfrom(A,i)"
 apply (erule Limit_VfromE, assumption)
@@ -220,10 +220,10 @@
 
 lemma Pair_in_VLimit:
     "[| a \<in> Vfrom(A,i);  b \<in> Vfrom(A,i);  Limit(i) |] ==> <a,b> \<in> Vfrom(A,i)"
-txt{*Infer that a, b occur at ordinals x,xa < i.*}
+txt\<open>Infer that a, b occur at ordinals x,xa < i.\<close>
 apply (erule Limit_VfromE, assumption)
 apply (erule Limit_VfromE, assumption)
-txt{*Infer that @{term"succ(succ(x \<union> xa)) < i"} *}
+txt\<open>Infer that @{term"succ(succ(x \<union> xa)) < i"}\<close>
 apply (blast intro: VfromI [OF Pair_in_Vfrom]
                     Vfrom_UnI1 Vfrom_UnI2 Limit_has_succ Un_least_lt)
 done
@@ -240,7 +240,7 @@
 lemma nat_into_VLimit: "[| n: nat;  Limit(i) |] ==> n \<in> Vfrom(A,i)"
 by (blast intro: nat_subset_VLimit [THEN subsetD])
 
-subsubsection{* Closure under Disjoint Union *}
+subsubsection\<open>Closure under Disjoint Union\<close>
 
 lemmas zero_in_VLimit = Limit_has_0 [THEN ltD, THEN zero_in_Vfrom]
 
@@ -266,7 +266,7 @@
 
 
 
-subsection{* Properties assuming @{term "Transset(A)"} *}
+subsection\<open>Properties assuming @{term "Transset(A)"}\<close>
 
 lemma Transset_Vfrom: "Transset(A) ==> Transset(Vfrom(A,i))"
 apply (rule_tac a=i in eps_induct)
@@ -313,13 +313,13 @@
      and i is a limit ordinal
 ***)
 
-text{*General theorem for membership in Vfrom(A,i) when i is a limit ordinal*}
+text\<open>General theorem for membership in Vfrom(A,i) when i is a limit ordinal\<close>
 lemma in_VLimit:
   "[| a \<in> Vfrom(A,i);  b \<in> Vfrom(A,i);  Limit(i);
       !!x y j. [| j<i; 1:j; x \<in> Vfrom(A,j); y \<in> Vfrom(A,j) |]
                ==> \<exists>k. h(x,y) \<in> Vfrom(A,k) & k<i |]
    ==> h(a,b) \<in> Vfrom(A,i)"
-txt{*Infer that a, b occur at ordinals x,xa < i.*}
+txt\<open>Infer that a, b occur at ordinals x,xa < i.\<close>
 apply (erule Limit_VfromE, assumption)
 apply (erule Limit_VfromE, assumption, atomize)
 apply (drule_tac x=a in spec)
@@ -329,7 +329,7 @@
 apply (blast intro: Limit_has_0 Limit_has_succ VfromI)
 done
 
-subsubsection{* Products *}
+subsubsection\<open>Products\<close>
 
 lemma prod_in_Vfrom:
     "[| a \<in> Vfrom(A,j);  b \<in> Vfrom(A,j);  Transset(A) |]
@@ -347,7 +347,7 @@
 apply (blast intro: prod_in_Vfrom Limit_has_succ)
 done
 
-subsubsection{* Disjoint Sums, or Quine Ordered Pairs *}
+subsubsection\<open>Disjoint Sums, or Quine Ordered Pairs\<close>
 
 lemma sum_in_Vfrom:
     "[| a \<in> Vfrom(A,j);  b \<in> Vfrom(A,j);  Transset(A);  1:j |]
@@ -366,7 +366,7 @@
 apply (blast intro: sum_in_Vfrom Limit_has_succ)
 done
 
-subsubsection{* Function Space! *}
+subsubsection\<open>Function Space!\<close>
 
 lemma fun_in_Vfrom:
     "[| a \<in> Vfrom(A,j);  b \<in> Vfrom(A,j);  Transset(A) |] ==>
@@ -404,7 +404,7 @@
 by (blast elim: Limit_VfromE intro: Limit_has_succ Pow_in_Vfrom VfromI)
 
 
-subsection{* The Set @{term "Vset(i)"} *}
+subsection\<open>The Set @{term "Vset(i)"}\<close>
 
 lemma Vset: "Vset(i) = (\<Union>j\<in>i. Pow(Vset(j)))"
 by (subst Vfrom, blast)
@@ -412,7 +412,7 @@
 lemmas Vset_succ = Transset_0 [THEN Transset_Vfrom_succ]
 lemmas Transset_Vset = Transset_0 [THEN Transset_Vfrom]
 
-subsubsection{* Characterisation of the elements of @{term "Vset(i)"} *}
+subsubsection\<open>Characterisation of the elements of @{term "Vset(i)"}\<close>
 
 lemma VsetD [rule_format]: "Ord(i) ==> \<forall>b. b \<in> Vset(i) \<longrightarrow> rank(b) < i"
 apply (erule trans_induct)
@@ -432,7 +432,7 @@
 lemma VsetI: "rank(x)<i ==> x \<in> Vset(i)"
 by (blast intro: VsetI_lemma elim: ltE)
 
-text{*Merely a lemma for the next result*}
+text\<open>Merely a lemma for the next result\<close>
 lemma Vset_Ord_rank_iff: "Ord(i) ==> b \<in> Vset(i) \<longleftrightarrow> rank(b) < i"
 by (blast intro: VsetD VsetI)
 
@@ -441,7 +441,7 @@
 apply (rule Ord_rank [THEN Vset_Ord_rank_iff])
 done
 
-text{*This is rank(rank(a)) = rank(a) *}
+text\<open>This is rank(rank(a)) = rank(a)\<close>
 declare Ord_rank [THEN rank_of_Ord, simp]
 
 lemma rank_Vset: "Ord(i) ==> rank(Vset(i)) = i"
@@ -459,7 +459,7 @@
 apply (simp add: Vset_succ)
 done
 
-subsubsection{* Reasoning about Sets in Terms of Their Elements' Ranks *}
+subsubsection\<open>Reasoning about Sets in Terms of Their Elements' Ranks\<close>
 
 lemma arg_subset_Vset_rank: "a \<subseteq> Vset(rank(a))"
 apply (rule subsetI)
@@ -473,7 +473,7 @@
 apply (blast intro: Ord_rank)
 done
 
-subsubsection{* Set Up an Environment for Simplification *}
+subsubsection\<open>Set Up an Environment for Simplification\<close>
 
 lemma rank_Inl: "rank(a) < rank(Inl(a))"
 apply (unfold Inl_def)
@@ -487,16 +487,16 @@
 
 lemmas rank_rls = rank_Inl rank_Inr rank_pair1 rank_pair2
 
-subsubsection{* Recursion over Vset Levels! *}
+subsubsection\<open>Recursion over Vset Levels!\<close>
 
-text{*NOT SUITABLE FOR REWRITING: recursive!*}
+text\<open>NOT SUITABLE FOR REWRITING: recursive!\<close>
 lemma Vrec: "Vrec(a,H) = H(a, \<lambda>x\<in>Vset(rank(a)). Vrec(x,H))"
 apply (unfold Vrec_def)
 apply (subst transrec, simp)
 apply (rule refl [THEN lam_cong, THEN subst_context], simp add: lt_def)
 done
 
-text{*This form avoids giant explosions in proofs.  NOTE USE OF == *}
+text\<open>This form avoids giant explosions in proofs.  NOTE USE OF ==\<close>
 lemma def_Vrec:
     "[| !!x. h(x)==Vrec(x,H) |] ==>
      h(a) = H(a, \<lambda>x\<in>Vset(rank(a)). h(x))"
@@ -504,7 +504,7 @@
 apply (rule Vrec)
 done
 
-text{*NOT SUITABLE FOR REWRITING: recursive!*}
+text\<open>NOT SUITABLE FOR REWRITING: recursive!\<close>
 lemma Vrecursor:
      "Vrecursor(H,a) = H(\<lambda>x\<in>Vset(rank(a)). Vrecursor(H,x),  a)"
 apply (unfold Vrecursor_def)
@@ -512,7 +512,7 @@
 apply (rule refl [THEN lam_cong, THEN subst_context], simp add: lt_def)
 done
 
-text{*This form avoids giant explosions in proofs.  NOTE USE OF == *}
+text\<open>This form avoids giant explosions in proofs.  NOTE USE OF ==\<close>
 lemma def_Vrecursor:
      "h == Vrecursor(H) ==> h(a) = H(\<lambda>x\<in>Vset(rank(a)). h(x),  a)"
 apply simp
@@ -520,7 +520,7 @@
 done
 
 
-subsection{* The Datatype Universe: @{term "univ(A)"} *}
+subsection\<open>The Datatype Universe: @{term "univ(A)"}\<close>
 
 lemma univ_mono: "A<=B ==> univ(A) \<subseteq> univ(B)"
 apply (unfold univ_def)
@@ -533,7 +533,7 @@
 apply (erule Transset_Vfrom)
 done
 
-subsubsection{* The Set @{term"univ(A)"} as a Limit *}
+subsubsection\<open>The Set @{term"univ(A)"} as a Limit\<close>
 
 lemma univ_eq_UN: "univ(A) = (\<Union>i\<in>nat. Vfrom(A,i))"
 apply (unfold univ_def)
@@ -564,7 +564,7 @@
 apply (blast elim: equalityCE)
 done
 
-subsection{* Closure Properties for @{term "univ(A)"}*}
+subsection\<open>Closure Properties for @{term "univ(A)"}\<close>
 
 lemma zero_in_univ: "0 \<in> univ(A)"
 apply (unfold univ_def)
@@ -581,7 +581,7 @@
 
 lemmas A_into_univ = A_subset_univ [THEN subsetD]
 
-subsubsection{* Closure under Unordered and Ordered Pairs *}
+subsubsection\<open>Closure under Unordered and Ordered Pairs\<close>
 
 lemma singleton_in_univ: "a: univ(A) ==> {a} \<in> univ(A)"
 apply (unfold univ_def)
@@ -612,24 +612,24 @@
 done
 
 
-subsubsection{* The Natural Numbers *}
+subsubsection\<open>The Natural Numbers\<close>
 
 lemma nat_subset_univ: "nat \<subseteq> univ(A)"
 apply (unfold univ_def)
 apply (rule i_subset_Vfrom)
 done
 
-text{* n:nat ==> n:univ(A) *}
+text\<open>n:nat ==> n:univ(A)\<close>
 lemmas nat_into_univ = nat_subset_univ [THEN subsetD]
 
-subsubsection{* Instances for 1 and 2 *}
+subsubsection\<open>Instances for 1 and 2\<close>
 
 lemma one_in_univ: "1 \<in> univ(A)"
 apply (unfold univ_def)
 apply (rule Limit_nat [THEN one_in_VLimit])
 done
 
-text{*unused!*}
+text\<open>unused!\<close>
 lemma two_in_univ: "2 \<in> univ(A)"
 by (blast intro: nat_into_univ)
 
@@ -641,7 +641,7 @@
 lemmas bool_into_univ = bool_subset_univ [THEN subsetD]
 
 
-subsubsection{* Closure under Disjoint Union *}
+subsubsection\<open>Closure under Disjoint Union\<close>
 
 lemma Inl_in_univ: "a: univ(A) ==> Inl(a) \<in> univ(A)"
 apply (unfold univ_def)
@@ -672,9 +672,9 @@
   Closure under RepFun -- use   RepFun_subset *)
 
 
-subsection{* Finite Branching Closure Properties *}
+subsection\<open>Finite Branching Closure Properties\<close>
 
-subsubsection{* Closure under Finite Powerset *}
+subsubsection\<open>Closure under Finite Powerset\<close>
 
 lemma Fin_Vfrom_lemma:
      "[| b: Fin(Vfrom(A,i));  Limit(i) |] ==> \<exists>j. b \<subseteq> Vfrom(A,j) & j<i"
@@ -698,7 +698,7 @@
 apply (rule Limit_nat [THEN Fin_VLimit])
 done
 
-subsubsection{* Closure under Finite Powers: Functions from a Natural Number *}
+subsubsection\<open>Closure under Finite Powers: Functions from a Natural Number\<close>
 
 lemma nat_fun_VLimit:
      "[| n: nat;  Limit(i) |] ==> n -> Vfrom(A,i) \<subseteq> Vfrom(A,i)"
@@ -715,9 +715,9 @@
 done
 
 
-subsubsection{* Closure under Finite Function Space *}
+subsubsection\<open>Closure under Finite Function Space\<close>
 
-text{*General but seldom-used version; normally the domain is fixed*}
+text\<open>General but seldom-used version; normally the domain is fixed\<close>
 lemma FiniteFun_VLimit1:
      "Limit(i) ==> Vfrom(A,i) -||> Vfrom(A,i) \<subseteq> Vfrom(A,i)"
 apply (rule FiniteFun.dom_subset [THEN subset_trans])
@@ -730,7 +730,7 @@
 apply (rule Limit_nat [THEN FiniteFun_VLimit1])
 done
 
-text{*Version for a fixed domain*}
+text\<open>Version for a fixed domain\<close>
 lemma FiniteFun_VLimit:
      "[| W \<subseteq> Vfrom(A,i); Limit(i) |] ==> W -||> Vfrom(A,i) \<subseteq> Vfrom(A,i)"
 apply (rule subset_trans)
@@ -748,22 +748,22 @@
      "[| f: W -||> univ(A);  W \<subseteq> univ(A) |] ==> f \<in> univ(A)"
 by (erule FiniteFun_univ [THEN subsetD], assumption)
 
-text{*Remove @{text "\<subseteq>"} from the rule above*}
+text\<open>Remove @{text "\<subseteq>"} from the rule above\<close>
 lemmas FiniteFun_in_univ' = FiniteFun_in_univ [OF _ subsetI]
 
 
-subsection{** For QUniv.  Properties of Vfrom analogous to the "take-lemma" **}
+subsection\<open>* For QUniv.  Properties of Vfrom analogous to the "take-lemma" *\<close>
 
-text{* Intersecting a*b with Vfrom... *}
+text\<open>Intersecting a*b with Vfrom...\<close>
 
-text{*This version says a, b exist one level down, in the smaller set Vfrom(X,i)*}
+text\<open>This version says a, b exist one level down, in the smaller set Vfrom(X,i)\<close>
 lemma doubleton_in_Vfrom_D:
      "[| {a,b} \<in> Vfrom(X,succ(i));  Transset(X) |]
       ==> a \<in> Vfrom(X,i)  &  b \<in> Vfrom(X,i)"
 by (drule Transset_Vfrom_succ [THEN equalityD1, THEN subsetD, THEN PowD],
     assumption, fast)
 
-text{*This weaker version says a, b exist at the same level*}
+text\<open>This weaker version says a, b exist at the same level\<close>
 lemmas Vfrom_doubleton_D = Transset_Vfrom [THEN Transset_doubleton_D]
 
 (** Using only the weaker theorem would prove <a,b> \<in> Vfrom(X,i)
@@ -788,10 +788,10 @@
 
 
 ML
-{*
+\<open>
 val rank_ss =
   simpset_of (@{context} addsimps [@{thm VsetI}]
     addsimps @{thms rank_rls} @ (@{thms rank_rls} RLN (2, [@{thm lt_trans}])));
-*}
+\<close>
 
 end
--- a/src/ZF/WF.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/WF.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -14,7 +14,7 @@
 a mess.
 *)
 
-section{*Well-Founded Recursion*}
+section\<open>Well-Founded Recursion\<close>
 
 theory WF imports Trancl begin
 
@@ -50,9 +50,9 @@
     "wfrec[A](r,a,H) == wfrec(r \<inter> A*A, a, H)"
 
 
-subsection{*Well-Founded Relations*}
+subsection\<open>Well-Founded Relations\<close>
 
-subsubsection{*Equivalences between @{term wf} and @{term wf_on}*}
+subsubsection\<open>Equivalences between @{term wf} and @{term wf_on}\<close>
 
 lemma wf_imp_wf_on: "wf(r) ==> wf[A](r)"
 by (unfold wf_def wf_on_def, force)
@@ -75,10 +75,10 @@
 lemma wf_subset: "[|wf(s); r<=s|] ==> wf(r)"
 by (simp add: wf_def, fast)
 
-subsubsection{*Introduction Rules for @{term wf_on}*}
+subsubsection\<open>Introduction Rules for @{term wf_on}\<close>
 
-text{*If every non-empty subset of @{term A} has an @{term r}-minimal element
-   then we have @{term "wf[A](r)"}.*}
+text\<open>If every non-empty subset of @{term A} has an @{term r}-minimal element
+   then we have @{term "wf[A](r)"}.\<close>
 lemma wf_onI:
  assumes prem: "!!Z u. [| Z<=A;  u \<in> Z;  \<forall>x\<in>Z. \<exists>y\<in>Z. <y,x>:r |] ==> False"
  shows         "wf[A](r)"
@@ -87,9 +87,9 @@
 apply (rule_tac Z = Z in prem, blast+)
 done
 
-text{*If @{term r} allows well-founded induction over @{term A}
+text\<open>If @{term r} allows well-founded induction over @{term A}
    then we have @{term "wf[A](r)"}.   Premise is equivalent to
-  @{prop "!!B. \<forall>x\<in>A. (\<forall>y. <y,x>: r \<longrightarrow> y \<in> B) \<longrightarrow> x \<in> B ==> A<=B"} *}
+  @{prop "!!B. \<forall>x\<in>A. (\<forall>y. <y,x>: r \<longrightarrow> y \<in> B) \<longrightarrow> x \<in> B ==> A<=B"}\<close>
 lemma wf_onI2:
  assumes prem: "!!y B. [| \<forall>x\<in>A. (\<forall>y\<in>A. <y,x>:r \<longrightarrow> y \<in> B) \<longrightarrow> x \<in> B;   y \<in> A |]
                        ==> y \<in> B"
@@ -101,10 +101,10 @@
 done
 
 
-subsubsection{*Well-founded Induction*}
+subsubsection\<open>Well-founded Induction\<close>
 
-text{*Consider the least @{term z} in @{term "domain(r)"} such that
-  @{term "P(z)"} does not hold...*}
+text\<open>Consider the least @{term z} in @{term "domain(r)"} such that
+  @{term "P(z)"} does not hold...\<close>
 lemma wf_induct_raw:
     "[| wf(r);
         !!x.[| \<forall>y. <y,x>: r \<longrightarrow> P(y) |] ==> P(x) |]
@@ -116,7 +116,7 @@
 
 lemmas wf_induct = wf_induct_raw [rule_format, consumes 1, case_names step, induct set: wf]
 
-text{*The form of this rule is designed to match @{text wfI}*}
+text\<open>The form of this rule is designed to match @{text wfI}\<close>
 lemma wf_induct2:
     "[| wf(r);  a \<in> A;  field(r)<=A;
         !!x.[| x \<in> A;  \<forall>y. <y,x>: r \<longrightarrow> P(y) |] ==> P(x) |]
@@ -141,8 +141,8 @@
   wf_on_induct_raw [rule_format, consumes 2, case_names step, induct set: wf_on]
 
 
-text{*If @{term r} allows well-founded induction
-   then we have @{term "wf(r)"}.*}
+text\<open>If @{term r} allows well-founded induction
+   then we have @{term "wf(r)"}.\<close>
 lemma wfI:
     "[| field(r)<=A;
         !!y B. [| \<forall>x\<in>A. (\<forall>y\<in>A. <y,x>:r \<longrightarrow> y \<in> B) \<longrightarrow> x \<in> B;  y \<in> A|]
@@ -155,7 +155,7 @@
 done
 
 
-subsection{*Basic Properties of Well-Founded Relations*}
+subsection\<open>Basic Properties of Well-Founded Relations\<close>
 
 lemma wf_not_refl: "wf(r) ==> <a,a> \<notin> r"
 by (erule_tac a=a in wf_induct, blast)
@@ -191,8 +191,8 @@
 
 
 
-text{*transitive closure of a WF relation is WF provided
-  @{term A} is downward closed*}
+text\<open>transitive closure of a WF relation is WF provided
+  @{term A} is downward closed\<close>
 lemma wf_on_trancl:
     "[| wf[A](r);  r-``A \<subseteq> A |] ==> wf[A](r^+)"
 apply (rule wf_onI2)
@@ -210,13 +210,13 @@
 done
 
 
-text{*@{term "r-``{a}"} is the set of everything under @{term a} in @{term r}*}
+text\<open>@{term "r-``{a}"} is the set of everything under @{term a} in @{term r}\<close>
 
 lemmas underI = vimage_singleton_iff [THEN iffD2]
 lemmas underD = vimage_singleton_iff [THEN iffD1]
 
 
-subsection{*The Predicate @{term is_recfun}*}
+subsection\<open>The Predicate @{term is_recfun}\<close>
 
 lemma is_recfun_type: "is_recfun(r,a,H,f) ==> f \<in> r-``{a} -> range(f)"
 apply (unfold is_recfun_def)
@@ -229,7 +229,7 @@
 lemma apply_recfun:
     "[| is_recfun(r,a,H,f); <x,a>:r |] ==> f`x = H(x, restrict(f,r-``{x}))"
 apply (unfold is_recfun_def)
-  txt{*replace f only on the left-hand side*}
+  txt\<open>replace f only on the left-hand side\<close>
 apply (erule_tac P = "%x. t(x) = u" for t u in ssubst)
 apply (simp add: underI)
 done
@@ -262,7 +262,7 @@
 apply (blast dest: transD intro: is_recfun_equal)
 done
 
-subsection{*Recursion: Main Existence Lemma*}
+subsection\<open>Recursion: Main Existence Lemma\<close>
 
 lemma is_recfun_functional:
      "[| wf(r); trans(r); is_recfun(r,a,H,f); is_recfun(r,a,H,g) |]  ==>  f=g"
@@ -287,7 +287,7 @@
 apply (rule_tac f = "\<lambda>y\<in>r-``{a1}. wftrec (r,y,H)" in is_the_recfun)
   apply typecheck
 apply (unfold is_recfun_def wftrec_def)
-  --{*Applying the substitution: must keep the quantified assumption!*}
+  --\<open>Applying the substitution: must keep the quantified assumption!\<close>
 apply (rule lam_cong [OF refl])
 apply (drule underD)
 apply (fold is_recfun_def)
@@ -307,7 +307,7 @@
 done
 
 
-subsection{*Unfolding @{term "wftrec(r,a,H)"}*}
+subsection\<open>Unfolding @{term "wftrec(r,a,H)"}\<close>
 
 lemma the_recfun_cut:
      "[| wf(r);  trans(r);  <b,a>:r |]
@@ -324,7 +324,7 @@
 done
 
 
-subsubsection{*Removal of the Premise @{term "trans(r)"}*}
+subsubsection\<open>Removal of the Premise @{term "trans(r)"}\<close>
 
 (*NOT SUITABLE FOR REWRITING: it is recursive!*)
 lemma wfrec:
@@ -363,7 +363,7 @@
 apply (simp add: vimage_Int_square cons_subset_iff)
 done
 
-text{*Minimal-element characterization of well-foundedness*}
+text\<open>Minimal-element characterization of well-foundedness\<close>
 lemma wf_eq_minimal:
      "wf(r) \<longleftrightarrow> (\<forall>Q x. x \<in> Q \<longrightarrow> (\<exists>z\<in>Q. \<forall>y. <y,z>:r \<longrightarrow> y\<notin>Q))"
 by (unfold wf_def, blast)
--- a/src/ZF/ZF.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/ZF.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1993  University of Cambridge
 *)
 
-section{*Zermelo-Fraenkel Set Theory*}
+section\<open>Zermelo-Fraenkel Set Theory\<close>
 
 theory ZF
 imports "~~/src/FOL/FOL"
@@ -15,27 +15,27 @@
 instance i :: "term" ..
 
 axiomatization
-  zero :: "i"  ("0")   --{*the empty set*}  and
-  Pow :: "i => i"  --{*power sets*}  and
-  Inf :: "i"  --{*infinite set*}
+  zero :: "i"  ("0")   --\<open>the empty set\<close>  and
+  Pow :: "i => i"  --\<open>power sets\<close>  and
+  Inf :: "i"  --\<open>infinite set\<close>
 
-text {*Bounded Quantifiers *}
+text \<open>Bounded Quantifiers\<close>
 consts
   Ball   :: "[i, i => o] => o"
   Bex   :: "[i, i => o] => o"
 
-text {*General Union and Intersection *}
+text \<open>General Union and Intersection\<close>
 axiomatization Union :: "i => i"
 consts Inter :: "i => i"
 
-text {*Variations on Replacement *}
+text \<open>Variations on Replacement\<close>
 axiomatization PrimReplace :: "[i, [i, i] => o] => i"
 consts
   Replace     :: "[i, [i, i] => o] => i"
   RepFun      :: "[i, i => i] => i"
   Collect     :: "[i, i => o] => i"
 
-text{*Definite descriptions -- via Replace over the set "1"*}
+text\<open>Definite descriptions -- via Replace over the set "1"\<close>
 consts
   The         :: "(i => o) => i"      (binder "THE " 10)
   If          :: "[o, i, i] => i"     ("(if (_)/ then (_)/ else (_))" [10] 10)
@@ -45,59 +45,59 @@
   "if(P,a,b) == If(P,a,b)"
 
 
-text {*Finite Sets *}
+text \<open>Finite Sets\<close>
 consts
   Upair :: "[i, i] => i"
   cons  :: "[i, i] => i"
   succ  :: "i => i"
 
-text {*Ordered Pairing *}
+text \<open>Ordered Pairing\<close>
 consts
   Pair  :: "[i, i] => i"
   fst   :: "i => i"
   snd   :: "i => i"
-  split :: "[[i, i] => 'a, i] => 'a::{}"  --{*for pattern-matching*}
+  split :: "[[i, i] => 'a, i] => 'a::{}"  --\<open>for pattern-matching\<close>
 
-text {*Sigma and Pi Operators *}
+text \<open>Sigma and Pi Operators\<close>
 consts
   Sigma :: "[i, i => i] => i"
   Pi    :: "[i, i => i] => i"
 
-text {*Relations and Functions *}
+text \<open>Relations and Functions\<close>
 consts
   "domain"    :: "i => i"
   range       :: "i => i"
   field       :: "i => i"
   converse    :: "i => i"
-  relation    :: "i => o"        --{*recognizes sets of pairs*}
-  "function"  :: "i => o"        --{*recognizes functions; can have non-pairs*}
+  relation    :: "i => o"        --\<open>recognizes sets of pairs\<close>
+  "function"  :: "i => o"        --\<open>recognizes functions; can have non-pairs\<close>
   Lambda      :: "[i, i => i] => i"
   restrict    :: "[i, i] => i"
 
-text {*Infixes in order of decreasing precedence *}
+text \<open>Infixes in order of decreasing precedence\<close>
 consts
 
-  Image       :: "[i, i] => i"    (infixl "``" 90) --{*image*}
-  vimage      :: "[i, i] => i"    (infixl "-``" 90) --{*inverse image*}
-  "apply"     :: "[i, i] => i"    (infixl "`" 90) --{*function application*}
-  "Int"       :: "[i, i] => i"    (infixl "Int" 70) --{*binary intersection*}
-  "Un"        :: "[i, i] => i"    (infixl "Un" 65) --{*binary union*}
-  Diff        :: "[i, i] => i"    (infixl "-" 65) --{*set difference*}
-  Subset      :: "[i, i] => o"    (infixl "<=" 50) --{*subset relation*}
+  Image       :: "[i, i] => i"    (infixl "``" 90) --\<open>image\<close>
+  vimage      :: "[i, i] => i"    (infixl "-``" 90) --\<open>inverse image\<close>
+  "apply"     :: "[i, i] => i"    (infixl "`" 90) --\<open>function application\<close>
+  "Int"       :: "[i, i] => i"    (infixl "Int" 70) --\<open>binary intersection\<close>
+  "Un"        :: "[i, i] => i"    (infixl "Un" 65) --\<open>binary union\<close>
+  Diff        :: "[i, i] => i"    (infixl "-" 65) --\<open>set difference\<close>
+  Subset      :: "[i, i] => o"    (infixl "<=" 50) --\<open>subset relation\<close>
 
 axiomatization
-  mem         :: "[i, i] => o"    (infixl ":" 50) --{*membership relation*}
+  mem         :: "[i, i] => o"    (infixl ":" 50) --\<open>membership relation\<close>
 
 abbreviation
-  not_mem :: "[i, i] => o"  (infixl "~:" 50)  --{*negated membership relation*}
+  not_mem :: "[i, i] => o"  (infixl "~:" 50)  --\<open>negated membership relation\<close>
   where "x ~: y == ~ (x : y)"
 
 abbreviation
-  cart_prod :: "[i, i] => i"    (infixr "*" 80) --{*Cartesian product*}
+  cart_prod :: "[i, i] => i"    (infixr "*" 80) --\<open>Cartesian product\<close>
   where "A * B == Sigma(A, %_. B)"
 
 abbreviation
-  function_space :: "[i, i] => i"  (infixr "->" 60) --{*function space*}
+  function_space :: "[i, i] => i"  (infixr "->" 60) --\<open>function space\<close>
   where "A -> B == Pi(A, %_. B)"
 
 
@@ -290,14 +290,14 @@
   restrict_def: "restrict(r,A) == {z \<in> r. \<exists>x\<in>A. \<exists>y. z = <x,y>}"
 
 
-subsection {* Substitution*}
+subsection \<open>Substitution\<close>
 
 (*Useful examples:  singletonI RS subst_elem,  subst_elem RSN (2,IntI) *)
 lemma subst_elem: "[| b\<in>A;  a=b |] ==> a\<in>A"
 by (erule ssubst, assumption)
 
 
-subsection{*Bounded universal quantifier*}
+subsection\<open>Bounded universal quantifier\<close>
 
 lemma ballI [intro!]: "[| !!x. x\<in>A ==> P(x) |] ==> \<forall>x\<in>A. P(x)"
 by (simp add: Ball_def)
@@ -336,7 +336,7 @@
   and [symmetric, defn] = atomize_ball
 
 
-subsection{*Bounded existential quantifier*}
+subsection\<open>Bounded existential quantifier\<close>
 
 lemma bexI [intro]: "[| P(x);  x: A |] ==> \<exists>x\<in>A. P(x)"
 by (simp add: Bex_def, blast)
@@ -363,7 +363,7 @@
 
 
 
-subsection{*Rules for subsets*}
+subsection\<open>Rules for subsets\<close>
 
 lemma subsetI [intro!]:
     "(!!x. x\<in>A ==> x\<in>B) ==> A \<subseteq> B"
@@ -403,11 +403,11 @@
 apply (rule iff_refl)
 done
 
-text{*For calculations*}
+text\<open>For calculations\<close>
 declare subsetD [trans] rev_subsetD [trans] subset_trans [trans]
 
 
-subsection{*Rules for equality*}
+subsection\<open>Rules for equality\<close>
 
 (*Anti-symmetry of the subset relation*)
 lemma equalityI [intro]: "[| A \<subseteq> B;  B \<subseteq> A |] ==> A = B"
@@ -432,7 +432,7 @@
   by auto
 
 
-subsection{*Rules for Replace -- the derived form of replacement*}
+subsection\<open>Rules for Replace -- the derived form of replacement\<close>
 
 lemma Replace_iff:
     "b \<in> {y. x\<in>A, P(x,y)}  <->  (\<exists>x\<in>A. P(x,b) & (\<forall>y. P(x,y) \<longrightarrow> y=b))"
@@ -468,7 +468,7 @@
 done
 
 
-subsection{*Rules for RepFun*}
+subsection\<open>Rules for RepFun\<close>
 
 lemma RepFunI: "a \<in> A ==> f(a) \<in> {f(x). x\<in>A}"
 by (simp add: RepFun_def Replace_iff, blast)
@@ -496,7 +496,7 @@
 by blast
 
 
-subsection{*Rules for Collect -- forming a subset by separation*}
+subsection\<open>Rules for Collect -- forming a subset by separation\<close>
 
 (*Separation is derivable from Replacement*)
 lemma separation [simp]: "a \<in> {x\<in>A. P(x)} <-> a\<in>A & P(a)"
@@ -520,7 +520,7 @@
 by (simp add: Collect_def)
 
 
-subsection{*Rules for Unions*}
+subsection\<open>Rules for Unions\<close>
 
 declare Union_iff [simp]
 
@@ -532,7 +532,7 @@
 by (simp, blast)
 
 
-subsection{*Rules for Unions of families*}
+subsection\<open>Rules for Unions of families\<close>
 (* @{term"\<Union>x\<in>A. B(x)"} abbreviates @{term"\<Union>({B(x). x\<in>A})"} *)
 
 lemma UN_iff [simp]: "b \<in> (\<Union>x\<in>A. B(x)) <-> (\<exists>x\<in>A. b \<in> B(x))"
@@ -559,7 +559,7 @@
   the search space.*)
 
 
-subsection{*Rules for the empty set*}
+subsection\<open>Rules for the empty set\<close>
 
 (*The set @{term"{x\<in>0. False}"} is empty; by foundation it equals 0
   See Suppes, page 21.*)
@@ -589,7 +589,7 @@
 by blast
 
 
-subsection{*Rules for Inter*}
+subsection\<open>Rules for Inter\<close>
 
 (*Not obviously useful for proving InterI, InterD, InterE*)
 lemma Inter_iff: "A \<in> \<Inter>(C) <-> (\<forall>x\<in>C. A: x) & C\<noteq>0"
@@ -611,7 +611,7 @@
 by (simp add: Inter_def, blast)
 
 
-subsection{*Rules for Intersections of families*}
+subsection\<open>Rules for Intersections of families\<close>
 
 (* @{term"\<Inter>x\<in>A. B(x)"} abbreviates @{term"\<Inter>({B(x). x\<in>A})"} *)
 
@@ -631,7 +631,7 @@
 (*No "Addcongs [INT_cong]" because @{term\<Inter>} is a combination of constants*)
 
 
-subsection{*Rules for Powersets*}
+subsection\<open>Rules for Powersets\<close>
 
 lemma PowI: "A \<subseteq> B ==> A \<in> Pow(B)"
 by (erule Pow_iff [THEN iffD2])
@@ -641,11 +641,11 @@
 
 declare Pow_iff [iff]
 
-lemmas Pow_bottom = empty_subsetI [THEN PowI]    --{* @{term"0 \<in> Pow(B)"} *}
-lemmas Pow_top = subset_refl [THEN PowI]         --{* @{term"A \<in> Pow(A)"} *}
+lemmas Pow_bottom = empty_subsetI [THEN PowI]    --\<open>@{term"0 \<in> Pow(B)"}\<close>
+lemmas Pow_top = subset_refl [THEN PowI]         --\<open>@{term"A \<in> Pow(A)"}\<close>
 
 
-subsection{*Cantor's Theorem: There is no surjection from a set to its powerset.*}
+subsection\<open>Cantor's Theorem: There is no surjection from a set to its powerset.\<close>
 
 (*The search is undirected.  Allowing redundant introduction rules may
   make it diverge.  Variable b represents ANY map, such as
--- a/src/ZF/Zorn.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/Zorn.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,12 +3,12 @@
     Copyright   1994  University of Cambridge
 *)
 
-section{*Zorn's Lemma*}
+section\<open>Zorn's Lemma\<close>
 
 theory Zorn imports OrderArith AC Inductive_ZF begin
 
-text{*Based upon the unpublished article ``Towards the Mechanization of the
-Proofs of Some Classical Theorems of Set Theory,'' by Abrial and Laffitte.*}
+text\<open>Based upon the unpublished article ``Towards the Mechanization of the
+Proofs of Some Classical Theorems of Set Theory,'' by Abrial and Laffitte.\<close>
 
 definition
   Subset_rel :: "i=>i"  where
@@ -31,16 +31,16 @@
     "increasing(A) == {f \<in> Pow(A)->Pow(A). \<forall>x. x<=A \<longrightarrow> x<=f`x}"
 
 
-text{*Lemma for the inductive definition below*}
+text\<open>Lemma for the inductive definition below\<close>
 lemma Union_in_Pow: "Y \<in> Pow(Pow(A)) ==> \<Union>(Y) \<in> Pow(A)"
 by blast
 
 
-text{*We could make the inductive definition conditional on
+text\<open>We could make the inductive definition conditional on
     @{term "next \<in> increasing(S)"}
     but instead we make this a side-condition of an introduction rule.  Thus
     the induction rule lets us assume that condition!  Many inductive proofs
-    are therefore unconditional.*}
+    are therefore unconditional.\<close>
 consts
   "TFin" :: "[i,i]=>i"
 
@@ -57,7 +57,7 @@
   type_intros   CollectD1 [THEN apply_funtype] Union_in_Pow
 
 
-subsection{*Mathematical Preamble *}
+subsection\<open>Mathematical Preamble\<close>
 
 lemma Union_lemma0: "(\<forall>x\<in>C. x<=A | B<=x) ==> \<Union>(C)<=A | B<=\<Union>(C)"
 by blast
@@ -67,7 +67,7 @@
 by blast
 
 
-subsection{*The Transfinite Construction *}
+subsection\<open>The Transfinite Construction\<close>
 
 lemma increasingD1: "f \<in> increasing(A) ==> f \<in> Pow(A)->Pow(A)"
 apply (unfold increasing_def)
@@ -82,7 +82,7 @@
 lemmas TFin_is_subset = TFin.dom_subset [THEN subsetD, THEN PowD]
 
 
-text{*Structural induction on @{term "TFin(S,next)"} *}
+text\<open>Structural induction on @{term "TFin(S,next)"}\<close>
 lemma TFin_induct:
   "[| n \<in> TFin(S,next);
       !!x. [| x \<in> TFin(S,next);  P(x);  next \<in> increasing(S) |] ==> P(next`x);
@@ -91,12 +91,12 @@
 by (erule TFin.induct, blast+)
 
 
-subsection{*Some Properties of the Transfinite Construction *}
+subsection\<open>Some Properties of the Transfinite Construction\<close>
 
 lemmas increasing_trans = subset_trans [OF _ increasingD2,
                                         OF _ _ TFin_is_subset]
 
-text{*Lemma 1 of section 3.1*}
+text\<open>Lemma 1 of section 3.1\<close>
 lemma TFin_linear_lemma1:
      "[| n \<in> TFin(S,next);  m \<in> TFin(S,next);
          \<forall>x \<in> TFin(S,next) . x<=m \<longrightarrow> x=m | next`x<=m |]
@@ -107,19 +107,19 @@
 apply (blast dest: increasing_trans)
 done
 
-text{*Lemma 2 of section 3.2.  Interesting in its own right!
-  Requires @{term "next \<in> increasing(S)"} in the second induction step.*}
+text\<open>Lemma 2 of section 3.2.  Interesting in its own right!
+  Requires @{term "next \<in> increasing(S)"} in the second induction step.\<close>
 lemma TFin_linear_lemma2:
     "[| m \<in> TFin(S,next);  next \<in> increasing(S) |]
      ==> \<forall>n \<in> TFin(S,next). n<=m \<longrightarrow> n=m | next`n \<subseteq> m"
 apply (erule TFin_induct)
 apply (rule impI [THEN ballI])
-txt{*case split using @{text TFin_linear_lemma1}*}
+txt\<open>case split using @{text TFin_linear_lemma1}\<close>
 apply (rule_tac n1 = n and m1 = x in TFin_linear_lemma1 [THEN disjE],
        assumption+)
 apply (blast del: subsetI
              intro: increasing_trans subsetI, blast)
-txt{*second induction step*}
+txt\<open>second induction step\<close>
 apply (rule impI [THEN ballI])
 apply (rule Union_lemma0 [THEN disjE])
 apply (erule_tac [3] disjI2)
@@ -133,13 +133,13 @@
 apply (blast dest: TFin_is_subset)+
 done
 
-text{*a more convenient form for Lemma 2*}
+text\<open>a more convenient form for Lemma 2\<close>
 lemma TFin_subsetD:
      "[| n<=m;  m \<in> TFin(S,next);  n \<in> TFin(S,next);  next \<in> increasing(S) |]
       ==> n=m | next`n \<subseteq> m"
 by (blast dest: TFin_linear_lemma2 [rule_format])
 
-text{*Consequences from section 3.3 -- Property 3.2, the ordering is total*}
+text\<open>Consequences from section 3.3 -- Property 3.2, the ordering is total\<close>
 lemma TFin_subset_linear:
      "[| m \<in> TFin(S,next);  n \<in> TFin(S,next);  next \<in> increasing(S) |]
       ==> n \<subseteq> m | m<=n"
@@ -151,7 +151,7 @@
 done
 
 
-text{*Lemma 3 of section 3.3*}
+text\<open>Lemma 3 of section 3.3\<close>
 lemma equal_next_upper:
      "[| n \<in> TFin(S,next);  m \<in> TFin(S,next);  m = next`m |] ==> n \<subseteq> m"
 apply (erule TFin_induct)
@@ -159,7 +159,7 @@
 apply (assumption+, force, blast)
 done
 
-text{*Property 3.3 of section 3.3*}
+text\<open>Property 3.3 of section 3.3\<close>
 lemma equal_next_Union:
      "[| m \<in> TFin(S,next);  next \<in> increasing(S) |]
       ==> m = next`m <-> m = \<Union>(TFin(S,next))"
@@ -174,12 +174,12 @@
 done
 
 
-subsection{*Hausdorff's Theorem: Every Set Contains a Maximal Chain*}
+subsection\<open>Hausdorff's Theorem: Every Set Contains a Maximal Chain\<close>
 
-text{*NOTE: We assume the partial ordering is @{text "\<subseteq>"}, the subset
-relation!*}
+text\<open>NOTE: We assume the partial ordering is @{text "\<subseteq>"}, the subset
+relation!\<close>
 
-text{** Defining the "next" operation for Hausdorff's Theorem **}
+text\<open>Defining the "next" operation for Hausdorff's Theorem\<close>
 
 lemma chain_subset_Pow: "chain(A) \<subseteq> Pow(A)"
 apply (unfold chain_def)
@@ -211,7 +211,7 @@
 apply (simp add: super_def)
 done
 
-text{*This justifies Definition 4.4*}
+text\<open>This justifies Definition 4.4\<close>
 lemma Hausdorff_next_exists:
      "ch \<in> (\<Pi> X \<in> Pow(chain(S))-{0}. X) ==>
       \<exists>next \<in> increasing(S). \<forall>X \<in> Pow(S).
@@ -226,7 +226,7 @@
 apply (simp (no_asm_simp))
 apply (blast dest: super_subset_chain [THEN subsetD] 
                    chain_subset_Pow [THEN subsetD] choice_super)
-txt{*Now, verify that it increases*}
+txt\<open>Now, verify that it increases\<close>
 apply (simp (no_asm_simp) add: Pow_iff subset_refl)
 apply safe
 apply (drule choice_super)
@@ -234,7 +234,7 @@
 apply (simp add: super_def, blast)
 done
 
-text{*Lemma 4*}
+text\<open>Lemma 4\<close>
 lemma TFin_chain_lemma4:
      "[| c \<in> TFin(S,next);
          ch \<in> (\<Pi> X \<in> Pow(chain(S))-{0}. X);
@@ -248,7 +248,7 @@
 apply (unfold chain_def)
 apply (rule CollectI, blast, safe)
 apply (rule_tac m1=B and n1=Ba in TFin_subset_linear [THEN disjE], fast+)
-      txt{*@{text "Blast_tac's"} slow*}
+      txt\<open>@{text "Blast_tac's"} slow\<close>
 done
 
 theorem Hausdorff: "\<exists>c. c \<in> maxchain(S)"
@@ -272,10 +272,10 @@
 done
 
 
-subsection{*Zorn's Lemma: If All Chains in S Have Upper Bounds In S,
-       then S contains a Maximal Element*}
+subsection\<open>Zorn's Lemma: If All Chains in S Have Upper Bounds In S,
+       then S contains a Maximal Element\<close>
 
-text{*Used in the proof of Zorn's Lemma*}
+text\<open>Used in the proof of Zorn's Lemma\<close>
 lemma chain_extend:
     "[| c \<in> chain(A);  z \<in> A;  \<forall>x \<in> c. x<=z |] ==> cons(z,c) \<in> chain(A)"
 by (unfold chain_def, blast)
@@ -296,7 +296,7 @@
 apply (fast elim: equalityE)
 done
 
-text {* Alternative version of Zorn's Lemma *}
+text \<open>Alternative version of Zorn's Lemma\<close>
 
 theorem Zorn2:
   "\<forall>c \<in> chain(S). \<exists>y \<in> S. \<forall>x \<in> c. x \<subseteq> y ==> \<exists>y \<in> S. \<forall>z \<in> S. y<=z \<longrightarrow> y=z"
@@ -317,21 +317,21 @@
 done
 
 
-subsection{*Zermelo's Theorem: Every Set can be Well-Ordered*}
+subsection\<open>Zermelo's Theorem: Every Set can be Well-Ordered\<close>
 
-text{*Lemma 5*}
+text\<open>Lemma 5\<close>
 lemma TFin_well_lemma5:
      "[| n \<in> TFin(S,next);  Z \<subseteq> TFin(S,next);  z:Z;  ~ \<Inter>(Z) \<in> Z |]
       ==> \<forall>m \<in> Z. n \<subseteq> m"
 apply (erule TFin_induct)
-prefer 2 apply blast txt{*second induction step is easy*}
+prefer 2 apply blast txt\<open>second induction step is easy\<close>
 apply (rule ballI)
 apply (rule bspec [THEN TFin_subsetD, THEN disjE], auto)
 apply (subgoal_tac "m = \<Inter>(Z) ")
 apply blast+
 done
 
-text{*Well-ordering of @{term "TFin(S,next)"} *}
+text\<open>Well-ordering of @{term "TFin(S,next)"}\<close>
 lemma well_ord_TFin_lemma: "[| Z \<subseteq> TFin(S,next);  z \<in> Z |] ==> \<Inter>(Z) \<in> Z"
 apply (rule classical)
 apply (subgoal_tac "Z = {\<Union>(TFin (S,next))}")
@@ -341,27 +341,27 @@
 apply (rule_tac [2] subset_refl [THEN TFin_UnionI, THEN TFin_well_lemma5, THEN bspec], blast+)
 done
 
-text{*This theorem just packages the previous result*}
+text\<open>This theorem just packages the previous result\<close>
 lemma well_ord_TFin:
      "next \<in> increasing(S) 
       ==> well_ord(TFin(S,next), Subset_rel(TFin(S,next)))"
 apply (rule well_ordI)
 apply (unfold Subset_rel_def linear_def)
-txt{*Prove the well-foundedness goal*}
+txt\<open>Prove the well-foundedness goal\<close>
 apply (rule wf_onI)
 apply (frule well_ord_TFin_lemma, assumption)
 apply (drule_tac x = "\<Inter>(Z) " in bspec, assumption)
 apply blast
-txt{*Now prove the linearity goal*}
+txt\<open>Now prove the linearity goal\<close>
 apply (intro ballI)
 apply (case_tac "x=y")
  apply blast
-txt{*The @{term "x\<noteq>y"} case remains*}
+txt\<open>The @{term "x\<noteq>y"} case remains\<close>
 apply (rule_tac n1=x and m1=y in TFin_subset_linear [THEN disjE],
        assumption+, blast+)
 done
 
-text{** Defining the "next" operation for Zermelo's Theorem **}
+text\<open>Defining the "next" operation for Zermelo's Theorem\<close>
 
 lemma choice_Diff:
      "[| ch \<in> (\<Pi> X \<in> Pow(S) - {0}. X);  X \<subseteq> S;  X\<noteq>S |] ==> ch ` (S-X) \<in> S-X"
@@ -369,7 +369,7 @@
 apply (blast elim!: equalityE)
 done
 
-text{*This justifies Definition 6.1*}
+text\<open>This justifies Definition 6.1\<close>
 lemma Zermelo_next_exists:
      "ch \<in> (\<Pi> X \<in> Pow(S)-{0}. X) ==>
            \<exists>next \<in> increasing(S). \<forall>X \<in> Pow(S).
@@ -380,16 +380,16 @@
 apply (unfold increasing_def)
 apply (rule CollectI)
 apply (rule lam_type)
-txt{*Type checking is surprisingly hard!*}
+txt\<open>Type checking is surprisingly hard!\<close>
 apply (simp (no_asm_simp) add: Pow_iff cons_subset_iff subset_refl)
 apply (blast intro!: choice_Diff [THEN DiffD1])
-txt{*Verify that it increases*}
+txt\<open>Verify that it increases\<close>
 apply (intro allI impI)
 apply (simp add: Pow_iff subset_consI subset_refl)
 done
 
 
-text{*The construction of the injection*}
+text\<open>The construction of the injection\<close>
 lemma choice_imp_injection:
      "[| ch \<in> (\<Pi> X \<in> Pow(S)-{0}. X);
          next \<in> increasing(S);
@@ -404,8 +404,8 @@
 prefer 2 apply (blast elim: equalityE)
 apply (subgoal_tac "\<Union>({y \<in> TFin (S,next) . x \<notin> y}) \<noteq> S")
 prefer 2 apply (blast elim: equalityE)
-txt{*For proving @{text "x \<in> next`\<Union>(...)"}.
-  Abrial and Laffitte's justification appears to be faulty.*}
+txt\<open>For proving @{text "x \<in> next`\<Union>(...)"}.
+  Abrial and Laffitte's justification appears to be faulty.\<close>
 apply (subgoal_tac "~ next ` Union({y \<in> TFin (S,next) . x \<notin> y}) 
                     \<subseteq> \<Union>({y \<in> TFin (S,next) . x \<notin> y}) ")
  prefer 2
@@ -415,11 +415,11 @@
 apply (subgoal_tac "x \<in> next ` Union({y \<in> TFin (S,next) . x \<notin> y}) ")
  prefer 2
  apply (blast intro!: Collect_subset [THEN TFin_UnionI] TFin.nextI)
-txt{*End of the lemmas!*}
+txt\<open>End of the lemmas!\<close>
 apply (simp add: Collect_subset [THEN TFin_UnionI, THEN TFin_is_subset])
 done
 
-text{*The wellordering theorem*}
+text\<open>The wellordering theorem\<close>
 theorem AC_well_ord: "\<exists>r. well_ord(S,r)"
 apply (rule AC_Pi_Pow [THEN exE])
 apply (rule Zermelo_next_exists [THEN bexE], assumption)
@@ -430,9 +430,9 @@
 done
 
 
-subsection {* Zorn's Lemma for Partial Orders *}
+subsection \<open>Zorn's Lemma for Partial Orders\<close>
 
-text {* Reimported from HOL by Clemens Ballarin. *}
+text \<open>Reimported from HOL by Clemens Ballarin.\<close>
 
 
 definition Chain :: "i => i" where
@@ -449,7 +449,7 @@
   shows "\<exists>m\<in>field(r). \<forall>a\<in>field(r). <m, a> \<in> r \<longrightarrow> a = m"
 proof -
   have "Preorder(r)" using po by (simp add: partial_order_on_def)
-  --{* Mirror r in the set of subsets below (wrt r) elements of A (?). *}
+  --\<open>Mirror r in the set of subsets below (wrt r) elements of A (?).\<close>
   let ?B = "\<lambda>x\<in>field(r). r -`` {x}" let ?S = "?B `` field(r)"
   have "\<forall>C\<in>chain(?S). \<exists>U\<in>?S. \<forall>A\<in>C. A \<subseteq> U"
   proof (clarsimp simp: chain_def Subset_rel_def bex_image_simp)
@@ -478,7 +478,7 @@
       assume "a \<in> field(r)" "r -`` {a} \<in> C" "b \<in> field(r)" "r -`` {b} \<in> C"
       hence "r -`` {a} \<subseteq> r -`` {b} | r -`` {b} \<subseteq> r -`` {a}" using 2 by auto
       then show "<a, b> \<in> r | <b, a> \<in> r"
-        using `Preorder(r)` `a \<in> field(r)` `b \<in> field(r)`
+        using \<open>Preorder(r)\<close> \<open>a \<in> field(r)\<close> \<open>b \<in> field(r)\<close>
         by (simp add: subset_vimage1_vimage1_iff)
     qed
     then obtain u where uA: "u \<in> field(r)" "\<forall>a\<in>?A. <a, u> \<in> r"
@@ -498,20 +498,20 @@
         apply (erule lamE)
         apply simp
         done
-      then show "<a, u> \<in> r" using uA aB `Preorder(r)`
+      then show "<a, u> \<in> r" using uA aB \<open>Preorder(r)\<close>
         by (auto simp: preorder_on_def refl_def) (blast dest: trans_onD)+
     qed
     then show "\<exists>U\<in>field(r). \<forall>A\<in>C. A \<subseteq> r -`` {U}"
-      using `u \<in> field(r)` ..
+      using \<open>u \<in> field(r)\<close> ..
   qed
   from Zorn2 [OF this]
   obtain m B where "m \<in> field(r)" "B = r -`` {m}"
     "\<forall>x\<in>field(r). B \<subseteq> r -`` {x} \<longrightarrow> B = r -`` {x}"
     by (auto elim!: lamE simp: ball_image_simp)
   then have "\<forall>a\<in>field(r). <m, a> \<in> r \<longrightarrow> a = m"
-    using po `Preorder(r)` `m \<in> field(r)`
+    using po \<open>Preorder(r)\<close> \<open>m \<in> field(r)\<close>
     by (auto simp: subset_vimage1_vimage1_iff Partial_order_eq_vimage1_vimage1_iff)
-  then show ?thesis using `m \<in> field(r)` by blast
+  then show ?thesis using \<open>m \<in> field(r)\<close> by blast
 qed
 
 end
--- a/src/ZF/equalities.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/equalities.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,12 +3,12 @@
     Copyright   1992  University of Cambridge
 *)
 
-section{*Basic Equalities and Inclusions*}
+section\<open>Basic Equalities and Inclusions\<close>
 
 theory equalities imports pair begin
 
-text{*These cover union, intersection, converse, domain, range, etc.  Philippe
-de Groote proved many of the inclusions.*}
+text\<open>These cover union, intersection, converse, domain, range, etc.  Philippe
+de Groote proved many of the inclusions.\<close>
 
 lemma in_mono: "A\<subseteq>B ==> x\<in>A \<longrightarrow> x\<in>B"
 by blast
@@ -16,11 +16,11 @@
 lemma the_eq_0 [simp]: "(THE x. False) = 0"
 by (blast intro: the_0)
 
-subsection{*Bounded Quantifiers*}
-text {* \medskip
+subsection\<open>Bounded Quantifiers\<close>
+text \<open>\medskip
 
   The following are not added to the default simpset because
-  (a) they duplicate the body and (b) there are no similar rules for @{text Int}.*}
+  (a) they duplicate the body and (b) there are no similar rules for @{text Int}.\<close>
 
 lemma ball_Un: "(\<forall>x \<in> A\<union>B. P(x)) \<longleftrightarrow> (\<forall>x \<in> A. P(x)) & (\<forall>x \<in> B. P(x))"
   by blast
@@ -34,7 +34,7 @@
 lemma bex_UN: "(\<exists>z \<in> (\<Union>x\<in>A. B(x)). P(z)) \<longleftrightarrow> (\<exists>x\<in>A. \<exists>z\<in>B(x). P(z))"
   by blast
 
-subsection{*Converse of a Relation*}
+subsection\<open>Converse of a Relation\<close>
 
 lemma converse_iff [simp]: "<a,b>\<in> converse(r) \<longleftrightarrow> <b,a>\<in>r"
 by (unfold converse_def, blast)
@@ -68,7 +68,7 @@
 by blast
 
 
-subsection{*Finite Set Constructions Using @{term cons}*}
+subsection\<open>Finite Set Constructions Using @{term cons}\<close>
 
 lemma cons_subsetI: "[| a\<in>C; B\<subseteq>C |] ==> cons(a,B) \<subseteq> C"
 by blast
@@ -138,7 +138,7 @@
 by (unfold succ_def, blast)
 
 
-subsection{*Binary Intersection*}
+subsection\<open>Binary Intersection\<close>
 
 (** Intersection is the greatest lower bound of two sets **)
 
@@ -207,7 +207,7 @@
 lemma cons_Int_distrib: "cons(x, A \<inter> B) = cons(x, A) \<inter> cons(x, B)"
 by auto
 
-subsection{*Binary Union*}
+subsection\<open>Binary Union\<close>
 
 (** Union is the least upper bound of two sets *)
 
@@ -265,7 +265,7 @@
 lemma Un_eq_Union: "A \<union> B = \<Union>({A, B})"
 by blast
 
-subsection{*Set Difference*}
+subsection\<open>Set Difference\<close>
 
 lemma Diff_subset: "A-B \<subseteq> A"
 by blast
@@ -342,7 +342,7 @@
 by (blast elim!: equalityE)
 
 
-subsection{*Big Union and Intersection*}
+subsection\<open>Big Union and Intersection\<close>
 
 (** Big Union is the least upper bound of a set  **)
 
@@ -414,7 +414,7 @@
      "\<Inter>(cons(a,B)) = (if B=0 then a else a \<inter> \<Inter>(B))"
 by force
 
-subsection{*Unions and Intersections of Families*}
+subsection\<open>Unions and Intersections of Families\<close>
 
 lemma subset_UN_iff_eq: "A \<subseteq> (\<Union>i\<in>I. B(i)) \<longleftrightarrow> A = (\<Union>i\<in>I. A \<inter> B(i))"
 by (blast elim!: equalityE)
@@ -737,7 +737,7 @@
 by blast
 
 
-subsection{*Image of a Set under a Function or Relation*}
+subsection\<open>Image of a Set under a Function or Relation\<close>
 
 lemma image_iff: "b \<in> r``A \<longleftrightarrow> (\<exists>x\<in>A. <x,b>\<in>r)"
 by (unfold image_def, blast)
@@ -789,7 +789,7 @@
 by blast
 
 
-subsection{*Inverse Image of a Set under a Function or Relation*}
+subsection\<open>Inverse Image of a Set under a Function or Relation\<close>
 
 lemma vimage_iff:
     "a \<in> r-``B \<longleftrightarrow> (\<exists>y\<in>B. <a,y>\<in>r)"
@@ -874,7 +874,7 @@
 done
 
 
-subsection{*Powerset Operator*}
+subsection\<open>Powerset Operator\<close>
 
 lemma Pow_0 [simp]: "Pow(0) = {0}"
 by blast
@@ -907,7 +907,7 @@
 by (blast elim!: not_emptyE)
 
 
-subsection{*RepFun*}
+subsection\<open>RepFun\<close>
 
 lemma RepFun_subset: "[| !!x. x\<in>A ==> f(x) \<in> B |] ==> {f(x). x\<in>A} \<subseteq> B"
 by blast
@@ -919,7 +919,7 @@
 by force
 
 
-subsection{*Collect*}
+subsection\<open>Collect\<close>
 
 lemma Collect_subset: "Collect(A,P) \<subseteq> A"
 by blast
@@ -969,7 +969,7 @@
                     Inter_greatest Int_greatest RepFun_subset
                     Un_upper1 Un_upper2 Int_lower1 Int_lower2
 
-ML {*
+ML \<open>
 val subset_cs =
   claset_of (@{context}
     delrules [@{thm subsetI}, @{thm subsetCE}]
@@ -978,7 +978,7 @@
     addSEs [@{thm cons_subsetE}]);
 
 val ZF_cs = claset_of (@{context} delrules [@{thm equalityI}]);
-*}
+\<close>
 
 end
 
--- a/src/ZF/ex/CoUnit.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/ex/CoUnit.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,11 +3,11 @@
     Copyright   1994  University of Cambridge
 *)
 
-section {* Trivial codatatype definitions, one of which goes wrong! *}
+section \<open>Trivial codatatype definitions, one of which goes wrong!\<close>
 
 theory CoUnit imports Main begin
 
-text {*
+text \<open>
   See discussion in: L C Paulson.  A Concrete Final Coalgebra Theorem
   for ZF Set Theory.  Report 334, Cambridge University Computer
   Laboratory.  1994.
@@ -17,7 +17,7 @@
   This degenerate definition does not work well because the one
   constructor's definition is trivial!  The same thing occurs with
   Aczel's Special Final Coalgebra Theorem.
-*}
+\<close>
 
 consts
   counit :: i
@@ -25,14 +25,14 @@
   "counit" = Con ("x \<in> counit")
 
 inductive_cases ConE: "Con(x) \<in> counit"
-  -- {* USELESS because folding on @{term "Con(xa) == xa"} fails. *}
+  -- \<open>USELESS because folding on @{term "Con(xa) == xa"} fails.\<close>
 
 lemma Con_iff: "Con(x) = Con(y) \<longleftrightarrow> x = y"
-  -- {* Proving freeness results. *}
+  -- \<open>Proving freeness results.\<close>
   by (auto elim!: counit.free_elims)
 
 lemma counit_eq_univ: "counit = quniv(0)"
-  -- {* Should be a singleton, not everything! *}
+  -- \<open>Should be a singleton, not everything!\<close>
   apply (rule counit.dom_subset [THEN equalityI])
   apply (rule subsetI)
   apply (erule counit.coinduct)
@@ -42,10 +42,10 @@
   done
 
 
-text {*
+text \<open>
   \medskip A similar example, but the constructor is non-degenerate
   and it works!  The resulting set is a singleton.
-*}
+\<close>
 
 consts
   counit2 :: i
@@ -56,7 +56,7 @@
 inductive_cases Con2E: "Con2(x, y) \<in> counit2"
 
 lemma Con2_iff: "Con2(x, y) = Con2(x', y') \<longleftrightarrow> x = x' & y = y'"
-  -- {* Proving freeness results. *}
+  -- \<open>Proving freeness results.\<close>
   by (fast elim!: counit2.free_elims)
 
 lemma Con2_bnd_mono: "bnd_mono(univ(0), %x. Con2(x, x))"
@@ -74,15 +74,15 @@
 
 lemma counit2_Int_Vset_subset [rule_format]:
   "Ord(i) ==> \<forall>x y. x \<in> counit2 \<longrightarrow> y \<in> counit2 \<longrightarrow> x \<inter> Vset(i) \<subseteq> y"
-  -- {* Lemma for proving finality. *}
+  -- \<open>Lemma for proving finality.\<close>
   apply (erule trans_induct)
   apply (tactic "safe_tac (put_claset subset_cs @{context})")
   apply (erule counit2.cases)
   apply (erule counit2.cases)
   apply (unfold counit2.con_defs)
-  apply (tactic {* fast_tac (put_claset subset_cs @{context}
+  apply (tactic \<open>fast_tac (put_claset subset_cs @{context}
     addSIs [@{thm QPair_Int_Vset_subset_UN} RS @{thm subset_trans}, @{thm QPair_mono}]
-    addSEs [@{thm Ord_in_Ord}, @{thm Pair_inject}]) 1 *})
+    addSEs [@{thm Ord_in_Ord}, @{thm Pair_inject}]) 1\<close>)
   done
 
 lemma counit2_implies_equal: "[| x \<in> counit2;  y \<in> counit2 |] ==> x = y"
--- a/src/ZF/ex/Group.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/ex/Group.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -1,14 +1,14 @@
 (*  Title:      ZF/ex/Group.thy *)
 
-section {* Groups *}
+section \<open>Groups\<close>
 
 theory Group imports Main begin
 
-text{*Based on work by Clemens Ballarin, Florian Kammueller, L C Paulson and
-Markus Wenzel.*}
+text\<open>Based on work by Clemens Ballarin, Florian Kammueller, L C Paulson and
+Markus Wenzel.\<close>
 
 
-subsection {* Monoids *}
+subsection \<open>Monoids\<close>
 
 (*First, we must simulate a record declaration:
 record monoid =
@@ -47,7 +47,7 @@
       and l_one [simp]: "x \<in> carrier(G) \<Longrightarrow> \<one> \<cdot> x = x"
       and r_one [simp]: "x \<in> carrier(G) \<Longrightarrow> x \<cdot> \<one> = x"
 
-text{*Simulating the record*}
+text\<open>Simulating the record\<close>
 lemma carrier_eq [simp]: "carrier(<A,Z>) = A"
   by (simp add: carrier_def)
 
@@ -81,9 +81,9 @@
   finally show ?thesis .
 qed
 
-text {*
+text \<open>
   A group is a monoid all of whose elements are invertible.
-*}
+\<close>
 
 locale group = monoid +
   assumes inv_ex:
@@ -174,7 +174,7 @@
   by simp
 
 
-subsection {* Cancellation Laws and Basic Properties *}
+subsection \<open>Cancellation Laws and Basic Properties\<close>
 
 lemma (in group) l_cancel [simp]:
   assumes "x \<in> carrier(G)" "y \<in> carrier(G)" "z \<in> carrier(G)"
@@ -226,7 +226,7 @@
 lemma (in group) inv_inv [simp]: "x \<in> carrier(G) \<Longrightarrow> inv (inv x) = x"
   by (auto intro: inv_equality)
 
-text{*This proof is by cancellation*}
+text\<open>This proof is by cancellation\<close>
 lemma (in group) inv_mult_group:
   "\<lbrakk>x \<in> carrier(G); y \<in> carrier(G)\<rbrakk> \<Longrightarrow> inv (x \<cdot> y) = inv y \<cdot> inv x"
 proof -
@@ -237,7 +237,7 @@
 qed
 
 
-subsection {* Substructures *}
+subsection \<open>Substructures\<close>
 
 locale subgroup = fixes H and G (structure)
   assumes subset: "H \<subseteq> carrier(G)"
@@ -272,24 +272,24 @@
   by (rule groupI) (auto intro: m_assoc l_inv mem_carrier)
 qed
 
-text {*
+text \<open>
   Since @{term H} is nonempty, it contains some element @{term x}.  Since
   it is closed under inverse, it contains @{text "inv x"}.  Since
   it is closed under product, it contains @{text "x \<cdot> inv x = \<one>"}.
-*}
+\<close>
 
-text {*
+text \<open>
   Since @{term H} is nonempty, it contains some element @{term x}.  Since
   it is closed under inverse, it contains @{text "inv x"}.  Since
   it is closed under product, it contains @{text "x \<cdot> inv x = \<one>"}.
-*}
+\<close>
 
 lemma (in group) one_in_subset:
   "\<lbrakk>H \<subseteq> carrier(G); H \<noteq> 0; \<forall>a \<in> H. inv a \<in> H; \<forall>a\<in>H. \<forall>b\<in>H. a \<cdot> b \<in> H\<rbrakk>
    \<Longrightarrow> \<one> \<in> H"
 by (force simp add: l_inv)
 
-text {* A characterization of subgroups: closed, non-empty subset. *}
+text \<open>A characterization of subgroups: closed, non-empty subset.\<close>
 
 declare monoid.one_closed [simp] group.inv_closed [simp]
   monoid.l_one [simp] monoid.r_one [simp] group.inv_inv [simp]
@@ -299,7 +299,7 @@
   by (blast dest: subgroup.one_closed)
 
 
-subsection {* Direct Products *}
+subsection \<open>Direct Products\<close>
 
 definition
   DirProdGroup :: "[i,i] => i"  (infixr "\<Otimes>" 80) where
@@ -341,7 +341,7 @@
   apply (simp_all add: assms group.l_inv)
   done
 
-subsection {* Isomorphisms *}
+subsection \<open>Isomorphisms\<close>
 
 definition
   hom :: "[i,i] => i" where
@@ -367,7 +367,7 @@
   by (simp add: hom_def)
 
 
-subsection {* Isomorphisms *}
+subsection \<open>Isomorphisms\<close>
 
 definition
   iso :: "[i,i] => i"  (infixr "\<cong>" 60) where
@@ -411,8 +411,8 @@
     by (auto intro: lam_type simp add: iso_def hom_def inj_def surj_def bij_def)
 qed
 
-text{*Basis for homomorphism proofs: we assume two groups @{term G} and
-  @{term H}, with a homomorphism @{term h} between them*}
+text\<open>Basis for homomorphism proofs: we assume two groups @{term G} and
+  @{term H}, with a homomorphism @{term h} between them\<close>
 locale group_hom = G: group G + H: group H
   for G (structure) and H (structure) and h +
   assumes homh: "h \<in> hom(G,H)"
@@ -448,15 +448,15 @@
   with x show ?thesis by (simp del: inv)
 qed
 
-subsection {* Commutative Structures *}
+subsection \<open>Commutative Structures\<close>
 
-text {*
+text \<open>
   Naming convention: multiplicative structures that are commutative
   are called \emph{commutative}, additive structures are called
   \emph{Abelian}.
-*}
+\<close>
 
-subsection {* Definition *}
+subsection \<open>Definition\<close>
 
 locale comm_monoid = monoid +
   assumes m_comm: "\<lbrakk>x \<in> carrier(G); y \<in> carrier(G)\<rbrakk> \<Longrightarrow> x \<cdot> y = y \<cdot> x"
@@ -499,7 +499,7 @@
 qed
 
 
-subsection {* Bijections of a Set, Permutation Groups, Automorphism Groups *}
+subsection \<open>Bijections of a Set, Permutation Groups, Automorphism Groups\<close>
 
 definition
   BijGroup :: "i=>i" where
@@ -509,7 +509,7 @@
      id(S), 0>"
 
 
-subsection {*Bijections Form a Group *}
+subsection \<open>Bijections Form a Group\<close>
 
 theorem group_BijGroup: "group(BijGroup(S))"
 apply (simp add: BijGroup_def)
@@ -520,7 +520,7 @@
 done
 
 
-subsection{*Automorphisms Form a Group*}
+subsection\<open>Automorphisms Form a Group\<close>
 
 lemma Bij_Inv_mem: "\<lbrakk>f \<in> bij(S,S);  x \<in> S\<rbrakk> \<Longrightarrow> converse(f) ` x \<in> S"
 by (blast intro: apply_funtype bij_is_fun bij_converse_bij)
@@ -573,7 +573,7 @@
 
 
 
-subsection{*Cosets and Quotient Groups*}
+subsection\<open>Cosets and Quotient Groups\<close>
 
 definition
   r_coset  :: "[i,i,i] => i"  (infixl "#>\<index>" 60) where
@@ -603,7 +603,7 @@
   normal  (infixl "\<lhd>" 60)
 
 
-subsection {*Basic Properties of Cosets*}
+subsection \<open>Basic Properties of Cosets\<close>
 
 lemma (in group) coset_mult_assoc:
      "\<lbrakk>M \<subseteq> carrier(G); g \<in> carrier(G); h \<in> carrier(G)\<rbrakk>
@@ -628,7 +628,7 @@
 
 lemma (in group) coset_join2:
      "\<lbrakk>x \<in> carrier(G);  subgroup(H,G);  x\<in>H\<rbrakk> \<Longrightarrow> H #> x = H"
-  --{*Alternative proof is to put @{term "x=\<one>"} in @{text repr_independence}.*}
+  --\<open>Alternative proof is to put @{term "x=\<one>"} in @{text repr_independence}.\<close>
 by (force simp add: subgroup.m_closed r_coset_def solve_equation)
 
 lemma (in group) r_coset_subset_G:
@@ -644,7 +644,7 @@
 by (auto simp add: RCOSETS_def)
 
 
-text{*Really needed?*}
+text\<open>Really needed?\<close>
 lemma (in group) transpose_inv:
      "\<lbrakk>x \<cdot> y = z;  x \<in> carrier(G);  y \<in> carrier(G);  z \<in> carrier(G)\<rbrakk>
       \<Longrightarrow> (inv x) \<cdot> z = y"
@@ -652,7 +652,7 @@
 
 
 
-subsection {* Normal subgroups *}
+subsection \<open>Normal subgroups\<close>
 
 lemma normal_imp_subgroup: "H \<lhd> G ==> subgroup(H,G)"
   by (simp add: normal_def subgroup_def)
@@ -678,7 +678,7 @@
 apply (blast intro: inv_op_closed1)
 done
 
-text{*Alternative characterization of normal subgroups*}
+text\<open>Alternative characterization of normal subgroups\<close>
 lemma (in group) normal_inv_iff:
      "(N \<lhd> G) \<longleftrightarrow>
       (subgroup(N,G) & (\<forall>x \<in> carrier(G). \<forall>h \<in> N. x \<cdot> h \<cdot> (inv x) \<in> N))"
@@ -727,7 +727,7 @@
 qed
 
 
-subsection{*More Properties of Cosets*}
+subsection\<open>More Properties of Cosets\<close>
 
 lemma (in group) l_coset_subset_G:
      "\<lbrakk>H \<subseteq> carrier(G); x \<in> carrier(G)\<rbrakk> \<Longrightarrow> x <# H \<subseteq> carrier(G)"
@@ -788,7 +788,7 @@
 done
 
 
-subsubsection {* Set of inverses of an @{text r_coset}. *}
+subsubsection \<open>Set of inverses of an @{text r_coset}.\<close>
 
 lemma (in normal) rcos_inv:
   assumes x:     "x \<in> carrier(G)"
@@ -817,7 +817,7 @@
 
 
 
-subsubsection {*Theorems for @{text "<#>"} with @{text "#>"} or @{text "<#"}.*}
+subsubsection \<open>Theorems for @{text "<#>"} with @{text "#>"} or @{text "<#"}.\<close>
 
 lemma (in group) setmult_rcos_assoc:
      "\<lbrakk>H \<subseteq> carrier(G); K \<subseteq> carrier(G); x \<in> carrier(G)\<rbrakk>
@@ -852,12 +852,12 @@
 by (simp add: rcos_mult_step1 rcos_mult_step2 rcos_mult_step3)
 
 lemma (in normal) rcosets_mult_eq: "M \<in> rcosets H \<Longrightarrow> H <#> M = M"
-  -- {* generalizes @{text subgroup_mult_id} *}
+  -- \<open>generalizes @{text subgroup_mult_id}\<close>
   by (auto simp add: RCOSETS_def subset
         setmult_rcos_assoc subgroup_mult_id normal_axioms normal.axioms)
 
 
-subsubsection{*Two distinct right cosets are disjoint*}
+subsubsection\<open>Two distinct right cosets are disjoint\<close>
 
 definition
   r_congruent :: "[i,i] => i" ("rcong\<index> _" [60] 60) where
@@ -897,9 +897,9 @@
   qed
 qed
 
-text{*Equivalence classes of @{text rcong} correspond to left cosets.
+text\<open>Equivalence classes of @{text rcong} correspond to left cosets.
   Was there a mistake in the definitions? I'd have expected them to
-  correspond to right cosets.*}
+  correspond to right cosets.\<close>
 lemma (in subgroup) l_coset_eq_rcong:
   assumes "group(G)"
   assumes a: "a \<in> carrier(G)"
@@ -937,7 +937,7 @@
 qed
 
 
-subsection {*Order of a Group and Lagrange's Theorem*}
+subsection \<open>Order of a Group and Lagrange's Theorem\<close>
 
 definition
   order :: "i => i" where
@@ -972,8 +972,8 @@
 apply (simp add: r_coset_subset_G [THEN subset_Finite])
 done
 
-text{*More general than the HOL version, which also requires @{term G} to
-      be finite.*}
+text\<open>More general than the HOL version, which also requires @{term G} to
+      be finite.\<close>
 lemma (in group) card_cosets_equal:
   assumes H:   "H \<subseteq> carrier(G)"
   shows "c \<in> rcosets H \<Longrightarrow> |c| = |H|"
@@ -1017,11 +1017,11 @@
 done
 
 
-subsection {*Quotient Groups: Factorization of a Group*}
+subsection \<open>Quotient Groups: Factorization of a Group\<close>
 
 definition
   FactGroup :: "[i,i] => i" (infixl "Mod" 65) where
-    --{*Actually defined for groups rather than monoids*}
+    --\<open>Actually defined for groups rather than monoids\<close>
   "G Mod H ==
      <rcosets\<^bsub>G\<^esub> H, \<lambda><K1,K2> \<in> (rcosets\<^bsub>G\<^esub> H) \<times> (rcosets\<^bsub>G\<^esub> H). K1 <#>\<^bsub>G\<^esub> K2, H, 0>"
 
@@ -1071,21 +1071,21 @@
 apply (simp_all add: FactGroup_def setinv_closed rcosets_inv_mult_group_eq)
 done
 
-text{*The coset map is a homomorphism from @{term G} to the quotient group
-  @{term "G Mod H"}*}
+text\<open>The coset map is a homomorphism from @{term G} to the quotient group
+  @{term "G Mod H"}\<close>
 lemma (in normal) r_coset_hom_Mod:
   "(\<lambda>a \<in> carrier(G). H #> a) \<in> hom(G, G Mod H)"
 by (auto simp add: FactGroup_def RCOSETS_def hom_def rcos_sum intro: lam_type)
 
 
-subsection{*The First Isomorphism Theorem*}
+subsection\<open>The First Isomorphism Theorem\<close>
 
-text{*The quotient by the kernel of a homomorphism is isomorphic to the
-  range of that homomorphism.*}
+text\<open>The quotient by the kernel of a homomorphism is isomorphic to the
+  range of that homomorphism.\<close>
 
 definition
   kernel :: "[i,i,i] => i" where
-    --{*the kernel of a homomorphism*}
+    --\<open>the kernel of a homomorphism\<close>
   "kernel(G,H,h) == {x \<in> carrier(G). h ` x = \<one>\<^bsub>H\<^esub>}"
 
 lemma (in group_hom) subgroup_kernel: "subgroup (kernel(G,H,h), G)"
@@ -1093,7 +1093,7 @@
 apply (auto simp add: kernel_def group.intro)
 done
 
-text{*The kernel of a homomorphism is a normal subgroup*}
+text\<open>The kernel of a homomorphism is a normal subgroup\<close>
 lemma (in group_hom) normal_kernel: "(kernel(G,H,h)) \<lhd> G"
 apply (simp add: group.normal_inv_iff subgroup_kernel group.intro)
 apply (simp add: kernel_def)
@@ -1161,7 +1161,7 @@
 qed
 
 
-text{*Lemma for the following injectivity result*}
+text\<open>Lemma for the following injectivity result\<close>
 lemma (in group_hom) FactGroup_subset:
      "\<lbrakk>g \<in> carrier(G); g' \<in> carrier(G); h ` g = h ` g'\<rbrakk>
       \<Longrightarrow>  kernel(G,H,h) #> g \<subseteq> kernel(G,H,h) #> g'"
@@ -1201,8 +1201,8 @@
 
 
 
-text{*If the homomorphism @{term h} is onto @{term H}, then so is the
-homomorphism from the quotient group*}
+text\<open>If the homomorphism @{term h} is onto @{term H}, then so is the
+homomorphism from the quotient group\<close>
 lemma (in group_hom) FactGroup_surj:
   assumes h: "h \<in> surj(carrier(G), carrier(H))"
   shows "(\<lambda>X\<in>carrier (G Mod kernel(G,H,h)). contents (h `` X))
@@ -1215,14 +1215,14 @@
   hence "(\<Union>x\<in>kernel(G,H,h) #> g. {h ` x}) = {y}"
     by (auto simp add: y kernel_def r_coset_def)
   with g show "\<exists>x\<in>carrier(G Mod kernel(G, H, h)). contents(h `` x) = y"
-        --{*The witness is @{term "kernel(G,H,h) #> g"}*}
+        --\<open>The witness is @{term "kernel(G,H,h) #> g"}\<close>
     by (force simp add: FactGroup_def RCOSETS_def
            image_eq_UN [OF hom_is_fun] kernel_rcoset_subset)
 qed
 
 
-text{*If @{term h} is a homomorphism from @{term G} onto @{term H}, then the
- quotient group @{term "G Mod (kernel(G,H,h))"} is isomorphic to @{term H}.*}
+text\<open>If @{term h} is a homomorphism from @{term G} onto @{term H}, then the
+ quotient group @{term "G Mod (kernel(G,H,h))"} is isomorphic to @{term H}.\<close>
 theorem (in group_hom) FactGroup_iso:
   "h \<in> surj(carrier(G), carrier(H))
    \<Longrightarrow> (\<lambda>X\<in>carrier (G Mod kernel(G,H,h)). contents (h``X)) \<in> (G Mod (kernel(G,H,h))) \<cong> H"
--- a/src/ZF/ex/LList.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/ex/LList.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -110,14 +110,14 @@
      "Ord(i) ==> l \<in> llist(quniv(A)) \<Longrightarrow> l \<inter> Vset(i) \<subseteq> univ(eclose(A))"
 proof (induct i arbitrary: l rule: trans_induct)
   case (step i l)
-  show ?case using `l \<in> llist(quniv(A))`
+  show ?case using \<open>l \<in> llist(quniv(A))\<close>
   proof (cases l rule: llist.cases)
     case LNil thus ?thesis
       by (simp add: QInl_def QInr_def llist.con_defs)
   next
     case (LCons a l) thus ?thesis using step.hyps
     proof (simp add: QInl_def QInr_def llist.con_defs)
-      show "<1; <a; l>> \<inter> Vset(i) \<subseteq> univ(eclose(A))" using LCons `Ord(i)`
+      show "<1; <a; l>> \<inter> Vset(i) \<subseteq> univ(eclose(A))" using LCons \<open>Ord(i)\<close>
         by (fast intro: step Ord_trans Int_lower1 [THEN subset_trans])
     qed
   qed
@@ -145,7 +145,7 @@
      "Ord(i) ==> <l,l'> \<in> lleq(A) \<Longrightarrow> l \<inter> Vset(i) \<subseteq> l'"
 proof (induct i arbitrary: l l' rule: trans_induct)
   case (step i l l')
-  show ?case using `\<langle>l, l'\<rangle> \<in> lleq(A)`
+  show ?case using \<open>\<langle>l, l'\<rangle> \<in> lleq(A)\<close>
   proof (cases rule: lleq.cases)
     case LNil thus ?thesis
       by (auto simp add: QInr_def llist.con_defs)
@@ -224,7 +224,7 @@
      "Ord(i) ==> l \<in> llist(bool) \<Longrightarrow> flip(l) \<inter> Vset(i) \<subseteq> univ(eclose(bool))"
 proof (induct i arbitrary: l rule: trans_induct)
   case (step i l)
-  show ?case using `l \<in> llist(bool)`
+  show ?case using \<open>l \<in> llist(bool)\<close>
   proof (cases l rule: llist.cases)
     case LNil thus ?thesis
       by (simp, simp add: QInl_def QInr_def llist.con_defs)
--- a/src/ZF/ex/Limit.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/ex/Limit.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -944,8 +944,8 @@
              dest: projpair_ep_cont)+
 done
 
-text{*Proof's very like the previous one.  Is there a pattern that
-      could be exploited?*}
+text\<open>Proof's very like the previous one.  Is there a pattern that
+      could be exploited?\<close>
 lemma projpair_unique_aux2:
     "[|cpo(D); cpo(E); projpair(D,E,e,p); projpair(D,E,e',p');
        rel(cf(E,D),p',p)|] ==> rel(cf(D,E),e,e')"
@@ -1295,7 +1295,7 @@
 apply (simp add: Dinf_def [symmetric])
 apply (rule ballI)
 apply (simplesubst lub_iprod)
-  --{*Subst would rewrite the lhs. We want to change the rhs.*}
+  --\<open>Subst would rewrite the lhs. We want to change the rhs.\<close>
 apply (assumption | rule chain_Dinf emb_chain_cpo)+
 apply simp
 apply (subst Rp_cont [THEN cont_lub])
@@ -1736,7 +1736,7 @@
    apply blast
   apply assumption
  apply (simplesubst eps_split_right_le)
-    --{*Subst would rewrite the lhs. We want to change the rhs.*}
+    --\<open>Subst would rewrite the lhs. We want to change the rhs.\<close>
        prefer 2 apply assumption
       apply simp
      apply (assumption | rule add_le_self nat_0I nat_succI)+
--- a/src/ZF/ex/Primes.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/ex/Primes.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,7 +3,7 @@
     Copyright   1996  University of Cambridge
 *)
 
-section{*The Divides Relation and Euclid's algorithm for the GCD*}
+section\<open>The Divides Relation and Euclid's algorithm for the GCD\<close>
 
 theory Primes imports Main begin
 
@@ -12,26 +12,26 @@
     "m dvd n == m \<in> nat & n \<in> nat & (\<exists>k \<in> nat. n = m#*k)"
 
 definition
-  is_gcd  :: "[i,i,i]=>o"     --{*definition of great common divisor*}  where
+  is_gcd  :: "[i,i,i]=>o"     --\<open>definition of great common divisor\<close>  where
     "is_gcd(p,m,n) == ((p dvd m) & (p dvd n))   &
                        (\<forall>d\<in>nat. (d dvd m) & (d dvd n) \<longrightarrow> d dvd p)"
 
 definition
-  gcd     :: "[i,i]=>i"       --{*Euclid's algorithm for the gcd*}  where
+  gcd     :: "[i,i]=>i"       --\<open>Euclid's algorithm for the gcd\<close>  where
     "gcd(m,n) == transrec(natify(n),
                         %n f. \<lambda>m \<in> nat.
                                 if n=0 then m else f`(m mod n)`n) ` natify(m)"
 
 definition
-  coprime :: "[i,i]=>o"       --{*the coprime relation*}  where
+  coprime :: "[i,i]=>o"       --\<open>the coprime relation\<close>  where
     "coprime(m,n) == gcd(m,n) = 1"
   
 definition
-  prime   :: i                --{*the set of prime numbers*}  where
+  prime   :: i                --\<open>the set of prime numbers\<close>  where
    "prime == {p \<in> nat. 1<p & (\<forall>m \<in> nat. m dvd p \<longrightarrow> m=1 | m=p)}"
 
 
-subsection{*The Divides Relation*}
+subsection\<open>The Divides Relation\<close>
 
 lemma dvdD: "m dvd n ==> m \<in> nat & n \<in> nat & (\<exists>k \<in> nat. n = m#*k)"
 by (unfold divides_def, assumption)
@@ -77,7 +77,7 @@
 done
 
 
-subsection{*Euclid's Algorithm for the GCD*}
+subsection\<open>Euclid's Algorithm for the GCD\<close>
 
 lemma gcd_0 [simp]: "gcd(m,0) = natify(m)"
 apply (simp add: gcd_def)
@@ -161,9 +161,9 @@
 by (blast intro: gcd_induct_lemma)
 
 
-subsection{*Basic Properties of @{term gcd}*}
+subsection\<open>Basic Properties of @{term gcd}\<close>
 
-text{*type of gcd*}
+text\<open>type of gcd\<close>
 lemma gcd_type [simp,TC]: "gcd(m, n) \<in> nat"
 apply (subgoal_tac "gcd (natify (m), natify (n)) \<in> nat")
 apply simp
@@ -173,7 +173,7 @@
 done
 
 
-text{* Property 1: gcd(a,b) divides a and b *}
+text\<open>Property 1: gcd(a,b) divides a and b\<close>
 
 lemma gcd_dvd_both:
      "[| m \<in> nat; n \<in> nat |] ==> gcd (m, n) dvd m & gcd (m, n) dvd n"
@@ -192,7 +192,7 @@
 apply auto
 done
 
-text{* if f divides a and b then f divides gcd(a,b) *}
+text\<open>if f divides a and b then f divides gcd(a,b)\<close>
 
 lemma dvd_mod: "[| f dvd a; f dvd b |] ==> f dvd (a mod b)"
 apply (simp add: divides_def)
@@ -201,8 +201,8 @@
 apply (blast intro: mod_mult_distrib2 [symmetric])
 done
 
-text{* Property 2: for all a,b,f naturals, 
-               if f divides a and f divides b then f divides gcd(a,b)*}
+text\<open>Property 2: for all a,b,f naturals, 
+               if f divides a and f divides b then f divides gcd(a,b)\<close>
 
 lemma gcd_greatest_raw [rule_format]:
      "[| m \<in> nat; n \<in> nat; f \<in> nat |]    
@@ -221,14 +221,14 @@
 by (blast intro!: gcd_greatest gcd_dvd1 gcd_dvd2 intro: dvd_trans)
 
 
-subsection{*The Greatest Common Divisor*}
+subsection\<open>The Greatest Common Divisor\<close>
 
-text{*The GCD exists and function gcd computes it.*}
+text\<open>The GCD exists and function gcd computes it.\<close>
 
 lemma is_gcd: "[| m \<in> nat; n \<in> nat |] ==> is_gcd(gcd(m,n), m, n)"
 by (simp add: is_gcd_def)
 
-text{*The GCD is unique*}
+text\<open>The GCD is unique\<close>
 
 lemma is_gcd_unique: "[|is_gcd(m,a,b); is_gcd(n,a,b); m\<in>nat; n\<in>nat|] ==> m=n"
 apply (simp add: is_gcd_def)
@@ -271,7 +271,7 @@
 by (simp add: gcd_commute [of 1])
 
 
-subsection{*Addition laws*}
+subsection\<open>Addition laws\<close>
 
 lemma gcd_add1 [simp]: "gcd (m #+ n, n) = gcd (m, n)"
 apply (subgoal_tac "gcd (m #+ natify (n), natify (n)) = gcd (m, natify (n))")
@@ -300,7 +300,7 @@
 done
 
 
-subsection{* Multiplication Laws*}
+subsection\<open>Multiplication Laws\<close>
 
 lemma gcd_mult_distrib2_raw:
      "[| k \<in> nat; m \<in> nat; n \<in> nat |]  
@@ -349,9 +349,9 @@
 by (auto simp add: prime_def)
 
 
-text{*This theorem leads immediately to a proof of the uniqueness of
+text\<open>This theorem leads immediately to a proof of the uniqueness of
   factorization.  If @{term p} divides a product of primes then it is
-  one of those primes.*}
+  one of those primes.\<close>
 
 lemma prime_dvd_mult:
      "[|p dvd m #* n; p \<in> prime; m \<in> nat; n \<in> nat |] ==> p dvd m \<or> p dvd n"
@@ -375,7 +375,7 @@
 done
 
 
-subsection{*The Square Root of a Prime is Irrational: Key Lemma*}
+subsection\<open>The Square Root of a Prime is Irrational: Key Lemma\<close>
 
 lemma prime_dvd_other_side:
      "\<lbrakk>n#*n = p#*(k#*k); p \<in> prime; n \<in> nat\<rbrakk> \<Longrightarrow> p dvd n"
--- a/src/ZF/ex/Ring.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/ex/Ring.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -2,7 +2,7 @@
 
 *)
 
-section {* Rings *}
+section \<open>Rings\<close>
 
 theory Ring imports Group begin
 
@@ -39,7 +39,7 @@
   by (simp add: zero_def)
 
 
-text {* Derived operations. *}
+text \<open>Derived operations.\<close>
 
 definition
   a_inv :: "[i,i] => i" ("\<ominus>\<index> _" [81] 80) where
@@ -53,9 +53,9 @@
   assumes a_comm_monoid:
     "comm_monoid (<carrier(G), add_field(G), zero(G), 0>)"
 
-text {*
+text \<open>
   The following definition is redundant but simple to use.
-*}
+\<close>
 
 locale abelian_group = abelian_monoid +
   assumes a_comm_group:
@@ -75,7 +75,7 @@
                   a = \<zero> | b = \<zero>"
 
 
-subsection {* Basic Properties *}
+subsection \<open>Basic Properties\<close>
 
 lemma (in abelian_monoid) a_monoid:
      "monoid (<carrier(G), add_field(G), zero(G), 0>)"
@@ -172,9 +172,9 @@
 
 lemmas (in abelian_monoid) a_ac = a_assoc a_comm a_lcomm
 
-text {*
+text \<open>
   The following proofs are from Jacobson, Basic Algebra I, pp.~88--89
-*}
+\<close>
 
 context ring
 begin
@@ -228,7 +228,7 @@
 end
 
 
-subsection {* Morphisms *}
+subsection \<open>Morphisms\<close>
 
 definition
   ring_hom :: "[i,i] => i" where
--- a/src/ZF/ex/misc.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/ex/misc.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -5,53 +5,53 @@
 Composition of homomorphisms, Pastre's examples, ...
 *)
 
-section{*Miscellaneous ZF Examples*}
+section\<open>Miscellaneous ZF Examples\<close>
 
 theory misc imports Main begin
 
 
-subsection{*Various Small Problems*}
+subsection\<open>Various Small Problems\<close>
 
-text{*The singleton problems are much harder in HOL.*}
+text\<open>The singleton problems are much harder in HOL.\<close>
 lemma singleton_example_1:
      "\<forall>x \<in> S. \<forall>y \<in> S. x \<subseteq> y \<Longrightarrow> \<exists>z. S \<subseteq> {z}"
   by blast
 
 lemma singleton_example_2:
      "\<forall>x \<in> S. \<Union>S \<subseteq> x \<Longrightarrow> \<exists>z. S \<subseteq> {z}"
-  -- {*Variant of the problem above. *}
+  -- \<open>Variant of the problem above.\<close>
   by blast
 
 lemma "\<exists>!x. f (g(x)) = x \<Longrightarrow> \<exists>!y. g (f(y)) = y"
-  -- {* A unique fixpoint theorem --- @{text fast}/@{text best}/@{text auto} all fail. *} 
+  -- \<open>A unique fixpoint theorem --- @{text fast}/@{text best}/@{text auto} all fail.\<close> 
   apply (erule ex1E, rule ex1I, erule subst_context)
   apply (rule subst, assumption, erule allE, rule subst_context, erule mp)
   apply (erule subst_context)
   done
 
 
-text{*A weird property of ordered pairs.*}
+text\<open>A weird property of ordered pairs.\<close>
 lemma "b\<noteq>c ==> <a,b> \<inter> <a,c> = <a,a>"
 by (simp add: Pair_def Int_cons_left Int_cons_right doubleton_eq_iff, blast)
 
-text{*These two are cited in Benzmueller and Kohlhase's system description of
- LEO, CADE-15, 1998 (page 139-143) as theorems LEO could not prove.*}
+text\<open>These two are cited in Benzmueller and Kohlhase's system description of
+ LEO, CADE-15, 1998 (page 139-143) as theorems LEO could not prove.\<close>
 lemma "(X = Y \<union> Z) \<longleftrightarrow> (Y \<subseteq> X & Z \<subseteq> X & (\<forall>V. Y \<subseteq> V & Z \<subseteq> V \<longrightarrow> X \<subseteq> V))"
 by (blast intro!: equalityI)
 
-text{*the dual of the previous one*}
+text\<open>the dual of the previous one\<close>
 lemma "(X = Y \<inter> Z) \<longleftrightarrow> (X \<subseteq> Y & X \<subseteq> Z & (\<forall>V. V \<subseteq> Y & V \<subseteq> Z \<longrightarrow> V \<subseteq> X))"
 by (blast intro!: equalityI)
 
-text{*trivial example of term synthesis: apparently hard for some provers!*}
+text\<open>trivial example of term synthesis: apparently hard for some provers!\<close>
 schematic_lemma "a \<noteq> b ==> a:?X & b \<notin> ?X"
 by blast
 
-text{*Nice blast benchmark.  Proved in 0.3s; old tactics can't manage it!*}
+text\<open>Nice blast benchmark.  Proved in 0.3s; old tactics can't manage it!\<close>
 lemma "\<forall>x \<in> S. \<forall>y \<in> S. x \<subseteq> y ==> \<exists>z. S \<subseteq> {z}"
 by blast
 
-text{*variant of the benchmark above*}
+text\<open>variant of the benchmark above\<close>
 lemma "\<forall>x \<in> S. \<Union>(S) \<subseteq> x ==> \<exists>z. S \<subseteq> {z}"
 by blast
 
@@ -62,19 +62,19 @@
 lemma "(\<forall>F. {x} \<in> F \<longrightarrow> {y} \<in> F) \<longrightarrow> (\<forall>A. x \<in> A \<longrightarrow> y \<in> A)"
 by best
 
-text{*A characterization of functions suggested by Tobias Nipkow*}
+text\<open>A characterization of functions suggested by Tobias Nipkow\<close>
 lemma "r \<in> domain(r)->B  \<longleftrightarrow>  r \<subseteq> domain(r)*B & (\<forall>X. r `` (r -`` X) \<subseteq> X)"
 by (unfold Pi_def function_def, best)
 
 
-subsection{*Composition of homomorphisms is a Homomorphism*}
+subsection\<open>Composition of homomorphisms is a Homomorphism\<close>
 
-text{*Given as a challenge problem in
+text\<open>Given as a challenge problem in
   R. Boyer et al.,
   Set Theory in First-Order Logic: Clauses for G\"odel's Axioms,
-  JAR 2 (1986), 287-327 *}
+  JAR 2 (1986), 287-327\<close>
 
-text{*collecting the relevant lemmas*}
+text\<open>collecting the relevant lemmas\<close>
 declare comp_fun [simp] SigmaI [simp] apply_funtype [simp]
 
 (*Force helps prove conditions of rewrites such as comp_fun_apply, since
@@ -86,7 +86,7 @@
        (K O J) \<in> hom(A,f,C,h)"
 by force
 
-text{*Another version, with meta-level rewriting*}
+text\<open>Another version, with meta-level rewriting\<close>
 lemma "(!! A f B g. hom(A,f,B,g) ==  
            {H \<in> A->B. f \<in> A*A->A & g \<in> B*B->B &  
                      (\<forall>x \<in> A. \<forall>y \<in> A. H`(f`<x,y>) = g`<H`x,H`y>)}) 
@@ -94,11 +94,11 @@
 by force
 
 
-subsection{*Pastre's Examples*}
+subsection\<open>Pastre's Examples\<close>
 
-text{*D Pastre.  Automatic theorem proving in set theory. 
+text\<open>D Pastre.  Automatic theorem proving in set theory. 
         Artificial Intelligence, 10:1--27, 1978.
-Previously, these were done using ML code, but blast manages fine.*}
+Previously, these were done using ML code, but blast manages fine.\<close>
 
 lemmas compIs [intro] = comp_surj comp_inj comp_fun [intro]
 lemmas compDs [dest] =  comp_mem_injD1 comp_mem_surjD1 
--- a/src/ZF/func.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/func.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,11 +3,11 @@
     Copyright   1991  University of Cambridge
 *)
 
-section{*Functions, Function Spaces, Lambda-Abstraction*}
+section\<open>Functions, Function Spaces, Lambda-Abstraction\<close>
 
 theory func imports equalities Sum begin
 
-subsection{*The Pi Operator: Dependent Function Space*}
+subsection\<open>The Pi Operator: Dependent Function Space\<close>
 
 lemma subset_Sigma_imp_relation: "r \<subseteq> Sigma(A,B) ==> relation(r)"
 by (simp add: relation_def, blast)
@@ -55,7 +55,7 @@
 lemma fun_weaken_type: "[| f \<in> A->B;  B<=D |] ==> f \<in> A->D"
 by (unfold Pi_def, best)
 
-subsection{*Function Application*}
+subsection\<open>Function Application\<close>
 
 lemma apply_equality2: "[| <a,b>: f;  <a,c>: f;  f \<in> Pi(A,B) |] ==> b=c"
 by (unfold Pi_def function_def, blast)
@@ -129,7 +129,7 @@
 lemma Pair_mem_PiD: "[| <a,b>: f;  f \<in> Pi(A,B) |] ==> a \<in> A & b \<in> B(a) & f`a = b"
 by (blast intro: domain_type range_type apply_equality)
 
-subsection{*Lambda Abstraction*}
+subsection\<open>Lambda Abstraction\<close>
 
 lemma lamI: "a \<in> A ==> <a,b(a)> \<in> (\<lambda>x\<in>A. b(x))"
 apply (unfold lam_def)
@@ -202,7 +202,7 @@
 done
 
 
-subsection{*Extensionality*}
+subsection\<open>Extensionality\<close>
 
 (*Semi-extensionality!*)
 
@@ -242,7 +242,7 @@
 done
 
 
-subsection{*Images of Functions*}
+subsection\<open>Images of Functions\<close>
 
 lemma image_lam: "C \<subseteq> A ==> (\<lambda>x\<in>A. b(x)) `` C = {b(x). x \<in> C}"
 by (unfold lam_def, blast)
@@ -278,7 +278,7 @@
 by (blast dest: apply_equality apply_Pair)
 
 
-subsection{*Properties of @{term "restrict(f,A)"}*}
+subsection\<open>Properties of @{term "restrict(f,A)"}\<close>
 
 lemma restrict_subset: "restrict(f,A) \<subseteq> f"
 by (unfold restrict_def, blast)
@@ -338,7 +338,7 @@
 done
 
 
-subsection{*Unions of Functions*}
+subsection\<open>Unions of Functions\<close>
 
 (** The Union of a set of COMPATIBLE functions is a function **)
 
@@ -381,7 +381,7 @@
 lemma fun_disjoint_apply2: "c \<notin> domain(f) ==> (f \<union> g)`c = g`c"
 by (simp add: apply_def, blast)
 
-subsection{*Domain and Range of a Function or Relation*}
+subsection\<open>Domain and Range of a Function or Relation\<close>
 
 lemma domain_of_fun: "f \<in> Pi(A,B) ==> domain(f)=A"
 by (unfold Pi_def, blast)
@@ -392,7 +392,7 @@
 lemma range_of_fun: "f \<in> Pi(A,B) ==> f \<in> A->range(f)"
 by (blast intro: Pi_type apply_rangeI)
 
-subsection{*Extensions of Functions*}
+subsection\<open>Extensions of Functions\<close>
 
 lemma fun_extend:
      "[| f \<in> A->B;  c\<notin>A |] ==> cons(<c,b>,f) \<in> cons(c,A) -> cons(b,B)"
@@ -439,7 +439,7 @@
 by (simp add: succ_def mem_not_refl cons_fun_eq)
 
 
-subsection{*Function Updates*}
+subsection\<open>Function Updates\<close>
 
 definition
   update  :: "[i,i,i] => i"  where
@@ -487,9 +487,9 @@
 done
 
 
-subsection{*Monotonicity Theorems*}
+subsection\<open>Monotonicity Theorems\<close>
 
-subsubsection{*Replacement in its Various Forms*}
+subsubsection\<open>Replacement in its Various Forms\<close>
 
 (*Not easy to express monotonicity in P, since any "bigger" predicate
   would have to be single-valued*)
@@ -525,7 +525,7 @@
 lemma Diff_mono: "[| A<=C;  D<=B |] ==> A-B \<subseteq> C-D"
 by blast
 
-subsubsection{*Standard Products, Sums and Function Spaces *}
+subsubsection\<open>Standard Products, Sums and Function Spaces\<close>
 
 lemma Sigma_mono [rule_format]:
      "[| A<=C;  !!x. x \<in> A \<longrightarrow> B(x) \<subseteq> D(x) |] ==> Sigma(A,B) \<subseteq> Sigma(C,D)"
@@ -543,7 +543,7 @@
 apply (erule RepFun_mono)
 done
 
-subsubsection{*Converse, Domain, Range, Field*}
+subsubsection\<open>Converse, Domain, Range, Field\<close>
 
 lemma converse_mono: "r<=s ==> converse(r) \<subseteq> converse(s)"
 by blast
@@ -565,7 +565,7 @@
 by (erule field_mono [THEN subset_trans], blast)
 
 
-subsubsection{*Images*}
+subsubsection\<open>Images\<close>
 
 lemma image_pair_mono:
     "[| !! x y. <x,y>:r ==> <x,y>:s;  A<=B |] ==> r``A \<subseteq> s``B"
--- a/src/ZF/pair.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/pair.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -3,35 +3,35 @@
     Copyright   1992  University of Cambridge
 *)
 
-section{*Ordered Pairs*}
+section\<open>Ordered Pairs\<close>
 
 theory pair imports upair
 begin
 
 ML_file "simpdata.ML"
 
-setup {*
+setup \<open>
   map_theory_simpset
     (Simplifier.set_mksimps (fn ctxt =>
         map mk_eq o ZF_atomize o Drule.gen_all (Variable.maxidx_of ctxt))
       #> Simplifier.add_cong @{thm if_weak_cong})
-*}
+\<close>
 
-ML {* val ZF_ss = simpset_of @{context} *}
+ML \<open>val ZF_ss = simpset_of @{context}\<close>
 
-simproc_setup defined_Bex ("\<exists>x\<in>A. P(x) & Q(x)") = {*
+simproc_setup defined_Bex ("\<exists>x\<in>A. P(x) & Q(x)") = \<open>
   fn _ => Quantifier1.rearrange_bex
     (fn ctxt =>
       unfold_tac ctxt @{thms Bex_def} THEN
       Quantifier1.prove_one_point_ex_tac ctxt)
-*}
+\<close>
 
-simproc_setup defined_Ball ("\<forall>x\<in>A. P(x) \<longrightarrow> Q(x)") = {*
+simproc_setup defined_Ball ("\<forall>x\<in>A. P(x) \<longrightarrow> Q(x)") = \<open>
   fn _ => Quantifier1.rearrange_ball
     (fn ctxt =>
       unfold_tac ctxt @{thms Ball_def} THEN
       Quantifier1.prove_one_point_all_tac ctxt)
-*}
+\<close>
 
 
 (** Lemmas for showing that <a,b> uniquely determines a and b **)
@@ -78,9 +78,9 @@
 qed
 
 
-subsection{*Sigma: Disjoint Union of a Family of Sets*}
+subsection\<open>Sigma: Disjoint Union of a Family of Sets\<close>
 
-text{*Generalizes Cartesian product*}
+text\<open>Generalizes Cartesian product\<close>
 
 lemma Sigma_iff [simp]: "<a,b>: Sigma(A,B) \<longleftrightarrow> a \<in> A & b \<in> B(a)"
 by (simp add: Sigma_def)
@@ -123,7 +123,7 @@
 by blast
 
 
-subsection{*Projections @{term fst} and @{term snd}*}
+subsection\<open>Projections @{term fst} and @{term snd}\<close>
 
 lemma fst_conv [simp]: "fst(<a,b>) = a"
 by (simp add: fst_def)
@@ -141,7 +141,7 @@
 by auto
 
 
-subsection{*The Eliminator, @{term split}*}
+subsection\<open>The Eliminator, @{term split}\<close>
 
 (*A META-equality, so that it applies to higher types as well...*)
 lemma split [simp]: "split(%x y. c(x,y), <a,b>) == c(a,b)"
@@ -159,7 +159,7 @@
 by (auto simp add: split_def)
 
 
-subsection{*A version of @{term split} for Formulae: Result Type @{typ o}*}
+subsection\<open>A version of @{term split} for Formulae: Result Type @{typ o}\<close>
 
 lemma splitI: "R(a,b) ==> split(R, <a,b>)"
 by (simp add: split_def)
@@ -173,9 +173,9 @@
 lemma splitD: "split(R,<a,b>) ==> R(a,b)"
 by (simp add: split_def)
 
-text {*
+text \<open>
   \bigskip Complex rules for Sigma.
-*}
+\<close>
 
 lemma split_paired_Bex_Sigma [simp]:
      "(\<exists>z \<in> Sigma(A,B). P(z)) \<longleftrightarrow> (\<exists>x \<in> A. \<exists>y \<in> B(x). P(<x,y>))"
--- a/src/ZF/upair.thy	Thu Jul 23 14:20:51 2015 +0200
+++ b/src/ZF/upair.thy	Thu Jul 23 14:25:05 2015 +0200
@@ -9,7 +9,7 @@
     Ordered pairs and descriptions are defined using cons ("set notation")
 *)
 
-section{*Unordered Pairs*}
+section\<open>Unordered Pairs\<close>
 
 theory upair
 imports ZF
@@ -23,7 +23,7 @@
 by (simp add: Ball_def atomize_all atomize_imp)
 
 
-subsection{*Unordered Pairs: constant @{term Upair}*}
+subsection\<open>Unordered Pairs: constant @{term Upair}\<close>
 
 lemma Upair_iff [simp]: "c \<in> Upair(a,b) \<longleftrightarrow> (c=a | c=b)"
 by (unfold Upair_def, blast)
@@ -37,7 +37,7 @@
 lemma UpairE: "[| a \<in> Upair(b,c);  a=b ==> P;  a=c ==> P |] ==> P"
 by (simp, blast)
 
-subsection{*Rules for Binary Union, Defined via @{term Upair}*}
+subsection\<open>Rules for Binary Union, Defined via @{term Upair}\<close>
 
 lemma Un_iff [simp]: "c \<in> A \<union> B \<longleftrightarrow> (c \<in> A | c \<in> B)"
 apply (simp add: Un_def)
@@ -63,7 +63,7 @@
 lemma UnCI [intro!]: "(c \<notin> B ==> c \<in> A) ==> c \<in> A \<union> B"
 by (simp, blast)
 
-subsection{*Rules for Binary Intersection, Defined via @{term Upair}*}
+subsection\<open>Rules for Binary Intersection, Defined via @{term Upair}\<close>
 
 lemma Int_iff [simp]: "c \<in> A \<inter> B \<longleftrightarrow> (c \<in> A & c \<in> B)"
 apply (unfold Int_def)
@@ -83,7 +83,7 @@
 by simp
 
 
-subsection{*Rules for Set Difference, Defined via @{term Upair}*}
+subsection\<open>Rules for Set Difference, Defined via @{term Upair}\<close>
 
 lemma Diff_iff [simp]: "c \<in> A-B \<longleftrightarrow> (c \<in> A & c\<notin>B)"
 by (unfold Diff_def, blast)
@@ -101,7 +101,7 @@
 by simp
 
 
-subsection{*Rules for @{term cons}*}
+subsection\<open>Rules for @{term cons}\<close>
 
 lemma cons_iff [simp]: "a \<in> cons(b,A) \<longleftrightarrow> (a=b | a \<in> A)"
 apply (unfold cons_def)
@@ -137,7 +137,7 @@
 declare cons_not_0 [THEN not_sym, simp]
 
 
-subsection{*Singletons*}
+subsection\<open>Singletons\<close>
 
 lemma singleton_iff: "a \<in> {b} \<longleftrightarrow> a=b"
 by simp
@@ -148,7 +148,7 @@
 lemmas singletonE = singleton_iff [THEN iffD1, elim_format, elim!]
 
 
-subsection{*Descriptions*}
+subsection\<open>Descriptions\<close>
 
 lemma the_equality [intro]:
     "[| P(a);  !!x. P(x) ==> x=a |] ==> (THE x. P(x)) = a"
@@ -195,7 +195,7 @@
 by blast
 
 
-subsection{*Conditional Terms: @{text "if-then-else"}*}
+subsection\<open>Conditional Terms: @{text "if-then-else"}\<close>
 
 lemma if_true [simp]: "(if True then a else b) = a"
 by (unfold if_def, blast)
@@ -253,7 +253,7 @@
 lemmas if_splits = split_if split_if_asm
 
 
-subsection{*Consequences of Foundation*}
+subsection\<open>Consequences of Foundation\<close>
 
 (*was called mem_anti_sym*)
 lemma mem_asym: "[| a \<in> b;  ~P ==> b \<in> a |] ==> P"
@@ -281,7 +281,7 @@
 lemma eq_imp_not_mem: "a=A ==> a \<notin> A"
 by (blast intro: elim: mem_irrefl)
 
-subsection{*Rules for Successor*}
+subsection\<open>Rules for Successor\<close>
 
 lemma succ_iff: "i \<in> succ(j) \<longleftrightarrow> i=j | i \<in> j"
 by (unfold succ_def, blast)
@@ -321,7 +321,7 @@
 lemmas succ_inject = succ_inject_iff [THEN iffD1, dest!]
 
 
-subsection{*Miniscoping of the Bounded Universal Quantifier*}
+subsection\<open>Miniscoping of the Bounded Universal Quantifier\<close>
 
 lemma ball_simps1:
      "(\<forall>x\<in>A. P(x) & Q)   \<longleftrightarrow> (\<forall>x\<in>A. P(x)) & (A=0 | Q)"
@@ -352,7 +352,7 @@
 by blast
 
 
-subsection{*Miniscoping of the Bounded Existential Quantifier*}
+subsection\<open>Miniscoping of the Bounded Existential Quantifier\<close>
 
 lemma bex_simps1:
      "(\<exists>x\<in>A. P(x) & Q) \<longleftrightarrow> ((\<exists>x\<in>A. P(x)) & Q)"
@@ -404,9 +404,9 @@
 by blast
 
 
-subsection{*Miniscoping of the Replacement Operator*}
+subsection\<open>Miniscoping of the Replacement Operator\<close>
 
-text{*These cover both @{term Replace} and @{term Collect}*}
+text\<open>These cover both @{term Replace} and @{term Collect}\<close>
 lemma Rep_simps [simp]:
      "{x. y \<in> 0, R(x,y)} = 0"
      "{x \<in> 0. P(x)} = 0"
@@ -417,7 +417,7 @@
 by (simp_all, blast+)
 
 
-subsection{*Miniscoping of Unions*}
+subsection\<open>Miniscoping of Unions\<close>
 
 lemma UN_simps1:
      "(\<Union>x\<in>C. cons(a, B(x))) = (if C=0 then 0 else cons(a, \<Union>x\<in>C. B(x)))"
@@ -439,7 +439,7 @@
 
 lemmas UN_simps [simp] = UN_simps1 UN_simps2
 
-text{*Opposite of miniscoping: pull the operator out*}
+text\<open>Opposite of miniscoping: pull the operator out\<close>
 
 lemma UN_extend_simps1:
      "(\<Union>x\<in>C. A(x)) \<union> B   = (if C=0 then B else (\<Union>x\<in>C. A(x) \<union> B))"
@@ -467,7 +467,7 @@
 lemmas UN_extend_simps = UN_extend_simps1 UN_extend_simps2 UN_UN_extend
 
 
-subsection{*Miniscoping of Intersections*}
+subsection\<open>Miniscoping of Intersections\<close>
 
 lemma INT_simps1:
      "(\<Inter>x\<in>C. A(x) \<inter> B) = (\<Inter>x\<in>C. A(x)) \<inter> B"
@@ -486,7 +486,7 @@
 
 lemmas INT_simps [simp] = INT_simps1 INT_simps2
 
-text{*Opposite of miniscoping: pull the operator out*}
+text\<open>Opposite of miniscoping: pull the operator out\<close>
 
 
 lemma INT_extend_simps1:
@@ -508,7 +508,7 @@
 lemmas INT_extend_simps = INT_extend_simps1 INT_extend_simps2
 
 
-subsection{*Other simprules*}
+subsection\<open>Other simprules\<close>
 
 
 (*** Miniscoping: pushing in big Unions, Intersections, quantifiers, etc. ***)