isabelle update_cartouches -c;
author wenzelm
date Fri Jan 12 14:08:53 2018 +0100
changeset 67406 23307fd33906
parent 67405 e9ab4ad7bd15
child 67407 dbaa38bd223a
src/Doc/Classes/Classes.thy
src/Doc/Functions/Functions.thy
src/Doc/How_to_Prove_it/How_to_Prove_it.thy
src/Doc/Logics_ZF/FOL_examples.thy
src/Doc/Logics_ZF/IFOL_examples.thy
src/Doc/Logics_ZF/If.thy
src/Doc/Logics_ZF/ZF_Isar.thy
src/Doc/Logics_ZF/ZF_examples.thy
src/Doc/Prog_Prove/Basics.thy
src/Doc/Prog_Prove/Bool_nat_list.thy
src/Doc/Prog_Prove/Isar.thy
src/Doc/Prog_Prove/LaTeXsugar.thy
src/Doc/Prog_Prove/Logic.thy
src/Doc/Prog_Prove/Types_and_funs.thy
src/Doc/Sugar/Sugar.thy
src/Doc/Tutorial/Advanced/Partial.thy
src/Doc/Tutorial/Advanced/WFrec.thy
src/Doc/Tutorial/Advanced/simp2.thy
src/Doc/Tutorial/CTL/Base.thy
src/Doc/Tutorial/CTL/CTL.thy
src/Doc/Tutorial/CTL/CTLind.thy
src/Doc/Tutorial/CTL/PDL.thy
src/Doc/Tutorial/CodeGen/CodeGen.thy
src/Doc/Tutorial/Datatype/ABexpr.thy
src/Doc/Tutorial/Datatype/Fundata.thy
src/Doc/Tutorial/Datatype/Nested.thy
src/Doc/Tutorial/Documents/Documents.thy
src/Doc/Tutorial/Fun/fun0.thy
src/Doc/Tutorial/Ifexpr/Ifexpr.thy
src/Doc/Tutorial/Inductive/AB.thy
src/Doc/Tutorial/Inductive/Advanced.thy
src/Doc/Tutorial/Inductive/Even.thy
src/Doc/Tutorial/Inductive/Mutual.thy
src/Doc/Tutorial/Inductive/Star.thy
src/Doc/Tutorial/Misc/AdvancedInd.thy
src/Doc/Tutorial/Misc/Itrev.thy
src/Doc/Tutorial/Misc/Option2.thy
src/Doc/Tutorial/Misc/Plus.thy
src/Doc/Tutorial/Misc/Tree.thy
src/Doc/Tutorial/Misc/Tree2.thy
src/Doc/Tutorial/Misc/appendix.thy
src/Doc/Tutorial/Misc/case_exprs.thy
src/Doc/Tutorial/Misc/fakenat.thy
src/Doc/Tutorial/Misc/natsum.thy
src/Doc/Tutorial/Misc/pairs2.thy
src/Doc/Tutorial/Misc/prime_def.thy
src/Doc/Tutorial/Misc/simp.thy
src/Doc/Tutorial/Misc/types.thy
src/Doc/Tutorial/Protocol/Event.thy
src/Doc/Tutorial/Protocol/Message.thy
src/Doc/Tutorial/Protocol/NS_Public.thy
src/Doc/Tutorial/Protocol/Public.thy
src/Doc/Tutorial/Recdef/Induction.thy
src/Doc/Tutorial/Recdef/Nested0.thy
src/Doc/Tutorial/Recdef/Nested1.thy
src/Doc/Tutorial/Recdef/Nested2.thy
src/Doc/Tutorial/Recdef/examples.thy
src/Doc/Tutorial/Recdef/simplification.thy
src/Doc/Tutorial/Recdef/termination.thy
src/Doc/Tutorial/Rules/Basic.thy
src/Doc/Tutorial/Rules/Blast.thy
src/Doc/Tutorial/Rules/Force.thy
src/Doc/Tutorial/Rules/Forward.thy
src/Doc/Tutorial/Rules/TPrimes.thy
src/Doc/Tutorial/Rules/Tacticals.thy
src/Doc/Tutorial/Rules/find2.thy
src/Doc/Tutorial/Sets/Examples.thy
src/Doc/Tutorial/Sets/Functions.thy
src/Doc/Tutorial/Sets/Recur.thy
src/Doc/Tutorial/Sets/Relations.thy
src/Doc/Tutorial/ToyList/ToyList.thy
src/Doc/Tutorial/ToyList/ToyList_Test.thy
src/Doc/Tutorial/Trie/Trie.thy
src/Doc/Tutorial/Types/Axioms.thy
src/Doc/Tutorial/Types/Numbers.thy
src/Doc/Tutorial/Types/Overloading.thy
src/Doc/Tutorial/Types/Pairs.thy
src/Doc/Tutorial/Types/Records.thy
src/Doc/Tutorial/Types/Typedefs.thy
src/HOL/Data_Structures/AA_Map.thy
src/HOL/Data_Structures/AA_Set.thy
src/HOL/Data_Structures/AList_Upd_Del.thy
src/HOL/Data_Structures/AVL_Map.thy
src/HOL/Data_Structures/AVL_Set.thy
src/HOL/Data_Structures/Brother12_Map.thy
src/HOL/Data_Structures/Brother12_Set.thy
src/HOL/Data_Structures/Cmp.thy
src/HOL/Data_Structures/Leftist_Heap.thy
src/HOL/Data_Structures/Less_False.thy
src/HOL/Data_Structures/List_Ins_Del.thy
src/HOL/Data_Structures/Map_by_Ordered.thy
src/HOL/Data_Structures/Set_by_Ordered.thy
src/HOL/Data_Structures/Sorted_Less.thy
src/HOL/Data_Structures/Tree234.thy
src/HOL/Data_Structures/Tree234_Set.thy
src/HOL/Data_Structures/Tree23_Set.thy
src/HOL/IMP/ACom.thy
src/HOL/IMP/AExp.thy
src/HOL/IMP/ASM.thy
src/HOL/IMP/Abs_Int0.thy
src/HOL/IMP/Abs_Int1.thy
src/HOL/IMP/Abs_Int1_const.thy
src/HOL/IMP/Abs_Int1_parity.thy
src/HOL/IMP/Abs_Int2.thy
src/HOL/IMP/Abs_Int2_ivl.thy
src/HOL/IMP/Abs_Int3.thy
src/HOL/IMP/Abs_Int_Tests.thy
src/HOL/IMP/Abs_Int_init.thy
src/HOL/IMP/Abs_State.thy
src/HOL/IMP/BExp.thy
src/HOL/IMP/Big_Step.thy
src/HOL/IMP/C_like.thy
src/HOL/IMP/Collecting.thy
src/HOL/IMP/Collecting1.thy
src/HOL/IMP/Collecting_Examples.thy
src/HOL/IMP/Compiler.thy
src/HOL/IMP/Compiler2.thy
src/HOL/IMP/Def_Init_Big.thy
src/HOL/IMP/Def_Init_Small.thy
src/HOL/IMP/Denotational.thy
src/HOL/IMP/Finite_Reachable.thy
src/HOL/IMP/Hoare.thy
src/HOL/IMP/Hoare_Examples.thy
src/HOL/IMP/Hoare_Total.thy
src/HOL/IMP/Hoare_Total_EX.thy
src/HOL/IMP/Hoare_Total_EX2.thy
src/HOL/IMP/Live.thy
src/HOL/IMP/Live_True.thy
src/HOL/IMP/OO.thy
src/HOL/IMP/Poly_Types.thy
src/HOL/IMP/Sec_Type_Expr.thy
src/HOL/IMP/Sec_Typing.thy
src/HOL/IMP/Sec_TypingT.thy
src/HOL/IMP/Sem_Equiv.thy
src/HOL/IMP/Small_Step.thy
src/HOL/IMP/Star.thy
src/HOL/IMP/Types.thy
src/HOL/IMP/VCG.thy
src/HOL/IMP/VCG_Total_EX.thy
src/HOL/IMP/VCG_Total_EX2.thy
src/HOL/IMP/Vars.thy
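
This changeset is the output of a single mechanical pass: the update_cartouches tool rewrites the old ML-style delimiters {* ... *} to cartouche syntax \<open> ... \<close>, and the -c option additionally replaces the marginal comment marker -- by the \<comment> symbol. A minimal before/after sketch of the transformation, distilled from the hunks below (the lemma text is illustrative only):

    (* before the pass: old-style delimiters and comment marker *)
    text {* A small example. *}
    apply (rule impI)  -- {* @{subgoals[display]} *}

    (* after the pass: cartouche syntax *)
    text \<open>A small example.\<close>
    apply (rule impI)  \<comment> \<open>@{subgoals[display]}\<close>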
     1.1 --- a/src/Doc/Classes/Classes.thy	Thu Jan 11 13:48:17 2018 +0100
     1.2 +++ b/src/Doc/Classes/Classes.thy	Fri Jan 12 14:08:53 2018 +0100
     1.3 @@ -558,12 +558,12 @@
     1.4  context %quote semigroup
     1.5  begin
     1.6  
     1.7 -term %quote "x \<otimes> y" -- \<open>example 1\<close>
     1.8 -term %quote "(x::nat) \<otimes> y" -- \<open>example 2\<close>
     1.9 +term %quote "x \<otimes> y" \<comment> \<open>example 1\<close>
    1.10 +term %quote "(x::nat) \<otimes> y" \<comment> \<open>example 2\<close>
    1.11  
    1.12  end  %quote
    1.13  
    1.14 -term %quote "x \<otimes> y" -- \<open>example 3\<close>
    1.15 +term %quote "x \<otimes> y" \<comment> \<open>example 3\<close>
    1.16  
    1.17  text \<open>
    1.18    \<^noindent> Here in example 1, the term refers to the local class
     2.1 --- a/src/Doc/Functions/Functions.thy	Thu Jan 11 13:48:17 2018 +0100
     2.2 +++ b/src/Doc/Functions/Functions.thy	Fri Jan 12 14:08:53 2018 +0100
     2.3 @@ -1095,11 +1095,11 @@
     2.4    let ?R = "measure (\<lambda>x. 101 - x)"
     2.5    show "wf ?R" ..
     2.6  
     2.7 -  fix n :: nat assume "\<not> 100 < n" -- "Assumptions for both calls"
     2.8 +  fix n :: nat assume "\<not> 100 < n" \<comment> "Assumptions for both calls"
     2.9  
    2.10 -  thus "(n + 11, n) \<in> ?R" by simp -- "Inner call"
    2.11 +  thus "(n + 11, n) \<in> ?R" by simp \<comment> "Inner call"
    2.12  
    2.13 -  assume inner_trm: "f91_dom (n + 11)" -- "Outer call"
    2.14 +  assume inner_trm: "f91_dom (n + 11)" \<comment> "Outer call"
    2.15    with f91_estimate have "n + 11 < f91 (n + 11) + 11" .
    2.16    with \<open>\<not> 100 < n\<close> show "(f91 (n + 11), n) \<in> ?R" by simp
    2.17  qed
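
For context, the Functions.thy hunk above sits inside the termination proof of McCarthy's 91 function. A sketch of the definition that the proof obligations above (wf ?R, inner call, outer call) belong to, assuming the function-package setup used in that document; the pat_completeness step is part of that standard setup, not of this diff:

    (* f91 as a function-package definition; termination is proved separately,
       via the measure (\<lambda>x. 101 - x) relation shown in the hunk above *)
    function f91 :: "nat \<Rightarrow> nat" where
      "f91 n = (if 100 < n then n - 10 else f91 (f91 (n + 11)))"
    by pat_completeness auto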
     3.1 --- a/src/Doc/How_to_Prove_it/How_to_Prove_it.thy	Thu Jan 11 13:48:17 2018 +0100
     3.2 +++ b/src/Doc/How_to_Prove_it/How_to_Prove_it.thy	Fri Jan 12 14:08:53 2018 +0100
     3.3 @@ -3,7 +3,7 @@
     3.4  imports Complex_Main
     3.5  begin
     3.6  (*>*)
     3.7 -text{*
     3.8 +text\<open>
     3.9  \chapter{@{theory Main}}
    3.10  
    3.11  \section{Natural numbers}
    3.12 @@ -34,12 +34,12 @@
    3.13  
    3.14  \noindent
    3.15  Example:
    3.16 -*}
    3.17 +\<close>
    3.18  
    3.19  lemma fixes x :: int shows "x ^ 3 = x * x * x"
    3.20  by (simp add: numeral_eq_Suc)
    3.21  
    3.22 -text{* This is a typical situation: function ``@{text"^"}'' is defined
    3.23 +text\<open>This is a typical situation: function ``@{text"^"}'' is defined
    3.24  by pattern matching on @{const Suc} but is applied to a numeral.
    3.25  
    3.26  Note: simplification with @{thm[source] numeral_eq_Suc} will convert all numerals.
    3.27 @@ -80,7 +80,7 @@
    3.28  But what to do when proper multiplication is involved?
    3.29  At this point it can be helpful to simplify with the lemma list
    3.30  @{thm [source] algebra_simps}. Examples:
    3.31 -*}
    3.32 +\<close>
    3.33  
    3.34  lemma fixes x :: int
    3.35    shows "(x + y) * (y - z) = (y - z) * x + y * (y-z)"
    3.36 @@ -90,7 +90,7 @@
    3.37    shows "(x + y) * (y - z) = (y - z) * x + y * (y-z)"
    3.38  by(simp add: algebra_simps)
    3.39  
    3.40 -text{*
    3.41 +text\<open>
    3.42  Rewriting with @{thm[source] algebra_simps} has the following effect:
    3.43  terms are rewritten into a normal form by multiplying out,
    3.44  rearranging sums and products into some canonical order.
    3.45 @@ -101,33 +101,33 @@
    3.46  and @{class comm_ring}) this yields a decision procedure for equality.
    3.47  
    3.48  Additional function and predicate symbols are not a problem either:
    3.49 -*}
    3.50 +\<close>
    3.51  
    3.52  lemma fixes f :: "int \<Rightarrow> int" shows "2 * f(x*y) - f(y*x) < f(y*x) + 1"
    3.53  by(simp add: algebra_simps)
    3.54  
    3.55 -text{* Here @{thm[source]algebra_simps} merely has the effect of rewriting
    3.56 +text\<open>Here @{thm[source]algebra_simps} merely has the effect of rewriting
    3.57  @{term"y*x"} to @{term"x*y"} (or the other way around). This yields
    3.58  a problem of the form @{prop"2*t - t < t + (1::int)"} and we are back in the
    3.59  realm of linear arithmetic.
    3.60  
    3.61  Because @{thm[source]algebra_simps} multiplies out, terms can explode.
    3.62  If one merely wants to bring sums or products into a canonical order
    3.63 -it suffices to rewrite with @{thm [source] ac_simps}: *}
    3.64 +it suffices to rewrite with @{thm [source] ac_simps}:\<close>
    3.65  
    3.66  lemma fixes f :: "int \<Rightarrow> int" shows "f(x*y*z) - f(z*x*y) = 0"
    3.67  by(simp add: ac_simps)
    3.68  
    3.69 -text{* The lemmas @{thm[source]algebra_simps} take care of addition, subtraction
    3.70 +text\<open>The lemmas @{thm[source]algebra_simps} take care of addition, subtraction
    3.71  and multiplication (algebraic structures up to rings) but ignore division (fields).
    3.72  The lemmas @{thm[source]field_simps} also deal with division:
    3.73 -*}
    3.74 +\<close>
    3.75  
    3.76  lemma fixes x :: real shows "x+z \<noteq> 0 \<Longrightarrow> 1 + y/(x+z) = (x+y+z)/(x+z)"
    3.77  by(simp add: field_simps)
    3.78  
    3.79 -text{* Warning: @{thm[source]field_simps} can blow up your terms
    3.80 -beyond recognition. *}
    3.81 +text\<open>Warning: @{thm[source]field_simps} can blow up your terms
    3.82 +beyond recognition.\<close>
    3.83  
    3.84  (*<*)
    3.85  end
     4.1 --- a/src/Doc/Logics_ZF/FOL_examples.thy	Thu Jan 11 13:48:17 2018 +0100
     4.2 +++ b/src/Doc/Logics_ZF/FOL_examples.thy	Fri Jan 12 14:08:53 2018 +0100
     4.3 @@ -1,30 +1,30 @@
     4.4 -section{*Examples of Classical Reasoning*}
     4.5 +section\<open>Examples of Classical Reasoning\<close>
     4.6  
     4.7  theory FOL_examples imports FOL begin
     4.8  
     4.9  lemma "EX y. ALL x. P(y)-->P(x)"
    4.10 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    4.11 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    4.12  apply (rule exCI)
    4.13 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    4.14 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    4.15  apply (rule allI)
    4.16 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    4.17 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    4.18  apply (rule impI)
    4.19 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    4.20 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    4.21  apply (erule allE)
    4.22 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    4.23 -txt{*see below for @{text allI} combined with @{text swap}*}
    4.24 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    4.25 +txt\<open>see below for @{text allI} combined with @{text swap}\<close>
    4.26  apply (erule allI [THEN [2] swap])
    4.27 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    4.28 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    4.29  apply (rule impI)
    4.30 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    4.31 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    4.32  apply (erule notE)
    4.33 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    4.34 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    4.35  apply assumption
    4.36  done
    4.37  
    4.38 -text {*
    4.39 +text \<open>
    4.40  @{thm[display] allI [THEN [2] swap]}
    4.41 -*}
    4.42 +\<close>
    4.43  
    4.44  lemma "EX y. ALL x. P(y)-->P(x)"
    4.45  by blast
     5.1 --- a/src/Doc/Logics_ZF/IFOL_examples.thy	Thu Jan 11 13:48:17 2018 +0100
     5.2 +++ b/src/Doc/Logics_ZF/IFOL_examples.thy	Fri Jan 12 14:08:53 2018 +0100
     5.3 @@ -1,56 +1,56 @@
     5.4 -section{*Examples of Intuitionistic Reasoning*}
     5.5 +section\<open>Examples of Intuitionistic Reasoning\<close>
     5.6  
     5.7  theory IFOL_examples imports IFOL begin
     5.8  
     5.9 -text{*Quantifier example from the book Logic and Computation*}
    5.10 +text\<open>Quantifier example from the book Logic and Computation\<close>
    5.11  lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
    5.12 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.13 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.14  apply (rule impI)
    5.15 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.16 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.17  apply (rule allI)
    5.18 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.19 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.20  apply (rule exI)
    5.21 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.22 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.23  apply (erule exE)
    5.24 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.25 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.26  apply (erule allE)
    5.27 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.28 -txt{*Now @{text "apply assumption"} fails*}
    5.29 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.30 +txt\<open>Now @{text "apply assumption"} fails\<close>
    5.31  oops
    5.32  
    5.33 -text{*Trying again, with the same first two steps*}
    5.34 +text\<open>Trying again, with the same first two steps\<close>
    5.35  lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
    5.36 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.37 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.38  apply (rule impI)
    5.39 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.40 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.41  apply (rule allI)
    5.42 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.43 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.44  apply (erule exE)
    5.45 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.46 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.47  apply (rule exI)
    5.48 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.49 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.50  apply (erule allE)
    5.51 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.52 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.53  apply assumption
    5.54 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.55 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.56  done
    5.57  
    5.58  lemma "(EX y. ALL x. Q(x,y)) -->  (ALL x. EX y. Q(x,y))"
    5.59 -by (tactic {*IntPr.fast_tac @{context} 1*})
    5.60 +by (tactic \<open>IntPr.fast_tac @{context} 1\<close>)
    5.61  
    5.62 -text{*Example of Dyckhoff's method*}
    5.63 +text\<open>Example of Dyckhoff's method\<close>
    5.64  lemma "~ ~ ((P-->Q) | (Q-->P))"
    5.65 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.66 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.67  apply (unfold not_def)
    5.68 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.69 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.70  apply (rule impI)
    5.71 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.72 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.73  apply (erule disj_impE)
    5.74 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.75 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.76  apply (erule imp_impE)
    5.77 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.78 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.79   apply (erule imp_impE)
    5.80 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    5.81 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    5.82  apply assumption 
    5.83  apply (erule FalseE)+
    5.84  done
     6.1 --- a/src/Doc/Logics_ZF/If.thy	Thu Jan 11 13:48:17 2018 +0100
     6.2 +++ b/src/Doc/Logics_ZF/If.thy	Fri Jan 12 14:08:53 2018 +0100
     6.3 @@ -12,35 +12,35 @@
     6.4  
     6.5  lemma ifI:
     6.6      "[| P ==> Q; ~P ==> R |] ==> if(P,Q,R)"
     6.7 -  --{* @{subgoals[display,indent=0,margin=65]} *}
     6.8 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
     6.9  apply (simp add: if_def)
    6.10 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.11 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.12  apply blast
    6.13  done
    6.14  
    6.15  lemma ifE:
    6.16     "[| if(P,Q,R);  [| P; Q |] ==> S; [| ~P; R |] ==> S |] ==> S"
    6.17 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.18 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.19  apply (simp add: if_def)
    6.20 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.21 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.22  apply blast
    6.23  done
    6.24  
    6.25  lemma if_commute: "if(P, if(Q,A,B), if(Q,C,D)) <-> if(Q, if(P,A,C), if(P,B,D))"
    6.26 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.27 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.28  apply (rule iffI)
    6.29 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.30 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.31  apply (erule ifE)
    6.32 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.33 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.34  apply (erule ifE)
    6.35 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.36 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.37  apply (rule ifI)
    6.38 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.39 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.40  apply (rule ifI)
    6.41 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.42 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.43  oops
    6.44  
    6.45 -text{*Trying again from the beginning in order to use @{text blast}*}
    6.46 +text\<open>Trying again from the beginning in order to use @{text blast}\<close>
    6.47  declare ifI [intro!]
    6.48  declare ifE [elim!]
    6.49  
    6.50 @@ -49,34 +49,34 @@
    6.51  
    6.52  
    6.53  lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))"
    6.54 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.55 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.56  by blast
    6.57  
    6.58 -text{*Trying again from the beginning in order to prove from the definitions*}
    6.59 +text\<open>Trying again from the beginning in order to prove from the definitions\<close>
    6.60  lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,A,B))"
    6.61 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.62 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.63  apply (simp add: if_def)
    6.64 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.65 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.66  apply blast
    6.67  done
    6.68  
    6.69  
    6.70 -text{*An invalid formula.  High-level rules permit a simpler diagnosis*}
    6.71 +text\<open>An invalid formula.  High-level rules permit a simpler diagnosis\<close>
    6.72  lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))"
    6.73 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.74 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.75  apply auto
    6.76 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.77 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.78  (*The next step will fail unless subgoals remain*)
    6.79  apply (tactic all_tac)
    6.80  oops
    6.81  
    6.82 -text{*Trying again from the beginning in order to prove from the definitions*}
    6.83 +text\<open>Trying again from the beginning in order to prove from the definitions\<close>
    6.84  lemma "if(if(P,Q,R), A, B) <-> if(P, if(Q,A,B), if(R,B,A))"
    6.85 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.86 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.87  apply (simp add: if_def)
    6.88 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.89 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.90  apply (auto) 
    6.91 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    6.92 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    6.93  (*The next step will fail unless subgoals remain*)
    6.94  apply (tactic all_tac)
    6.95  oops
     7.1 --- a/src/Doc/Logics_ZF/ZF_Isar.thy	Thu Jan 11 13:48:17 2018 +0100
     7.2 +++ b/src/Doc/Logics_ZF/ZF_Isar.thy	Fri Jan 12 14:08:53 2018 +0100
     7.3 @@ -6,11 +6,11 @@
     7.4  ML_file "../antiquote_setup.ML"
     7.5  (*>*)
     7.6  
     7.7 -chapter {* Some Isar language elements *}
     7.8 +chapter \<open>Some Isar language elements\<close>
     7.9  
    7.10 -section {* Type checking *}
    7.11 +section \<open>Type checking\<close>
    7.12  
    7.13 -text {*
    7.14 +text \<open>
    7.15    The ZF logic is essentially untyped, so the concept of ``type
    7.16    checking'' is performed as logical reasoning about set-membership
    7.17    statements.  A special method assists users in this task; a version
    7.18 @@ -39,14 +39,14 @@
    7.19    the context.
    7.20  
    7.21    \end{description}
    7.22 -*}
    7.23 +\<close>
    7.24  
    7.25  
    7.26 -section {* (Co)Inductive sets and datatypes *}
    7.27 +section \<open>(Co)Inductive sets and datatypes\<close>
    7.28  
    7.29 -subsection {* Set definitions *}
    7.30 +subsection \<open>Set definitions\<close>
    7.31  
    7.32 -text {*
    7.33 +text \<open>
    7.34    In ZF everything is a set.  The generic inductive package also
    7.35    provides a specific view for ``datatype'' specifications.
    7.36    Coinductive definitions are available in both cases, too.
    7.37 @@ -97,12 +97,12 @@
    7.38    See @{cite "isabelle-ZF"} for further information on inductive
    7.39    definitions in ZF, but note that this covers the old-style theory
    7.40    format.
    7.41 -*}
    7.42 +\<close>
    7.43  
    7.44  
    7.45 -subsection {* Primitive recursive functions *}
    7.46 +subsection \<open>Primitive recursive functions\<close>
    7.47  
    7.48 -text {*
    7.49 +text \<open>
    7.50    \begin{matharray}{rcl}
    7.51      @{command_def (ZF) "primrec"} & : & @{text "theory \<rightarrow> theory"} \\
    7.52    \end{matharray}
    7.53 @@ -110,12 +110,12 @@
    7.54    @{rail \<open>
    7.55      @@{command (ZF) primrec} (@{syntax thmdecl}? @{syntax prop} +)
    7.56    \<close>}
    7.57 -*}
    7.58 +\<close>
    7.59  
    7.60  
    7.61 -subsection {* Cases and induction: emulating tactic scripts *}
    7.62 +subsection \<open>Cases and induction: emulating tactic scripts\<close>
    7.63  
    7.64 -text {*
    7.65 +text \<open>
    7.66    The following important tactical tools of Isabelle/ZF have been
    7.67    ported to Isar.  These should not be used in proper proof texts.
    7.68  
    7.69 @@ -133,6 +133,6 @@
    7.70      ;
    7.71      @@{command (ZF) inductive_cases} (@{syntax thmdecl}? (@{syntax prop} +) + @'and')
    7.72    \<close>}
    7.73 -*}
    7.74 +\<close>
    7.75  
    7.76  end
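
The (ZF) primrec command specified above is used concretely in ZF_examples.thy later in this changeset (function n_nodes over binary trees). A hypothetical sketch of such a definition: the consts declaration and the constructor Br(a,l,r) are visible in that file's hunks, but the constructor Lf and the exact equations are assumptions made for illustration, not part of this diff:

    consts  n_nodes :: "i => i"
    primrec
      "n_nodes(Lf) = 0"                                        (* assumed leaf case *)
      "n_nodes(Br(a,l,r)) = succ(n_nodes(l) #+ n_nodes(r))"    (* #+ is ZF nat addition *)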
     8.1 --- a/src/Doc/Logics_ZF/ZF_examples.thy	Thu Jan 11 13:48:17 2018 +0100
     8.2 +++ b/src/Doc/Logics_ZF/ZF_examples.thy	Fri Jan 12 14:08:53 2018 +0100
     8.3 @@ -1,8 +1,8 @@
     8.4 -section{*Examples of Reasoning in ZF Set Theory*}
     8.5 +section\<open>Examples of Reasoning in ZF Set Theory\<close>
     8.6  
     8.7  theory ZF_examples imports ZFC begin
     8.8  
     8.9 -subsection {* Binary Trees *}
    8.10 +subsection \<open>Binary Trees\<close>
    8.11  
    8.12  consts
    8.13    bt :: "i => i"
    8.14 @@ -12,11 +12,11 @@
    8.15  
    8.16  declare bt.intros [simp]
    8.17  
    8.18 -text{*Induction via tactic emulation*}
    8.19 +text\<open>Induction via tactic emulation\<close>
    8.20  lemma Br_neq_left [rule_format]: "l \<in> bt(A) ==> \<forall>x r. Br(x, l, r) \<noteq> l"
    8.21 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    8.22 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    8.23    apply (induct_tac l)
    8.24 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    8.25 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    8.26    apply auto
    8.27    done
    8.28  
    8.29 @@ -25,26 +25,26 @@
    8.30    apply (tactic {*exhaust_tac "l" 1*})
    8.31  *)
    8.32  
    8.33 -text{*The new induction method, which I don't understand*}
    8.34 +text\<open>The new induction method, which I don't understand\<close>
    8.35  lemma Br_neq_left': "l \<in> bt(A) ==> (!!x r. Br(x, l, r) \<noteq> l)"
    8.36 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    8.37 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    8.38    apply (induct set: bt)
    8.39 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    8.40 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    8.41    apply auto
    8.42    done
    8.43  
    8.44  lemma Br_iff: "Br(a,l,r) = Br(a',l',r') <-> a=a' & l=l' & r=r'"
    8.45 -  -- "Proving a freeness theorem."
    8.46 +  \<comment> "Proving a freeness theorem."
    8.47    by (blast elim!: bt.free_elims)
    8.48  
    8.49  inductive_cases Br_in_bt: "Br(a,l,r) \<in> bt(A)"
    8.50 -  -- "An elimination rule, for type-checking."
    8.51 +  \<comment> "An elimination rule, for type-checking."
    8.52  
    8.53 -text {*
    8.54 +text \<open>
    8.55  @{thm[display] Br_in_bt[no_vars]}
    8.56 -*}
    8.57 +\<close>
    8.58  
    8.59 -subsection{*Primitive recursion*}
    8.60 +subsection\<open>Primitive recursion\<close>
    8.61  
    8.62  consts  n_nodes :: "i => i"
    8.63  primrec
    8.64 @@ -71,7 +71,7 @@
    8.65   by (simp add: n_nodes_tail_def n_nodes_aux_eq) 
    8.66  
    8.67  
    8.68 -subsection {*Inductive definitions*}
    8.69 +subsection \<open>Inductive definitions\<close>
    8.70  
    8.71  consts  Fin       :: "i=>i"
    8.72  inductive
    8.73 @@ -114,7 +114,7 @@
    8.74    type_intros  llist.intros
    8.75  
    8.76  
    8.77 -subsection{*Powerset example*}
    8.78 +subsection\<open>Powerset example\<close>
    8.79  
    8.80  lemma Pow_mono: "A\<subseteq>B  ==>  Pow(A) \<subseteq> Pow(B)"
    8.81  apply (rule subsetI)
    8.82 @@ -124,78 +124,78 @@
    8.83  done
    8.84  
    8.85  lemma "Pow(A Int B) = Pow(A) Int Pow(B)"
    8.86 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    8.87 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    8.88  apply (rule equalityI)
    8.89 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    8.90 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    8.91  apply (rule Int_greatest)
    8.92 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    8.93 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    8.94  apply (rule Int_lower1 [THEN Pow_mono])
    8.95 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    8.96 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
    8.97  apply (rule Int_lower2 [THEN Pow_mono])
    8.98 -  --{* @{subgoals[display,indent=0,margin=65]} *}
    8.99 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.100  apply (rule subsetI)
   8.101 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.102 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.103  apply (erule IntE)
   8.104 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.105 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.106  apply (rule PowI)
   8.107 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.108 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.109  apply (drule PowD)+
   8.110 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.111 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.112  apply (rule Int_greatest)
   8.113 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.114 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.115  apply (assumption+)
   8.116  done
   8.117  
   8.118 -text{*Trying again from the beginning in order to use @{text blast}*}
   8.119 +text\<open>Trying again from the beginning in order to use @{text blast}\<close>
   8.120  lemma "Pow(A Int B) = Pow(A) Int Pow(B)"
   8.121  by blast
   8.122  
   8.123  
   8.124  lemma "C\<subseteq>D ==> Union(C) \<subseteq> Union(D)"
   8.125 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.126 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.127  apply (rule subsetI)
   8.128 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.129 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.130  apply (erule UnionE)
   8.131 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.132 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.133  apply (rule UnionI)
   8.134 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.135 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.136  apply (erule subsetD)
   8.137 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.138 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.139  apply assumption 
   8.140 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.141 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.142  apply assumption 
   8.143  done
   8.144  
   8.145 -text{*A more abstract version of the same proof*}
   8.146 +text\<open>A more abstract version of the same proof\<close>
   8.147  
   8.148  lemma "C\<subseteq>D ==> Union(C) \<subseteq> Union(D)"
   8.149 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.150 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.151  apply (rule Union_least)
   8.152 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.153 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.154  apply (rule Union_upper)
   8.155 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.156 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.157  apply (erule subsetD, assumption)
   8.158  done
   8.159  
   8.160  
   8.161  lemma "[| a \<in> A;  f \<in> A->B;  g \<in> C->D;  A \<inter> C = 0 |] ==> (f \<union> g)`a = f`a"
   8.162 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.163 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.164  apply (rule apply_equality)
   8.165 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.166 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.167  apply (rule UnI1)
   8.168 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.169 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.170  apply (rule apply_Pair)
   8.171 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.172 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.173  apply assumption 
   8.174 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.175 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.176  apply assumption 
   8.177 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.178 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.179  apply (rule fun_disjoint_Un)
   8.180 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.181 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.182  apply assumption 
   8.183 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.184 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.185  apply assumption 
   8.186 -  --{* @{subgoals[display,indent=0,margin=65]} *}
   8.187 +  \<comment>\<open>@{subgoals[display,indent=0,margin=65]}\<close>
   8.188  apply assumption 
   8.189  done
   8.190  
     9.1 --- a/src/Doc/Prog_Prove/Basics.thy	Thu Jan 11 13:48:17 2018 +0100
     9.2 +++ b/src/Doc/Prog_Prove/Basics.thy	Fri Jan 12 14:08:53 2018 +0100
     9.3 @@ -3,7 +3,7 @@
     9.4  imports Main
     9.5  begin
     9.6  (*>*)
     9.7 -text{*
     9.8 +text\<open>
     9.9  This chapter introduces HOL as a functional programming language and shows
    9.10  how to prove properties of functional programs by induction.
    9.11  
    9.12 @@ -149,7 +149,7 @@
    9.13  to see the proof state in the output window.
    9.14  \end{warn}
    9.15  \fi
    9.16 -*}
    9.17 +\<close>
    9.18  (*<*)
    9.19  end
    9.20  (*>*)
    10.1 --- a/src/Doc/Prog_Prove/Bool_nat_list.thy	Thu Jan 11 13:48:17 2018 +0100
    10.2 +++ b/src/Doc/Prog_Prove/Bool_nat_list.thy	Fri Jan 12 14:08:53 2018 +0100
    10.3 @@ -4,7 +4,7 @@
    10.4  begin
    10.5  (*>*)
    10.6  
    10.7 -text{*
    10.8 +text\<open>
    10.9  \vspace{-4ex}
   10.10  \section{\texorpdfstring{Types @{typ bool}, @{typ nat} and @{text list}}{Types bool, nat and list}}
   10.11  
   10.12 @@ -19,13 +19,13 @@
   10.13  with the two values \indexed{@{const True}}{True} and \indexed{@{const False}}{False} and
   10.14  with many predefined functions:  @{text "\<not>"}, @{text "\<and>"}, @{text "\<or>"}, @{text
   10.15  "\<longrightarrow>"}, etc. Here is how conjunction could be defined by pattern matching:
   10.16 -*}
   10.17 +\<close>
   10.18  
   10.19  fun conj :: "bool \<Rightarrow> bool \<Rightarrow> bool" where
   10.20  "conj True True = True" |
   10.21  "conj _ _ = False"
   10.22  
   10.23 -text{* Both the datatype and function definitions roughly follow the syntax
   10.24 +text\<open>Both the datatype and function definitions roughly follow the syntax
   10.25  of functional programming languages.
   10.26  
   10.27  \subsection{Type \indexed{@{typ nat}}{nat}}
   10.28 @@ -37,13 +37,13 @@
   10.29  @{text 0}, @{term"Suc 0"}, @{term"Suc(Suc 0)"}, etc.
   10.30  There are many predefined functions: @{text "+"}, @{text "*"}, @{text
   10.31  "\<le>"}, etc. Here is how you could define your own addition:
   10.32 -*}
   10.33 +\<close>
   10.34  
   10.35  fun add :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
   10.36  "add 0 n = n" |
   10.37  "add (Suc m) n = Suc(add m n)"
   10.38  
   10.39 -text{* And here is a proof of the fact that @{prop"add m 0 = m"}: *}
   10.40 +text\<open>And here is a proof of the fact that @{prop"add m 0 = m"}:\<close>
   10.41  
   10.42  lemma add_02: "add m 0 = m"
   10.43  apply(induction m)
   10.44 @@ -53,7 +53,7 @@
   10.45  lemma "add m 0 = m"
   10.46  apply(induction m)
   10.47  (*>*)
   10.48 -txt{* The \isacom{lemma} command starts the proof and gives the lemma
   10.49 +txt\<open>The \isacom{lemma} command starts the proof and gives the lemma
   10.50  a name, @{text add_02}. Properties of recursively defined functions
   10.51  need to be established by induction in most cases.
   10.52  Command \isacom{apply}@{text"(induction m)"} instructs Isabelle to
   10.53 @@ -75,11 +75,11 @@
   10.54  the induction hypothesis.
   10.55  As a result of that final \isacom{done}, Isabelle associates the lemma
   10.56  just proved with its name. You can now inspect the lemma with the command
   10.57 -*}
   10.58 +\<close>
   10.59  
   10.60  thm add_02
   10.61  
   10.62 -txt{* which displays @{thm[show_question_marks,display] add_02} The free
   10.63 +txt\<open>which displays @{thm[show_question_marks,display] add_02} The free
   10.64  variable @{text m} has been replaced by the \concept{unknown}
   10.65  @{text"?m"}. There is no logical difference between the two but there is an
   10.66  operational one: unknowns can be instantiated, which is what you want after
   10.67 @@ -153,7 +153,7 @@
   10.68  
   10.69  Although lists are already predefined, we define our own copy for
   10.70  demonstration purposes:
   10.71 -*}
   10.72 +\<close>
   10.73  (*<*)
   10.74  apply(auto)
   10.75  done 
   10.76 @@ -164,7 +164,7 @@
   10.77  for map: map
   10.78  (*>*)
   10.79  
   10.80 -text{*
   10.81 +text\<open>
   10.82  \begin{itemize}
   10.83  \item Type @{typ "'a list"} is the type of lists over elements of type @{typ 'a}. Because @{typ 'a} is a type variable, lists are in fact \concept{polymorphic}: the elements of a list can be of arbitrary type (but must all be of the same type).
   10.84  \item Lists have two constructors: @{const Nil}, the empty list, and @{const Cons}, which puts an element (of type @{typ 'a}) in front of a list (of type @{typ "'a list"}).
   10.85 @@ -175,7 +175,7 @@
   10.86  types of a constructor needs to be enclosed in quotation marks, unless
   10.87  it is just an identifier (e.g., @{typ nat} or @{typ 'a}).
   10.88  \end{itemize}
   10.89 -We also define two standard functions, append and reverse: *}
   10.90 +We also define two standard functions, append and reverse:\<close>
   10.91  
   10.92  fun app :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
   10.93  "app Nil ys = ys" |
   10.94 @@ -185,18 +185,18 @@
   10.95  "rev Nil = Nil" |
   10.96  "rev (Cons x xs) = app (rev xs) (Cons x Nil)"
   10.97  
   10.98 -text{* By default, variables @{text xs}, @{text ys} and @{text zs} are of
   10.99 +text\<open>By default, variables @{text xs}, @{text ys} and @{text zs} are of
  10.100  @{text list} type.
  10.101  
  10.102 -Command \indexed{\isacommand{value}}{value} evaluates a term. For example, *}
  10.103 +Command \indexed{\isacommand{value}}{value} evaluates a term. For example,\<close>
  10.104  
  10.105  value "rev(Cons True (Cons False Nil))"
  10.106  
  10.107 -text{* yields the result @{value "rev(Cons True (Cons False Nil))"}. This works symbolically, too: *}
  10.108 +text\<open>yields the result @{value "rev(Cons True (Cons False Nil))"}. This works symbolically, too:\<close>
  10.109  
  10.110  value "rev(Cons a (Cons b Nil))"
  10.111  
  10.112 -text{* yields @{value "rev(Cons a (Cons b Nil))"}.
  10.113 +text\<open>yields @{value "rev(Cons a (Cons b Nil))"}.
  10.114  \medskip
  10.115  
  10.116  Figure~\ref{fig:MyList} shows the theory created so far.
  10.117 @@ -238,28 +238,28 @@
  10.118  We will now demonstrate the typical proof process, which involves
  10.119  the formulation and proof of auxiliary lemmas.
  10.120  Our goal is to show that reversing a list twice produces the original
  10.121 -list. *}
  10.122 +list.\<close>
  10.123  
  10.124  theorem rev_rev [simp]: "rev(rev xs) = xs"
  10.125  
  10.126 -txt{* Commands \isacom{theorem} and \isacom{lemma} are
  10.127 +txt\<open>Commands \isacom{theorem} and \isacom{lemma} are
  10.128  interchangeable and merely indicate the importance we attach to a
  10.129  proposition. Via the bracketed attribute @{text simp} we also tell Isabelle
  10.130  to make the eventual theorem a \conceptnoidx{simplification rule}: future proofs
  10.131  involving simplification will replace occurrences of @{term"rev(rev xs)"} by
  10.132 -@{term"xs"}. The proof is by induction: *}
  10.133 +@{term"xs"}. The proof is by induction:\<close>
  10.134  
  10.135  apply(induction xs)
  10.136  
  10.137 -txt{*
  10.138 +txt\<open>
  10.139  As explained above, we obtain two subgoals, namely the base case (@{const Nil}) and the induction step (@{const Cons}):
  10.140  @{subgoals[display,indent=0,margin=65]}
  10.141  Let us try to solve both goals automatically:
  10.142 -*}
  10.143 +\<close>
  10.144  
  10.145  apply(auto)
  10.146  
  10.147 -txt{*Subgoal~1 is proved, and disappears; the simplified version
  10.148 +txt\<open>Subgoal~1 is proved, and disappears; the simplified version
  10.149  of subgoal~2 becomes the new subgoal~1:
  10.150  @{subgoals[display,indent=0,margin=70]}
  10.151  In order to simplify this subgoal further, a lemma suggests itself.
  10.152 @@ -267,22 +267,22 @@
  10.153  \subsubsection{A First Lemma}
  10.154  
  10.155  We insert the following lemma in front of the main theorem:
  10.156 -*}
  10.157 +\<close>
  10.158  (*<*)
  10.159  oops
  10.160  (*>*)
  10.161  lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"
  10.162  
  10.163 -txt{* There are two variables that we could induct on: @{text xs} and
  10.164 +txt\<open>There are two variables that we could induct on: @{text xs} and
  10.165  @{text ys}. Because @{const app} is defined by recursion on
  10.166  the first argument, @{text xs} is the correct one:
  10.167 -*}
  10.168 +\<close>
  10.169  
  10.170  apply(induction xs)
  10.171  
  10.172 -txt{* This time not even the base case is solved automatically: *}
  10.173 +txt\<open>This time not even the base case is solved automatically:\<close>
  10.174  apply(auto)
  10.175 -txt{*
  10.176 +txt\<open>
  10.177  \vspace{-5ex}
  10.178  @{subgoals[display,goals_limit=1]}
  10.179  Again, we need to abandon this proof attempt and prove another simple lemma
  10.180 @@ -291,7 +291,7 @@
  10.181  \subsubsection{A Second Lemma}
  10.182  
  10.183  We again try the canonical proof procedure:
  10.184 -*}
  10.185 +\<close>
  10.186  (*<*)
  10.187  oops
  10.188  (*>*)
  10.189 @@ -300,16 +300,16 @@
  10.190  apply(auto)
  10.191  done
  10.192  
  10.193 -text{*
  10.194 +text\<open>
  10.195  Thankfully, this worked.
  10.196  Now we can continue with our stuck proof attempt of the first lemma:
  10.197 -*}
  10.198 +\<close>
  10.199  
  10.200  lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"
  10.201  apply(induction xs)
  10.202  apply(auto)
  10.203  
  10.204 -txt{*
  10.205 +txt\<open>
  10.206  We find that this time @{text"auto"} solves the base case, but the
  10.207  induction step merely simplifies to
  10.208  @{subgoals[display,indent=0,goals_limit=1]}
  10.209 @@ -319,7 +319,7 @@
  10.210  \subsubsection{Associativity of @{const app}}
  10.211  
  10.212  The canonical proof procedure succeeds without further ado:
  10.213 -*}
  10.214 +\<close>
  10.215  (*<*)oops(*>*)
  10.216  lemma app_assoc [simp]: "app (app xs ys) zs = app xs (app ys zs)"
  10.217  apply(induction xs)
  10.218 @@ -336,7 +336,7 @@
  10.219  apply(auto)
  10.220  done
  10.221  (*>*)
  10.222 -text{*
  10.223 +text\<open>
  10.224  Finally the proofs of @{thm[source] rev_app} and @{thm[source] rev_rev}
  10.225  succeed, too.
  10.226  
  10.227 @@ -457,7 +457,7 @@
  10.228  \mbox{@{text"sum_upto n"}} @{text"="} @{text"0 + ... + n"} and prove
  10.229  @{prop" sum_upto (n::nat) = n * (n+1) div 2"}.
  10.230  \end{exercise}
  10.231 -*}
  10.232 +\<close>
  10.233  (*<*)
  10.234  end
  10.235  (*>*)
    11.1 --- a/src/Doc/Prog_Prove/Isar.thy	Thu Jan 11 13:48:17 2018 +0100
    11.2 +++ b/src/Doc/Prog_Prove/Isar.thy	Fri Jan 12 14:08:53 2018 +0100
    11.3 @@ -4,7 +4,7 @@
    11.4  begin
    11.5  declare [[quick_and_dirty]]
    11.6  (*>*)
    11.7 -text{*
    11.8 +text\<open>
    11.9  Apply-scripts are unreadable and hard to maintain. The language of choice
   11.10  for larger proofs is \concept{Isar}. The two key features of Isar are:
   11.11  \begin{itemize}
   11.12 @@ -14,7 +14,7 @@
   11.13  \end{itemize}
   11.14  Whereas apply-scripts are like assembly language programs, Isar proofs
   11.15  are like structured programs with comments. A typical Isar proof looks like this:
   11.16 -*}text{*
   11.17 +\<close>text\<open>
   11.18  \begin{tabular}{@ {}l}
   11.19  \isacom{proof}\\
   11.20  \quad\isacom{assume} @{text"\""}$\mathit{formula}_0$@{text"\""}\\
   11.21 @@ -24,7 +24,7 @@
   11.22  \quad\isacom{show} @{text"\""}$\mathit{formula}_{n+1}$@{text"\""} \quad\isacom{by} @{text \<dots>}\\
   11.23  \isacom{qed}
   11.24  \end{tabular}
   11.25 -*}text{*
   11.26 +\<close>text\<open>
   11.27  It proves $\mathit{formula}_0 \Longrightarrow \mathit{formula}_{n+1}$
   11.28  (provided each proof step succeeds).
   11.29  The intermediate \isacom{have} statements are merely stepping stones
   11.30 @@ -89,7 +89,7 @@
   11.31  We show a number of proofs of Cantor's theorem that a function from a set to
   11.32  its powerset cannot be surjective, illustrating various features of Isar. The
   11.33  constant @{const surj} is predefined.
   11.34 -*}
   11.35 +\<close>
   11.36  
   11.37  lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
   11.38  proof
   11.39 @@ -99,7 +99,7 @@
   11.40    from 2 show "False" by blast
   11.41  qed
   11.42  
   11.43 -text{*
   11.44 +text\<open>
   11.45  The \isacom{proof} command lacks an explicit method by which to perform
   11.46  the proof. In such cases Isabelle tries to use some standard introduction
   11.47  rule, in the above case for @{text"\<not>"}:
   11.48 @@ -125,7 +125,7 @@
   11.49  in a UNIX pipe. In such cases the predefined name @{text this} can be used
   11.50  to refer to the proposition proved in the previous step. This allows us to
   11.51  eliminate all labels from our proof (we suppress the \isacom{lemma} statement):
   11.52 -*}
   11.53 +\<close>
   11.54  (*<*)
   11.55  lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
   11.56  (*>*)
   11.57 @@ -135,7 +135,7 @@
   11.58    from this show "False" by blast
   11.59  qed
   11.60  
   11.61 -text{* We have also taken the opportunity to compress the two \isacom{have}
   11.62 +text\<open>We have also taken the opportunity to compress the two \isacom{have}
   11.63  steps into one.
   11.64  
   11.65  To compact the text further, Isar has a few convenient abbreviations:
   11.66 @@ -150,7 +150,7 @@
   11.67  
   11.68  \noindent
   11.69  With the help of these abbreviations the proof becomes
   11.70 -*}
   11.71 +\<close>
   11.72  (*<*)
   11.73  lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
   11.74  (*>*)
   11.75 @@ -159,7 +159,7 @@
   11.76    hence "\<exists>a. {x. x \<notin> f x} = f a" by(auto simp: surj_def)
   11.77    thus "False" by blast
   11.78  qed
   11.79 -text{*
   11.80 +text\<open>
   11.81  
   11.82  There are two further linguistic variations:
   11.83  \medskip
   11.84 @@ -180,14 +180,14 @@
   11.85  Lemmas can also be stated in a more structured fashion. To demonstrate this
   11.86  feature with Cantor's theorem, we rephrase \noquotes{@{prop[source]"\<not> surj f"}}
   11.87  a little:
   11.88 -*}
   11.89 +\<close>
   11.90  
   11.91  lemma
   11.92    fixes f :: "'a \<Rightarrow> 'a set"
   11.93    assumes s: "surj f"
   11.94    shows "False"
   11.95  
   11.96 -txt{* The optional \isacom{fixes} part allows you to state the types of
   11.97 +txt\<open>The optional \isacom{fixes} part allows you to state the types of
   11.98  variables up front rather than by decorating one of their occurrences in the
   11.99  formula with a type constraint. The key advantage of the structured format is
  11.100  the \isacom{assumes} part that allows you to name each assumption; multiple
  11.101 @@ -195,7 +195,7 @@
  11.102  \isacom{shows} part gives the goal. The actual theorem that will come out of
  11.103  the proof is \noquotes{@{prop[source]"surj f \<Longrightarrow> False"}}, but during the proof the assumption
  11.104  \noquotes{@{prop[source]"surj f"}} is available under the name @{text s} like any other fact.
  11.105 -*}
  11.106 +\<close>
  11.107  
  11.108  proof -
  11.109    have "\<exists> a. {x. x \<notin> f x} = f a" using s
  11.110 @@ -203,7 +203,7 @@
  11.111    thus "False" by blast
  11.112  qed
  11.113  
  11.114 -text{*
  11.115 +text\<open>
  11.116  \begin{warn}
  11.117  Note the hyphen after the \isacom{proof} command.
  11.118  It is the null method that does nothing to the goal. Leaving it out would be asking
  11.119 @@ -235,42 +235,42 @@
  11.120  starting from a formula @{text P} we have the two cases @{text P} and
  11.121  @{prop"~P"}, and starting from a fact @{prop"P \<or> Q"}
  11.122  we have the two cases @{text P} and @{text Q}:
  11.123 -*}text_raw{*
  11.124 +\<close>text_raw\<open>
  11.125  \begin{tabular}{@ {}ll@ {}}
  11.126  \begin{minipage}[t]{.4\textwidth}
  11.127  \isa{%
  11.128 -*}
  11.129 +\<close>
  11.130  (*<*)lemma "R" proof-(*>*)
  11.131  show "R"
  11.132  proof cases
  11.133    assume "P"
  11.134 -  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
  11.135 -  show "R" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.136 +  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
  11.137 +  show "R" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.138  next
  11.139    assume "\<not> P"
  11.140 -  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
  11.141 -  show "R" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.142 +  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
  11.143 +  show "R" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.144  qed(*<*)oops(*>*)
  11.145 -text_raw {* }
  11.146 +text_raw \<open>}
  11.147  \end{minipage}\index{cases@@{text cases}}
  11.148  &
  11.149  \begin{minipage}[t]{.4\textwidth}
  11.150  \isa{%
  11.151 -*}
  11.152 +\<close>
  11.153  (*<*)lemma "R" proof-(*>*)
  11.154 -have "P \<or> Q" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.155 +have "P \<or> Q" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.156  then show "R"
  11.157  proof
  11.158    assume "P"
  11.159 -  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
  11.160 -  show "R" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.161 +  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
  11.162 +  show "R" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.163  next
  11.164    assume "Q"
  11.165 -  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
  11.166 -  show "R" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.167 +  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
  11.168 +  show "R" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.169  qed(*<*)oops(*>*)
  11.170  
  11.171 -text_raw {* }
  11.172 +text_raw \<open>}
  11.173  \end{minipage}
  11.174  \end{tabular}
  11.175  \medskip
  11.176 @@ -278,19 +278,19 @@
  11.177  How to prove a logical equivalence:
  11.178  \end{isamarkuptext}%
  11.179  \isa{%
  11.180 -*}
  11.181 +\<close>
  11.182  (*<*)lemma "P\<longleftrightarrow>Q" proof-(*>*)
  11.183  show "P \<longleftrightarrow> Q"
  11.184  proof
  11.185    assume "P"
  11.186 -  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
  11.187 -  show "Q" (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
  11.188 +  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
  11.189 +  show "Q" (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
  11.190  next
  11.191    assume "Q"
  11.192 -  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
  11.193 -  show "P" (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
  11.194 +  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
  11.195 +  show "P" (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
  11.196  qed(*<*)qed(*>*)
  11.197 -text_raw {* }
  11.198 +text_raw \<open>}
  11.199  \medskip
  11.200  \begin{isamarkuptext}%
  11.201  Proofs by contradiction (@{thm[source] ccontr} stands for ``classical contradiction''):
  11.202 @@ -298,30 +298,30 @@
  11.203  \begin{tabular}{@ {}ll@ {}}
  11.204  \begin{minipage}[t]{.4\textwidth}
  11.205  \isa{%
  11.206 -*}
  11.207 +\<close>
  11.208  (*<*)lemma "\<not> P" proof-(*>*)
  11.209  show "\<not> P"
  11.210  proof
  11.211    assume "P"
  11.212 -  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
  11.213 -  show "False" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.214 +  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
  11.215 +  show "False" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.216  qed(*<*)oops(*>*)
  11.217  
  11.218 -text_raw {* }
  11.219 +text_raw \<open>}
  11.220  \end{minipage}
  11.221  &
  11.222  \begin{minipage}[t]{.4\textwidth}
  11.223  \isa{%
  11.224 -*}
  11.225 +\<close>
  11.226  (*<*)lemma "P" proof-(*>*)
  11.227  show "P"
  11.228  proof (rule ccontr)
  11.229    assume "\<not>P"
  11.230 -  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
  11.231 -  show "False" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.232 +  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
  11.233 +  show "False" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.234  qed(*<*)oops(*>*)
  11.235  
  11.236 -text_raw {* }
  11.237 +text_raw \<open>}
  11.238  \end{minipage}
  11.239  \end{tabular}
  11.240  \medskip
  11.241 @@ -331,30 +331,30 @@
  11.242  \begin{tabular}{@ {}ll@ {}}
  11.243  \begin{minipage}[t]{.4\textwidth}
  11.244  \isa{%
  11.245 -*}
  11.246 +\<close>
  11.247  (*<*)lemma "ALL x. P x" proof-(*>*)
  11.248  show "\<forall>x. P(x)"
  11.249  proof
  11.250    fix x
  11.251 -  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
  11.252 -  show "P(x)" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.253 +  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
  11.254 +  show "P(x)" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.255  qed(*<*)oops(*>*)
  11.256  
  11.257 -text_raw {* }
  11.258 +text_raw \<open>}
  11.259  \end{minipage}
  11.260  &
  11.261  \begin{minipage}[t]{.4\textwidth}
  11.262  \isa{%
  11.263 -*}
  11.264 +\<close>
  11.265  (*<*)lemma "EX x. P(x)" proof-(*>*)
  11.266  show "\<exists>x. P(x)"
  11.267  proof
  11.268 -  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
  11.269 -  show "P(witness)" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.270 +  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
  11.271 +  show "P(witness)" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.272  qed
  11.273  (*<*)oops(*>*)
  11.274  
  11.275 -text_raw {* }
  11.276 +text_raw \<open>}
  11.277  \end{minipage}
  11.278  \end{tabular}
  11.279  \medskip
  11.280 @@ -369,12 +369,12 @@
  11.281  
  11.282  How to reason forward from \noquotes{@{prop[source] "\<exists>x. P(x)"}}:
  11.283  \end{isamarkuptext}%
  11.284 -*}
  11.285 +\<close>
  11.286  (*<*)lemma True proof- assume 1: "EX x. P x"(*>*)
  11.287 -have "\<exists>x. P(x)" (*<*)by(rule 1)(*>*)text_raw{*\ \isasymproof\\*}
  11.288 +have "\<exists>x. P(x)" (*<*)by(rule 1)(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.289  then obtain x where p: "P(x)" by blast
  11.290  (*<*)oops(*>*)
  11.291 -text{*
  11.292 +text\<open>
  11.293  After the \indexed{\isacom{obtain}}{obtain} step, @{text x} (we could have chosen any name)
  11.294  is a fixed local
  11.295  variable, and @{text p} is the name of the fact
  11.296 @@ -382,7 +382,7 @@
  11.297  This pattern works for one or more @{text x}.
  11.298  As an example of the \isacom{obtain} command, here is the proof of
  11.299  Cantor's theorem in more detail:
  11.300 -*}
  11.301 +\<close>
  11.302  
  11.303  lemma "\<not> surj(f :: 'a \<Rightarrow> 'a set)"
  11.304  proof
  11.305 @@ -393,7 +393,7 @@
  11.306    thus "False" by blast
  11.307  qed
  11.308  
  11.309 -text_raw{*
  11.310 +text_raw\<open>
  11.311  \begin{isamarkuptext}%
  11.312  
  11.313  Finally, how to prove set equality and subset relationship:
  11.314 @@ -401,31 +401,31 @@
  11.315  \begin{tabular}{@ {}ll@ {}}
  11.316  \begin{minipage}[t]{.4\textwidth}
  11.317  \isa{%
  11.318 -*}
  11.319 +\<close>
  11.320  (*<*)lemma "A = (B::'a set)" proof-(*>*)
  11.321  show "A = B"
  11.322  proof
  11.323 -  show "A \<subseteq> B" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.324 +  show "A \<subseteq> B" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.325  next
  11.326 -  show "B \<subseteq> A" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.327 +  show "B \<subseteq> A" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.328  qed(*<*)qed(*>*)
  11.329  
  11.330 -text_raw {* }
  11.331 +text_raw \<open>}
  11.332  \end{minipage}
  11.333  &
  11.334  \begin{minipage}[t]{.4\textwidth}
  11.335  \isa{%
  11.336 -*}
  11.337 +\<close>
  11.338  (*<*)lemma "A <= (B::'a set)" proof-(*>*)
  11.339  show "A \<subseteq> B"
  11.340  proof
  11.341    fix x
  11.342    assume "x \<in> A"
  11.343 -  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
  11.344 -  show "x \<in> B" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.345 +  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
  11.346 +  show "x \<in> B" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.347  qed(*<*)qed(*>*)
  11.348  
  11.349 -text_raw {* }
  11.350 +text_raw \<open>}
  11.351  \end{minipage}
  11.352  \end{tabular}
  11.353  \begin{isamarkuptext}%
  11.354 @@ -522,34 +522,34 @@
  11.355  the pattern for later use. As an example, consider the proof pattern for
  11.356  @{text"\<longleftrightarrow>"}:
  11.357  \end{isamarkuptext}%
  11.358 -*}
  11.359 +\<close>
  11.360  (*<*)lemma "formula\<^sub>1 \<longleftrightarrow> formula\<^sub>2" proof-(*>*)
  11.361  show "formula\<^sub>1 \<longleftrightarrow> formula\<^sub>2" (is "?L \<longleftrightarrow> ?R")
  11.362  proof
  11.363    assume "?L"
  11.364 -  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
  11.365 -  show "?R" (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
  11.366 +  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
  11.367 +  show "?R" (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
  11.368  next
  11.369    assume "?R"
  11.370 -  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
  11.371 -  show "?L" (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
  11.372 +  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
  11.373 +  show "?L" (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
  11.374  qed(*<*)qed(*>*)
  11.375  
  11.376 -text{* Instead of duplicating @{text"formula\<^sub>i"} in the text, we introduce
  11.377 +text\<open>Instead of duplicating @{text"formula\<^sub>i"} in the text, we introduce
  11.378  the two abbreviations @{text"?L"} and @{text"?R"} by pattern matching.
  11.379  Pattern matching works wherever a formula is stated, in particular
  11.380  with \isacom{have} and \isacom{lemma}.
  11.381  
  11.382  The unknown \indexed{@{text"?thesis"}}{thesis} is implicitly matched against any goal stated by
  11.383 -\isacom{lemma} or \isacom{show}. Here is a typical example: *}
  11.384 +\isacom{lemma} or \isacom{show}. Here is a typical example:\<close>
  11.385  
  11.386  lemma "formula"
  11.387  proof -
  11.388 -  text_raw{*\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}*}
  11.389 -  show ?thesis (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
  11.390 +  text_raw\<open>\\\mbox{}\quad$\vdots$\\\mbox{}\hspace{-1.4ex}\<close>
  11.391 +  show ?thesis (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
  11.392  qed
  11.393  
  11.394 -text{* 
  11.395 +text\<open>
  11.396  Unknowns can also be instantiated with \indexed{\isacom{let}}{let} commands
  11.397  \begin{quote}
  11.398  \isacom{let} @{text"?t"} = @{text"\""}\textit{some-big-term}@{text"\""}
  11.399 @@ -588,37 +588,37 @@
  11.400  Sometimes one needs a number of facts to enable some deduction. Of course
  11.401  one can name these facts individually, as shown on the right,
  11.402  but one can also combine them with \isacom{moreover}, as shown on the left:
  11.403 -*}text_raw{*
  11.404 +\<close>text_raw\<open>
  11.405  \begin{tabular}{@ {}ll@ {}}
  11.406  \begin{minipage}[t]{.4\textwidth}
  11.407  \isa{%
  11.408 -*}
  11.409 +\<close>
  11.410  (*<*)lemma "P" proof-(*>*)
  11.411 -have "P\<^sub>1" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.412 -moreover have "P\<^sub>2" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.413 +have "P\<^sub>1" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.414 +moreover have "P\<^sub>2" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.415  moreover
  11.416 -text_raw{*\\$\vdots$\\\hspace{-1.4ex}*}(*<*)have "True" ..(*>*)
  11.417 -moreover have "P\<^sub>n" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.418 -ultimately have "P"  (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.419 +text_raw\<open>\\$\vdots$\\\hspace{-1.4ex}\<close>(*<*)have "True" ..(*>*)
  11.420 +moreover have "P\<^sub>n" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.421 +ultimately have "P"  (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.422  (*<*)oops(*>*)
  11.423  
  11.424 -text_raw {* }
  11.425 +text_raw \<open>}
  11.426  \end{minipage}
  11.427  &
  11.428  \qquad
  11.429  \begin{minipage}[t]{.4\textwidth}
  11.430  \isa{%
  11.431 -*}
  11.432 +\<close>
  11.433  (*<*)lemma "P" proof-(*>*)
  11.434 -have lab\<^sub>1: "P\<^sub>1" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.435 -have lab\<^sub>2: "P\<^sub>2" (*<*)sorry(*>*)text_raw{*\ \isasymproof*}
  11.436 -text_raw{*\\$\vdots$\\\hspace{-1.4ex}*}
  11.437 -have lab\<^sub>n: "P\<^sub>n" (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.438 -from lab\<^sub>1 lab\<^sub>2 text_raw{*\ $\dots$\\*}
  11.439 -have "P"  (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.440 +have lab\<^sub>1: "P\<^sub>1" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.441 +have lab\<^sub>2: "P\<^sub>2" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\<close>
  11.442 +text_raw\<open>\\$\vdots$\\\hspace{-1.4ex}\<close>
  11.443 +have lab\<^sub>n: "P\<^sub>n" (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.444 +from lab\<^sub>1 lab\<^sub>2 text_raw\<open>\ $\dots$\\\<close>
  11.445 +have "P"  (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.446  (*<*)oops(*>*)
  11.447  
  11.448 -text_raw {* }
  11.449 +text_raw \<open>}
  11.450  \end{minipage}
  11.451  \end{tabular}
  11.452  \begin{isamarkuptext}%
  11.453 @@ -643,7 +643,7 @@
  11.454  As an example we prove a simple fact about divisibility on integers.
  11.455  The definition of @{text "dvd"} is @{thm dvd_def}.
  11.456  \end{isamarkuptext}%
  11.457 -*}
  11.458 +\<close>
  11.459  
  11.460  lemma fixes a b :: int assumes "b dvd (a+b)" shows "b dvd a"
  11.461  proof -
  11.462 @@ -654,28 +654,28 @@
  11.463    then show ?thesis using assms by(auto simp add: dvd_def)
  11.464  qed
  11.465  
  11.466 -text{*
  11.467 +text\<open>
  11.468  
  11.469  \subsection*{Exercises}
  11.470  
  11.471  \exercise
  11.472  Give a readable, structured proof of the following lemma:
  11.473 -*}
  11.474 +\<close>
  11.475  lemma assumes T: "\<forall>x y. T x y \<or> T y x"
  11.476    and A: "\<forall>x y. A x y \<and> A y x \<longrightarrow> x = y"
  11.477    and TA: "\<forall>x y. T x y \<longrightarrow> A x y" and "A x y"
  11.478    shows "T x y"
  11.479  (*<*)oops(*>*)
  11.480 -text{*
  11.481 +text\<open>
  11.482  \endexercise
  11.483  
  11.484  \exercise
  11.485  Give a readable, structured proof of the following lemma:
  11.486 -*}
  11.487 +\<close>
  11.488  lemma "\<exists>ys zs. xs = ys @ zs \<and>
  11.489              (length ys = length zs \<or> length ys = length zs + 1)"
  11.490  (*<*)oops(*>*)
  11.491 -text{*
  11.492 +text\<open>
  11.493  Hint: There are predefined functions @{const_typ take} and @{const_typ drop}
  11.494  such that @{text"take k [x\<^sub>1,\<dots>] = [x\<^sub>1,\<dots>,x\<^sub>k]"} and
  11.495  @{text"drop k [x\<^sub>1,\<dots>] = [x\<^bsub>k+1\<^esub>,\<dots>]"}. Let sledgehammer find and apply
  11.496 @@ -692,7 +692,7 @@
  11.497  which form some term takes: is it @{text 0} or of the form @{term"Suc n"},
  11.498  is it @{term"[]"} or of the form @{term"x#xs"}, etc. Here is a typical example
  11.499  proof by case analysis on the form of @{text xs}:
  11.500 -*}
  11.501 +\<close>
  11.502  
  11.503  lemma "length(tl xs) = length xs - 1"
  11.504  proof (cases xs)
  11.505 @@ -703,7 +703,7 @@
  11.506    thus ?thesis by simp
  11.507  qed
  11.508  
  11.509 -text{*\index{cases@@{text"cases"}|(}Function @{text tl} (''tail'') is defined by @{thm list.sel(2)} and
  11.510 +text\<open>\index{cases@@{text"cases"}|(}Function @{text tl} (''tail'') is defined by @{thm list.sel(2)} and
  11.511  @{thm list.sel(3)}. Note that the result type of @{const length} is @{typ nat}
  11.512  and @{prop"0 - 1 = (0::nat)"}.
  11.513  
  11.514 @@ -721,7 +721,7 @@
  11.515  but also gives the assumption @{text"\"t = C x\<^sub>1 \<dots> x\<^sub>n\""} a name: @{text C},
  11.516  like the constructor.
  11.517  Here is the \isacom{case} version of the proof above:
  11.518 -*}
  11.519 +\<close>
  11.520  (*<*)lemma "length(tl xs) = length xs - 1"(*>*)
  11.521  proof (cases xs)
  11.522    case Nil
  11.523 @@ -731,7 +731,7 @@
  11.524    thus ?thesis by simp
  11.525  qed
  11.526  
  11.527 -text{* Remember that @{text Nil} and @{text Cons} are the alphanumeric names
  11.528 +text\<open>Remember that @{text Nil} and @{text Cons} are the alphanumeric names
  11.529  for @{text"[]"} and @{text"#"}. The names of the assumptions
  11.530  are not used because they are directly piped (via \isacom{thus})
  11.531  into the proof of the claim.
  11.532 @@ -745,7 +745,7 @@
  11.533  the sum (@{text"\<Sum>"}) of the first @{text n} natural numbers
  11.534  (@{text"{0..n::nat}"}) is equal to \mbox{@{term"n*(n+1) div 2::nat"}}.
  11.535  Never mind the details, just focus on the pattern:
  11.536 -*}
  11.537 +\<close>
  11.538  
  11.539  lemma "\<Sum>{0..n::nat} = n*(n+1) div 2"
  11.540  proof (induction n)
  11.541 @@ -755,10 +755,10 @@
  11.542    thus "\<Sum>{0..Suc n} = Suc n*(Suc n+1) div 2" by simp
  11.543  qed
  11.544  
  11.545 -text{* Except for the rewrite steps, everything is explicitly given. This
  11.546 +text\<open>Except for the rewrite steps, everything is explicitly given. This
  11.547  makes the proof easily readable, but the duplication means it is tedious to
  11.548  write and maintain. Here is how pattern
  11.549 -matching can completely avoid any duplication: *}
  11.550 +matching can completely avoid any duplication:\<close>
  11.551  
  11.552  lemma "\<Sum>{0..n::nat} = n*(n+1) div 2" (is "?P n")
  11.553  proof (induction n)
  11.554 @@ -768,7 +768,7 @@
  11.555    thus "?P(Suc n)" by simp
  11.556  qed
  11.557  
  11.558 -text{* The first line introduces an abbreviation @{text"?P n"} for the goal.
  11.559 +text\<open>The first line introduces an abbreviation @{text"?P n"} for the goal.
  11.560  Pattern matching @{text"?P n"} with the goal instantiates @{text"?P"} to the
  11.561  function @{term"\<lambda>n. \<Sum>{0..n::nat} = n*(n+1) div 2"}.  Now the proposition to
  11.562  be proved in the base case can be written as @{text"?P 0"}, the induction
  11.563 @@ -777,7 +777,7 @@
  11.564  
  11.565  Induction also provides the \isacom{case} idiom that abbreviates
  11.566  the \isacom{fix}-\isacom{assume} step. The above proof becomes
  11.567 -*}
  11.568 +\<close>
  11.569  (*<*)lemma "\<Sum>{0..n::nat} = n*(n+1) div 2"(*>*)
  11.570  proof (induction n)
  11.571    case 0
  11.572 @@ -787,29 +787,29 @@
  11.573    thus ?case by simp
  11.574  qed
  11.575  
  11.576 -text{*
  11.577 +text\<open>
  11.578  The unknown @{text"?case"}\index{case?@@{text"?case"}|(} is set in each case to the required
  11.579  claim, i.e., @{text"?P 0"} and \mbox{@{text"?P(Suc n)"}} in the above proof,
  11.580  without requiring the user to define a @{text "?P"}. The general
  11.581  pattern for induction over @{typ nat} is shown on the left-hand side:
  11.582 -*}text_raw{*
  11.583 +\<close>text_raw\<open>
  11.584  \begin{tabular}{@ {}ll@ {}}
  11.585  \begin{minipage}[t]{.4\textwidth}
  11.586  \isa{%
  11.587 -*}
  11.588 +\<close>
  11.589  (*<*)lemma "P(n::nat)" proof -(*>*)
  11.590  show "P(n)"
  11.591  proof (induction n)
  11.592    case 0
  11.593 -  text_raw{*\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}*}
  11.594 -  show ?case (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
  11.595 +  text_raw\<open>\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}\<close>
  11.596 +  show ?case (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
  11.597  next
  11.598    case (Suc n)
  11.599 -  text_raw{*\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}*}
  11.600 -  show ?case (*<*)sorry(*>*) text_raw{*\ \isasymproof\\*}
  11.601 +  text_raw\<open>\\\mbox{}\ \ $\vdots$\\\mbox{}\hspace{-1ex}\<close>
  11.602 +  show ?case (*<*)sorry(*>*) text_raw\<open>\ \isasymproof\\\<close>
  11.603  qed(*<*)qed(*>*)
  11.604  
  11.605 -text_raw {* }
  11.606 +text_raw \<open>}
  11.607  \end{minipage}
  11.608  &
  11.609  \begin{minipage}[t]{.4\textwidth}
  11.610 @@ -824,8 +824,8 @@
  11.611  \end{minipage}
  11.612  \end{tabular}
  11.613  \medskip
  11.614 -*}
  11.615 -text{*
  11.616 +\<close>
  11.617 +text\<open>
  11.618  On the right side you can see what the \isacom{case} command
  11.619  on the left stands for.
  11.620  
  11.621 @@ -910,7 +910,7 @@
  11.622  
  11.623  Recall the inductive and recursive definitions of even numbers in
  11.624  \autoref{sec:inductive-defs}:
  11.625 -*}
  11.626 +\<close>
  11.627  
  11.628  inductive ev :: "nat \<Rightarrow> bool" where
  11.629  ev0: "ev 0" |
  11.630 @@ -921,13 +921,13 @@
  11.631  "evn (Suc 0) = False" |
  11.632  "evn (Suc(Suc n)) = evn n"
  11.633  
  11.634 -text{* We recast the proof of @{prop"ev n \<Longrightarrow> evn n"} in Isar. The
  11.635 +text\<open>We recast the proof of @{prop"ev n \<Longrightarrow> evn n"} in Isar. The
  11.636  left column shows the actual proof text, the right column shows
  11.637 -the implicit effect of the two \isacom{case} commands:*}text_raw{*
  11.638 +the implicit effect of the two \isacom{case} commands:\<close>text_raw\<open>
  11.639  \begin{tabular}{@ {}l@ {\qquad}l@ {}}
  11.640  \begin{minipage}[t]{.5\textwidth}
  11.641  \isa{%
  11.642 -*}
  11.643 +\<close>
  11.644  
  11.645  lemma "ev n \<Longrightarrow> evn n"
  11.646  proof(induction rule: ev.induct)
  11.647 @@ -941,7 +941,7 @@
  11.648    thus ?case by simp
  11.649  qed
  11.650  
  11.651 -text_raw {* }
  11.652 +text_raw \<open>}
  11.653  \end{minipage}
  11.654  &
  11.655  \begin{minipage}[t]{.5\textwidth}
  11.656 @@ -957,8 +957,8 @@
  11.657  \end{minipage}
  11.658  \end{tabular}
  11.659  \medskip
  11.660 -*}
  11.661 -text{*
  11.662 +\<close>
  11.663 +text\<open>
  11.664  The proof resembles structural induction, but the induction rule is given
  11.665  explicitly and the names of the cases are the names of the rules in the
  11.666  inductive definition.
  11.667 @@ -986,7 +986,7 @@
  11.668  case @{thm[source] evSS} is derived from a renamed version of
  11.669  rule @{thm[source] evSS}: @{text"ev m \<Longrightarrow> ev(Suc(Suc m))"}.
  11.670  Here is an example with a (contrived) intermediate step that refers to @{text m}:
  11.671 -*}
  11.672 +\<close>
  11.673  
  11.674  lemma "ev n \<Longrightarrow> evn n"
  11.675  proof(induction rule: ev.induct)
  11.676 @@ -994,16 +994,16 @@
  11.677  next
  11.678    case (evSS m)
  11.679    have "evn(Suc(Suc m)) = evn m" by simp
  11.680 -  thus ?case using `evn m` by blast
  11.681 +  thus ?case using \<open>evn m\<close> by blast
  11.682  qed
  11.683  
  11.684 -text{*
  11.685 +text\<open>
  11.686  \indent
  11.687  In general, let @{text I} be a (for simplicity unary) inductively defined
  11.688  predicate and let the rules in the definition of @{text I}
  11.689  be called @{text "rule\<^sub>1"}, \dots, @{text "rule\<^sub>n"}. A proof by rule
  11.690  induction follows this pattern:\index{inductionrule@@{text"induction ... rule:"}}
  11.691 -*}
  11.692 +\<close>
  11.693  
  11.694  (*<*)
  11.695  inductive I where rule\<^sub>1: "I()" |  rule\<^sub>2: "I()" |  rule\<^sub>n: "I()"
  11.696 @@ -1011,21 +1011,21 @@
  11.697  show "I x \<Longrightarrow> P x"
  11.698  proof(induction rule: I.induct)
  11.699    case rule\<^sub>1
  11.700 -  text_raw{*\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
  11.701 -  show ?case (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.702 +  text_raw\<open>\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}\<close>
  11.703 +  show ?case (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.704  next
  11.705 -  text_raw{*\\[-.4ex]$\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
  11.706 +  text_raw\<open>\\[-.4ex]$\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}\<close>
  11.707  (*<*)
  11.708    case rule\<^sub>2
  11.709    show ?case sorry
  11.710  (*>*)
  11.711  next
  11.712    case rule\<^sub>n
  11.713 -  text_raw{*\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}*}
  11.714 -  show ?case (*<*)sorry(*>*)text_raw{*\ \isasymproof\\*}
  11.715 +  text_raw\<open>\\[-.4ex]\mbox{}\ \ $\vdots$\\[-.4ex]\mbox{}\hspace{-1ex}\<close>
  11.716 +  show ?case (*<*)sorry(*>*)text_raw\<open>\ \isasymproof\\\<close>
  11.717  qed(*<*)qed(*>*)
  11.718  
  11.719 -text{*
  11.720 +text\<open>
  11.721  One can provide explicit variable names by writing
  11.722  \isacom{case}~@{text"(rule\<^sub>i x\<^sub>1 \<dots> x\<^sub>k)"}, thus renaming the first @{text k}
  11.723  free variables in rule @{text i} to @{text"x\<^sub>1 \<dots> x\<^sub>k"},
  11.724 @@ -1071,7 +1071,7 @@
  11.725  A simple example is the proof that @{prop"ev n \<Longrightarrow> ev (n - 2)"}. We
  11.726  already went through the details informally in \autoref{sec:Logic:even}. This
  11.727  is the Isar proof:
  11.728 -*}
  11.729 +\<close>
  11.730  (*<*)
  11.731  notepad
  11.732  begin fix n
  11.733 @@ -1087,7 +1087,7 @@
  11.734  end
  11.735  (*>*)
  11.736  
  11.737 -text{* The key point here is that a case analysis over some inductively
  11.738 +text\<open>The key point here is that a case analysis over some inductively
  11.739  defined predicate is triggered by piping the given fact
  11.740  (here: \isacom{from}~@{text this}) into a proof by @{text cases}.
  11.741  Let us examine the assumptions available in each case. In case @{text ev0}
  11.742 @@ -1101,7 +1101,7 @@
  11.743  rule @{text evSS} can yield @{prop"ev(Suc 0)"} because @{text"Suc 0"} unifies
  11.744  neither with @{text 0} nor with @{term"Suc(Suc n)"}. Impossible cases do not
  11.745  have to be proved. Hence we can prove anything from @{prop"ev(Suc 0)"}:
  11.746 -*}
  11.747 +\<close>
  11.748  (*<*)
  11.749  notepad begin fix P
  11.750  (*>*)
  11.751 @@ -1110,14 +1110,14 @@
  11.752  end
  11.753  (*>*)
  11.754  
  11.755 -text{* That is, @{prop"ev(Suc 0)"} is simply not provable: *}
  11.756 +text\<open>That is, @{prop"ev(Suc 0)"} is simply not provable:\<close>
  11.757  
  11.758  lemma "\<not> ev(Suc 0)"
  11.759  proof
  11.760    assume "ev(Suc 0)" then show False by cases
  11.761  qed
  11.762  
  11.763 -text{* Normally not all cases will be impossible. As a simple exercise,
  11.764 +text\<open>Normally not all cases will be impossible. As a simple exercise,
  11.765  prove that \mbox{@{prop"\<not> ev(Suc(Suc(Suc 0)))"}.}
  11.766  
  11.767  \subsection{Advanced Rule Induction}
  11.768 @@ -1147,23 +1147,23 @@
  11.769  \isacom{proof}@{text"(induction \"r\" \"s\" \"t\" arbitrary: \<dots> rule: I.induct)"}\index{inductionrule@@{text"induction ... rule:"}}\index{arbitrary@@{text"arbitrary:"}}
  11.770  \end{isabelle}
  11.771  Like for rule inversion, cases that are impossible because of constructor clashes
  11.772 -will not show up at all. Here is a concrete example: *}
  11.773 +will not show up at all. Here is a concrete example:\<close>
  11.774  
  11.775  lemma "ev (Suc m) \<Longrightarrow> \<not> ev m"
  11.776  proof(induction "Suc m" arbitrary: m rule: ev.induct)
  11.777    fix n assume IH: "\<And>m. n = Suc m \<Longrightarrow> \<not> ev m"
  11.778    show "\<not> ev (Suc n)"
  11.779 -  proof --"contradiction"
  11.780 +  proof \<comment>"contradiction"
  11.781      assume "ev(Suc n)"
  11.782      thus False
  11.783 -    proof cases --"rule inversion"
  11.784 +    proof cases \<comment>"rule inversion"
  11.785        fix k assume "n = Suc k" "ev k"
  11.786        thus False using IH by auto
  11.787      qed
  11.788    qed
  11.789  qed
  11.790  
  11.791 -text{*
  11.792 +text\<open>
  11.793  Remarks:
  11.794  \begin{itemize}
  11.795  \item 
  11.796 @@ -1200,12 +1200,12 @@
  11.797  
  11.798  \exercise
  11.799  Give a structured proof by rule inversion:
  11.800 -*}
  11.801 +\<close>
  11.802  
  11.803  lemma assumes a: "ev(Suc(Suc n))" shows "ev n"
  11.804  (*<*)oops(*>*)
  11.805  
  11.806 -text{*
  11.807 +text\<open>
  11.808  \endexercise
  11.809  
  11.810  \begin{exercise}
  11.811 @@ -1236,7 +1236,7 @@
  11.812  @{const replicate} @{text"::"} @{typ"nat \<Rightarrow> 'a \<Rightarrow> 'a list"} is predefined
  11.813  and @{term"replicate n x"} yields the list @{text"[x, \<dots>, x]"} of length @{text n}.
  11.814  \end{exercise}
  11.815 -*}
  11.816 +\<close>
  11.817  
  11.818  (*<*)
  11.819  end
    12.1 --- a/src/Doc/Prog_Prove/LaTeXsugar.thy	Thu Jan 11 13:48:17 2018 +0100
    12.2 +++ b/src/Doc/Prog_Prove/LaTeXsugar.thy	Fri Jan 12 14:08:53 2018 +0100
    12.3 @@ -43,7 +43,7 @@
    12.4    "_asms" :: "prop \<Rightarrow> asms \<Rightarrow> asms" ("_ /\<^latex>\<open>{\\normalsize \\,\<close>and\<^latex>\<open>\\,}\<close>/ _")
    12.5    "_asm" :: "prop \<Rightarrow> asms" ("_")
    12.6  
    12.7 -setup{*
    12.8 +setup\<open>
    12.9    let
   12.10      fun pretty ctxt c =
   12.11        let val tc = Proof_Context.read_const {proper = true, strict = false} ctxt c
   12.12 @@ -57,7 +57,7 @@
   12.13            Thy_Output.output ctxt
   12.14              (Thy_Output.maybe_pretty_source pretty ctxt src [arg]))
   12.15    end;
   12.16 -*}
   12.17 +\<close>
   12.18  
   12.19  end
   12.20  (*>*)
    13.1 --- a/src/Doc/Prog_Prove/Logic.thy	Thu Jan 11 13:48:17 2018 +0100
    13.2 +++ b/src/Doc/Prog_Prove/Logic.thy	Fri Jan 12 14:08:53 2018 +0100
    13.3 @@ -3,7 +3,7 @@
    13.4  imports LaTeXsugar
    13.5  begin
    13.6  (*>*)
    13.7 -text{*
    13.8 +text\<open>
    13.9  \vspace{-5ex}
   13.10  \section{Formulas}
   13.11  
   13.12 @@ -147,11 +147,11 @@
   13.13  
   13.14  \exercise
   13.15  Start from the data type of binary trees defined earlier:
   13.16 -*}
   13.17 +\<close>
   13.18  
   13.19  datatype 'a tree = Tip | Node "'a tree" 'a "'a tree"
   13.20  
   13.21 -text{*
   13.22 +text\<open>
   13.23  Define a function @{text "set ::"} @{typ "'a tree \<Rightarrow> 'a set"}
   13.24  that returns the elements in a tree and a function
   13.25  @{text "ord ::"} @{typ "int tree \<Rightarrow> bool"}
   13.26 @@ -169,7 +169,7 @@
   13.27  So far we have only seen @{text simp} and \indexed{@{text auto}}{auto}: Both perform
   13.28  rewriting, both can also prove linear arithmetic facts (no multiplication),
   13.29  and @{text auto} is also able to prove simple logical or set-theoretic goals:
   13.30 -*}
   13.31 +\<close>
   13.32  
   13.33  lemma "\<forall>x. \<exists>y. x = y"
   13.34  by auto
   13.35 @@ -177,7 +177,7 @@
   13.36  lemma "A \<subseteq> B \<inter> C \<Longrightarrow> A \<subseteq> B \<union> C"
   13.37  by auto
   13.38  
   13.39 -text{* where
   13.40 +text\<open>where
   13.41  \begin{quote}
   13.42  \isacom{by} \textit{proof-method}
   13.43  \end{quote}
   13.44 @@ -200,13 +200,13 @@
   13.45  subgoal only, and it can be modified like @{text auto}, e.g.,
   13.46  with @{text "simp add"}. Here is a typical example of what @{text fastforce}
   13.47  can do:
   13.48 -*}
   13.49 +\<close>
   13.50  
   13.51  lemma "\<lbrakk> \<forall>xs \<in> A. \<exists>ys. xs = ys @ ys;  us \<in> A \<rbrakk>
   13.52     \<Longrightarrow> \<exists>n. length us = n+n"
   13.53  by fastforce
   13.54  
   13.55 -text{* This lemma is out of reach for @{text auto} because of the
   13.56 +text\<open>This lemma is out of reach for @{text auto} because of the
   13.57  quantifiers.  Even @{text fastforce} fails when the quantifier structure
   13.58  becomes more complicated. In a few cases, its slow version @{text force}
   13.59  succeeds where @{text fastforce} fails.
   13.60 @@ -215,7 +215,7 @@
   13.61  following example, @{text T} and @{text A} are two binary predicates. It
   13.62  is shown that if @{text T} is total, @{text A} is antisymmetric and @{text T} is
   13.63  a subset of @{text A}, then @{text A} is a subset of @{text T}:
   13.64 -*}
   13.65 +\<close>
   13.66  
   13.67  lemma
   13.68    "\<lbrakk> \<forall>x y. T x y \<or> T y x;
   13.69 @@ -224,7 +224,7 @@
   13.70     \<Longrightarrow> \<forall>x y. A x y \<longrightarrow> T x y"
   13.71  by blast
   13.72  
   13.73 -text{*
   13.74 +text\<open>
   13.75  We leave it to the reader to figure out why this lemma is true.
   13.76  Method @{text blast}
   13.77  \begin{itemize}
   13.78 @@ -245,16 +245,16 @@
   13.79  queried over the internet. If successful, a proof command is generated and can
   13.80  be inserted into your proof.  The biggest win of \isacom{sledgehammer} is
   13.81  that it will take into account the whole lemma library and you do not need to
   13.82 -feed in any lemma explicitly. For example,*}
   13.83 +feed in any lemma explicitly. For example,\<close>
   13.84  
   13.85  lemma "\<lbrakk> xs @ ys = ys @ xs;  length xs = length ys \<rbrakk> \<Longrightarrow> xs = ys"
   13.86  
   13.87 -txt{* cannot be solved by any of the standard proof methods, but
   13.88 -\isacom{sledgehammer} finds the following proof: *}
   13.89 +txt\<open>cannot be solved by any of the standard proof methods, but
   13.90 +\isacom{sledgehammer} finds the following proof:\<close>
   13.91  
   13.92  by (metis append_eq_conv_conj)
   13.93  
   13.94 -text{* We do not explain how the proof was found but what this command
   13.95 +text\<open>We do not explain how the proof was found but what this command
   13.96  means. For a start, Isabelle does not trust external tools (and in particular
   13.97  not the translations from Isabelle's logic to those tools!)
   13.98  and insists on a proof that it can check. This is what \indexed{@{text metis}}{metis} does.
   13.99 @@ -286,12 +286,12 @@
  13.100  because it does not involve multiplication, although multiplication with
  13.101  numbers, e.g., @{text"2*n"}, is allowed. Such formulas can be proved by
  13.102  \indexed{@{text arith}}{arith}:
  13.103 -*}
  13.104 +\<close>
  13.105  
  13.106  lemma "\<lbrakk> (a::nat) \<le> x + b; 2*x < c \<rbrakk> \<Longrightarrow> 2*a + 1 \<le> 2*b + c"
  13.107  by arith
  13.108  
  13.109 -text{* In fact, @{text auto} and @{text simp} can prove many linear
  13.110 +text\<open>In fact, @{text auto} and @{text simp} can prove many linear
  13.111  arithmetic formulas already, like the one above, by calling a weak but fast
  13.112  version of @{text arith}. Hence it is usually not necessary to invoke
  13.113  @{text arith} explicitly.
  13.114 @@ -425,12 +425,12 @@
  13.115  @{thm[source] le_trans}, transitivity of @{text"\<le>"} on type @{typ nat},
  13.116  is not an introduction rule by default because of the disastrous effect
  13.117  on the search space, but can be useful in specific situations:
  13.118 -*}
  13.119 +\<close>
  13.120  
  13.121  lemma "\<lbrakk> (a::nat) \<le> b; b \<le> c; c \<le> d; d \<le> e \<rbrakk> \<Longrightarrow> a \<le> e"
  13.122  by(blast intro: le_trans)
  13.123  
  13.124 -text{*
  13.125 +text\<open>
  13.126  Of course this is just an example and could be proved by @{text arith}, too.
  13.127  
  13.128  \subsection{Forward Proof}
  13.129 @@ -459,11 +459,11 @@
  13.130  by unifying and thus proving @{text "A\<^sub>i"} with @{text "r\<^sub>i"}, @{text"i = 1\<dots>m"}.
  13.131  Here is an example, where @{thm[source]refl} is the theorem
  13.132  @{thm[show_question_marks] refl}:
  13.133 -*}
  13.134 +\<close>
  13.135  
  13.136  thm conjI[OF refl[of "a"] refl[of "b"]]
  13.137  
  13.138 -text{* yields the theorem @{thm conjI[OF refl[of "a"] refl[of "b"]]}.
  13.139 +text\<open>yields the theorem @{thm conjI[OF refl[of "a"] refl[of "b"]]}.
  13.140  The command \isacom{thm} merely displays the result.
  13.141  
  13.142  Forward reasoning also makes sense in connection with proof states.
  13.143 @@ -474,12 +474,12 @@
  13.144  allows proof search to reason forward with @{text r}, i.e.,
  13.145  to replace an assumption @{text A'}, where @{text A'} unifies with @{text A},
  13.146  with the correspondingly instantiated @{text B}. For example, @{thm[source,show_question_marks] Suc_leD} is the theorem \mbox{@{thm Suc_leD}}, which works well for forward reasoning:
  13.147 -*}
  13.148 +\<close>
  13.149  
  13.150  lemma "Suc(Suc(Suc a)) \<le> b \<Longrightarrow> a \<le> b"
  13.151  by(blast dest: Suc_leD)
  13.152  
  13.153 -text{* In this particular example we could have backchained with
  13.154 +text\<open>In this particular example we could have backchained with
  13.155  @{thm[source] Suc_leD}, too, but because the premise is more complicated than the conclusion this can easily lead to nontermination.
  13.156  
  13.157  %\subsection{Finding Theorems}
  13.158 @@ -516,14 +516,14 @@
  13.159  The operative word ``inductive'' means that these are the only even numbers.
  13.160  In Isabelle we give the two rules the names @{text ev0} and @{text evSS}
  13.161  and write
  13.162 -*}
  13.163 +\<close>
  13.164  
  13.165  inductive ev :: "nat \<Rightarrow> bool" where
  13.166  ev0:    "ev 0" |
  13.167  evSS:  (*<*)"ev n \<Longrightarrow> ev (Suc(Suc n))"(*>*)
  13.168 -text_raw{* @{prop[source]"ev n \<Longrightarrow> ev (n + 2)"} *}
  13.169 +text_raw\<open>@{prop[source]"ev n \<Longrightarrow> ev (n + 2)"}\<close>
  13.170  
  13.171 -text{* To get used to inductive definitions, we will first prove a few
  13.172 +text\<open>To get used to inductive definitions, we will first prove a few
  13.173  properties of @{const ev} informally before we descend to the Isabelle level.
  13.174  
  13.175  How do we prove that some number is even, e.g., @{prop "ev 4"}? Simply by combining the defining rules for @{const ev}:
  13.176 @@ -535,14 +535,14 @@
  13.177  
  13.178  Showing that all even numbers have some property is more complicated.  For
  13.179  example, let us prove that the inductive definition of even numbers agrees
  13.180 -with the following recursive one:*}
  13.181 +with the following recursive one:\<close>
  13.182  
  13.183  fun evn :: "nat \<Rightarrow> bool" where
  13.184  "evn 0 = True" |
  13.185  "evn (Suc 0) = False" |
  13.186  "evn (Suc(Suc n)) = evn n"
  13.187  
  13.188 -text{* We prove @{prop"ev m \<Longrightarrow> evn m"}.  That is, we
  13.189 +text\<open>We prove @{prop"ev m \<Longrightarrow> evn m"}.  That is, we
  13.190  assume @{prop"ev m"} and by induction on the form of its derivation
  13.191  prove @{prop"evn m"}. There are two cases corresponding to the two rules
  13.192  for @{const ev}:
  13.193 @@ -606,60 +606,60 @@
  13.194  direction: @{text "evSS[OF evSS[OF ev0]]"} yields the theorem @{thm evSS[OF
  13.195  evSS[OF ev0]]}. Alternatively, you can also prove it as a lemma in backwards
  13.196  fashion. Although this is more verbose, it allows us to demonstrate how each
  13.197 -rule application changes the proof state: *}
  13.198 +rule application changes the proof state:\<close>
  13.199  
  13.200  lemma "ev(Suc(Suc(Suc(Suc 0))))"
  13.201 -txt{*
  13.202 +txt\<open>
  13.203  @{subgoals[display,indent=0,goals_limit=1]}
  13.204 -*}
  13.205 +\<close>
  13.206  apply(rule evSS)
  13.207 -txt{*
  13.208 +txt\<open>
  13.209  @{subgoals[display,indent=0,goals_limit=1]}
  13.210 -*}
  13.211 +\<close>
  13.212  apply(rule evSS)
  13.213 -txt{*
  13.214 +txt\<open>
  13.215  @{subgoals[display,indent=0,goals_limit=1]}
  13.216 -*}
  13.217 +\<close>
  13.218  apply(rule ev0)
  13.219  done
  13.220  
  13.221 -text{* \indent
  13.222 +text\<open>\indent
  13.223  Rule induction is applied by giving the induction rule explicitly via the
  13.224 -@{text"rule:"} modifier:\index{inductionrule@@{text"induction ... rule:"}}*}
  13.225 +@{text"rule:"} modifier:\index{inductionrule@@{text"induction ... rule:"}}\<close>
  13.226  
  13.227  lemma "ev m \<Longrightarrow> evn m"
  13.228  apply(induction rule: ev.induct)
  13.229  by(simp_all)
  13.230  
  13.231 -text{* Both cases are automatic. Note that if there are multiple assumptions
  13.232 +text\<open>Both cases are automatic. Note that if there are multiple assumptions
  13.233  of the form @{prop"ev t"}, method @{text induction} will induct on the leftmost
  13.234  one.
  13.235  
  13.236  As a bonus, we also prove the remaining direction of the equivalence of
  13.237  @{const ev} and @{const evn}:
  13.238 -*}
  13.239 +\<close>
  13.240  
  13.241  lemma "evn n \<Longrightarrow> ev n"
  13.242  apply(induction n rule: evn.induct)
  13.243  
  13.244 -txt{* This is a proof by computation induction on @{text n} (see
  13.245 +txt\<open>This is a proof by computation induction on @{text n} (see
  13.246  \autoref{sec:recursive-funs}) that sets up three subgoals corresponding to
  13.247  the three equations for @{const evn}:
  13.248  @{subgoals[display,indent=0]}
  13.249  The first and third subgoals follow with @{thm[source]ev0} and @{thm[source]evSS}, and the second subgoal is trivially true because @{prop"evn(Suc 0)"} is @{const False}:
  13.250 -*}
  13.251 +\<close>
  13.252  
  13.253  by (simp_all add: ev0 evSS)
  13.254  
  13.255 -text{* The rules for @{const ev} make perfect simplification and introduction
  13.256 +text\<open>The rules for @{const ev} make perfect simplification and introduction
  13.257  rules because their premises are always smaller than the conclusion. It
  13.258  makes sense to turn them into simplification and introduction rules
  13.259  permanently, to enhance proof automation. They are named @{thm[source] ev.intros}
  13.260 -\index{intros@@{text".intros"}} by Isabelle: *}
  13.261 +\index{intros@@{text".intros"}} by Isabelle:\<close>
  13.262  
  13.263  declare ev.intros[simp,intro]
  13.264  
  13.265 -text{* The rules of an inductive definition are not simplification rules by
  13.266 +text\<open>The rules of an inductive definition are not simplification rules by
  13.267  default because, in contrast to recursive functions, there is no termination
  13.268  requirement for inductive definitions.
  13.269  
  13.270 @@ -707,13 +707,13 @@
  13.271  r"}, because @{text"star r"} is meant to be the reflexive transitive closure.
  13.272  That is, @{prop"star r x y"} is meant to be true if from @{text x} we can
  13.273  reach @{text y} in finitely many @{text r} steps. This concept is naturally
  13.274 -defined inductively: *}
  13.275 +defined inductively:\<close>
  13.276  
  13.277  inductive star :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"  for r where
  13.278  refl:  "star r x x" |
  13.279  step:  "r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
  13.280  
  13.281 -text{* The base case @{thm[source] refl} is reflexivity: @{term "x=y"}. The
  13.282 +text\<open>The base case @{thm[source] refl} is reflexivity: @{term "x=y"}. The
  13.283  step case @{thm[source]step} combines an @{text r} step (from @{text x} to
  13.284  @{text y}) and a @{term"star r"} step (from @{text y} to @{text z}) into a
  13.285  @{term"star r"} step (from @{text x} to @{text z}).
  13.286 @@ -723,7 +723,7 @@
  13.287  generates a simpler induction rule.
  13.288  
  13.289  By definition @{term"star r"} is reflexive. It is also transitive, but we
  13.290 -need rule induction to prove that: *}
  13.291 +need rule induction to prove that:\<close>
  13.292  
  13.293  lemma star_trans: "star r x y \<Longrightarrow> star r y z \<Longrightarrow> star r x z"
  13.294  apply(induction rule: star.induct)
  13.295 @@ -732,15 +732,15 @@
  13.296  apply(rename_tac u x y)
  13.297  defer
  13.298  (*>*)
  13.299 -txt{* The induction is over @{prop"star r x y"} (the first matching assumption)
  13.300 +txt\<open>The induction is over @{prop"star r x y"} (the first matching assumption)
  13.301  and we try to prove \mbox{@{prop"star r y z \<Longrightarrow> star r x z"}},
  13.302  which we abbreviate by @{prop"P x y"}. These are our two subgoals:
  13.303  @{subgoals[display,indent=0]}
  13.304  The first one is @{prop"P x x"}, the result of case @{thm[source]refl},
  13.305  and it is trivial:\index{assumption@@{text assumption}}
  13.306 -*}
  13.307 +\<close>
  13.308  apply(assumption)
  13.309 -txt{* Let us examine subgoal @{text 2}, case @{thm[source] step}.
  13.310 +txt\<open>Let us examine subgoal @{text 2}, case @{thm[source] step}.
  13.311  Assumptions @{prop"r u x"} and \mbox{@{prop"star r x y"}}
  13.312  are the premises of rule @{thm[source]step}.
  13.313  Assumption @{prop"star r y z \<Longrightarrow> star r x z"} is \mbox{@{prop"P x y"}},
  13.314 @@ -749,11 +749,11 @@
  13.315  The proof itself is straightforward: from \mbox{@{prop"star r y z"}} the IH
  13.316  leads to @{prop"star r x z"} which, together with @{prop"r u x"},
  13.317  leads to \mbox{@{prop"star r u z"}} via rule @{thm[source]step}:
  13.318 -*}
  13.319 +\<close>
  13.320  apply(metis step)
  13.321  done
  13.322  
  13.323 -text{*\index{rule induction|)}
  13.324 +text\<open>\index{rule induction|)}
  13.325  
  13.326  \subsection{The General Case}
  13.327  
  13.328 @@ -804,13 +804,13 @@
  13.329  
  13.330  \exercise
  13.331  We could also have defined @{const star} as follows:
  13.332 -*}
  13.333 +\<close>
  13.334  
  13.335  inductive star' :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool" for r where
  13.336  refl': "star' r x x" |
  13.337  step': "star' r x y \<Longrightarrow> r y z \<Longrightarrow> star' r x z"
  13.338  
  13.339 -text{*
  13.340 +text\<open>
  13.341  The single @{text r} step is performed after rather than before the @{text star'}
  13.342  steps. Prove @{prop "star' r x y \<Longrightarrow> star r x y"} and
  13.343  @{prop "star r x y \<Longrightarrow> star' r x y"}. You may need lemmas.
  13.344 @@ -877,7 +877,7 @@
  13.345  some suitable value of @{text "?"}.
  13.346  \end{exercise}
  13.347  \fi
  13.348 -*}
  13.349 +\<close>
  13.350  (*<*)
  13.351  end
  13.352  (*>*)
    14.1 --- a/src/Doc/Prog_Prove/Types_and_funs.thy	Thu Jan 11 13:48:17 2018 +0100
    14.2 +++ b/src/Doc/Prog_Prove/Types_and_funs.thy	Fri Jan 12 14:08:53 2018 +0100
    14.3 @@ -3,16 +3,16 @@
    14.4  imports Main
    14.5  begin
    14.6  (*>*)
    14.7 -text{*
    14.8 +text\<open>
    14.9  \vspace{-5ex}
   14.10  \section{Type and Function Definitions}
   14.11  
   14.12  Type synonyms are abbreviations for existing types, for example
   14.13 -\index{string@@{text string}}*}
   14.14 +\index{string@@{text string}}\<close>
   14.15  
   14.16  type_synonym string = "char list"
   14.17  
   14.18 -text{*
   14.19 +text\<open>
   14.20  Type synonyms are expanded after parsing and are not present in internal representation and output. They are mere conveniences for the reader.
   14.21  
   14.22  \subsection{Datatypes}
   14.23 @@ -54,22 +54,22 @@
   14.24  Case expressions must be enclosed in parentheses.
   14.25  
   14.26  As an example of a datatype beyond @{typ nat} and @{text list}, consider binary trees:
   14.27 -*}
   14.28 +\<close>
   14.29  
   14.30  datatype 'a tree = Tip | Node  "'a tree"  'a  "'a tree"
   14.31  
   14.32 -text{* with a mirror function: *}
   14.33 +text\<open>with a mirror function:\<close>
   14.34  
   14.35  fun mirror :: "'a tree \<Rightarrow> 'a tree" where
   14.36  "mirror Tip = Tip" |
   14.37  "mirror (Node l a r) = Node (mirror r) a (mirror l)"
   14.38  
   14.39 -text{* The following lemma illustrates induction: *}
   14.40 +text\<open>The following lemma illustrates induction:\<close>
   14.41  
   14.42  lemma "mirror(mirror t) = t"
   14.43  apply(induction t)
   14.44  
   14.45 -txt{* yields
   14.46 +txt\<open>yields
   14.47  @{subgoals[display]}
   14.48  The induction step contains two induction hypotheses, one for each subtree.
   14.49  An application of @{text auto} finishes the proof.
   14.50 @@ -81,7 +81,7 @@
   14.51  elements of @{typ 'a}, you wrap them up in @{const Some} and call
   14.52  the new type @{typ"'a option"}. A typical application is a lookup function
   14.53  on a list of key-value pairs, often called an association list:
   14.54 -*}
   14.55 +\<close>
   14.56  (*<*)
   14.57  apply auto
   14.58  done
   14.59 @@ -90,7 +90,7 @@
   14.60  "lookup [] x = None" |
   14.61  "lookup ((a,b) # ps) x = (if a = x then Some b else lookup ps x)"
   14.62  
   14.63 -text{*
   14.64 +text\<open>
   14.65  Note that @{text"\<tau>\<^sub>1 * \<tau>\<^sub>2"} is the type of pairs, also written @{text"\<tau>\<^sub>1 \<times> \<tau>\<^sub>2"}.
   14.66  Pairs can be taken apart either by pattern matching (as above) or with the
   14.67  projection functions @{const fst} and @{const snd}: @{thm fst_conv[of x y]} and @{thm snd_conv[of x y]}.
   14.68 @@ -101,23 +101,23 @@
   14.69  \subsection{Definitions}
   14.70  
   14.71  Non-recursive functions can be defined as in the following example:
   14.72 -\index{definition@\isacom{definition}}*}
   14.73 +\index{definition@\isacom{definition}}\<close>
   14.74  
   14.75  definition sq :: "nat \<Rightarrow> nat" where
   14.76  "sq n = n * n"
   14.77  
   14.78 -text{* Such definitions do not allow pattern matching but only
   14.79 +text\<open>Such definitions do not allow pattern matching but only
   14.80  @{text"f x\<^sub>1 \<dots> x\<^sub>n = t"}, where @{text f} does not occur in @{text t}.
   14.81  
   14.82  \subsection{Abbreviations}
   14.83  
   14.84  Abbreviations are similar to definitions:
   14.85 -\index{abbreviation@\isacom{abbreviation}}*}
   14.86 +\index{abbreviation@\isacom{abbreviation}}\<close>
   14.87  
   14.88  abbreviation sq' :: "nat \<Rightarrow> nat" where
   14.89  "sq' n \<equiv> n * n"
   14.90  
   14.91 -text{* The key difference is that @{const sq'} is only syntactic sugar:
   14.92 +text\<open>The key difference is that @{const sq'} is only syntactic sugar:
   14.93  after parsing, @{term"sq' t"} is replaced by \mbox{@{term"t*t"}};
   14.94  before printing, every occurrence of @{term"u*u"} is replaced by
   14.95  \mbox{@{term"sq' u"}}.  Internally, @{const sq'} does not exist.
   14.96 @@ -153,14 +153,14 @@
   14.97  Functions defined with \isacom{fun} come with their own induction schema
   14.98  that mirrors the recursion schema and is derived from the termination
   14.99  order. For example,
  14.100 -*}
  14.101 +\<close>
  14.102  
  14.103  fun div2 :: "nat \<Rightarrow> nat" where
  14.104  "div2 0 = 0" |
  14.105  "div2 (Suc 0) = 0" |
  14.106  "div2 (Suc(Suc n)) = Suc(div2 n)"
  14.107  
  14.108 -text{* does not just define @{const div2} but also proves a
  14.109 +text\<open>does not just define @{const div2} but also proves a
  14.110  customized induction rule:
  14.111  \[
  14.112  \inferrule{
  14.113 @@ -170,12 +170,12 @@
  14.114  {\mbox{@{thm (concl) div2.induct[of _ "m"]}}}
  14.115  \]
  14.116  This customized induction rule can simplify inductive proofs. For example,
  14.117 -*}
  14.118 +\<close>
  14.119  
  14.120  lemma "div2(n) = n div 2"
  14.121  apply(induction n rule: div2.induct)
  14.122  
  14.123 -txt{* (where the infix @{text div} is the predefined division operation)
  14.124 +txt\<open>(where the infix @{text div} is the predefined division operation)
  14.125  yields the subgoals
  14.126  @{subgoals[display,margin=65]}
  14.127  An application of @{text auto} finishes the proof.
  14.128 @@ -260,7 +260,7 @@
  14.129  append is linear in its first argument.  A linear time version of
  14.130  @{const rev} requires an extra argument where the result is accumulated
  14.131  gradually, using only~@{text"#"}:
  14.132 -*}
  14.133 +\<close>
  14.134  (*<*)
  14.135  apply auto
  14.136  done
  14.137 @@ -269,7 +269,7 @@
  14.138  "itrev []        ys = ys" |
  14.139  "itrev (x#xs) ys = itrev xs (x#ys)"
  14.140  
  14.141 -text{* The behaviour of @{const itrev} is simple: it reverses
  14.142 +text\<open>The behaviour of @{const itrev} is simple: it reverses
  14.143  its first argument by stacking its elements onto the second argument,
  14.144  and it returns that second argument when the first one becomes
  14.145  empty. Note that @{const itrev} is tail-recursive: it can be
  14.146 @@ -277,17 +277,17 @@
  14.147  
  14.148  Naturally, we would like to show that @{const itrev} does indeed reverse
  14.149  its first argument provided the second one is empty:
  14.150 -*}
  14.151 +\<close>
  14.152  
  14.153  lemma "itrev xs [] = rev xs"
  14.154  
  14.155 -txt{* There is no choice as to the induction variable:
  14.156 -*}
  14.157 +txt\<open>There is no choice as to the induction variable:
  14.158 +\<close>
  14.159  
  14.160  apply(induction xs)
  14.161  apply(auto)
  14.162  
  14.163 -txt{*
  14.164 +txt\<open>
  14.165  Unfortunately, this attempt does not prove
  14.166  the induction step:
  14.167  @{subgoals[display,margin=70]}
  14.168 @@ -299,11 +299,11 @@
  14.169  \end{quote}
  14.170  Of course one cannot do this naively: @{prop"itrev xs ys = rev xs"} is
  14.171  just not true.  The correct generalization is
  14.172 -*}
  14.173 +\<close>
  14.174  (*<*)oops(*>*)
  14.175  lemma "itrev xs ys = rev xs @ ys"
  14.176  (*<*)apply(induction xs, auto)(*>*)
  14.177 -txt{*
  14.178 +txt\<open>
  14.179  If @{text ys} is replaced by @{term"[]"}, the right-hand side simplifies to
  14.180  @{term"rev xs"}, as required.
  14.181  In this instance it was easy to guess the right generalization.
  14.182 @@ -320,21 +320,21 @@
  14.183  @{term"a # ys"} instead of @{text ys}. Hence we prove the theorem
  14.184  for all @{text ys} instead of a fixed one. We can instruct induction
  14.185  to perform this generalization for us by adding @{text "arbitrary: ys"}\index{arbitrary@@{text"arbitrary:"}}.
  14.186 -*}
  14.187 +\<close>
  14.188  (*<*)oops
  14.189  lemma "itrev xs ys = rev xs @ ys"
  14.190  (*>*)
  14.191  apply(induction xs arbitrary: ys)
  14.192  
  14.193 -txt{* The induction hypothesis in the induction step is now universally quantified over @{text ys}:
  14.194 +txt\<open>The induction hypothesis in the induction step is now universally quantified over @{text ys}:
  14.195  @{subgoals[display,margin=65]}
  14.196  Thus the proof succeeds:
  14.197 -*}
  14.198 +\<close>
  14.199  
  14.200  apply auto
  14.201  done
  14.202  
  14.203 -text{*
  14.204 +text\<open>
  14.205  This leads to another heuristic for generalization:
  14.206  \begin{quote}
  14.207  \emph{Generalize induction by generalizing all free
  14.208 @@ -547,7 +547,7 @@
  14.209  Define a function @{text "nodes :: tree0 \<Rightarrow> nat"} that counts the number of
  14.210  all nodes (inner nodes and leaves) in such a tree.
  14.211  Consider the following recursive function:
  14.212 -*}
  14.213 +\<close>
  14.214  (*<*)
  14.215  datatype tree0 = Tip | Node tree0 tree0
  14.216  (*>*)
  14.217 @@ -555,7 +555,7 @@
  14.218  "explode 0 t = t" |
  14.219  "explode (Suc n) t = explode n (Node t t)"
  14.220  
  14.221 -text {*
  14.222 +text \<open>
  14.223  Find an equation expressing the size of a tree after exploding it
  14.224  (\noquotes{@{term [source] "nodes (explode n t)"}}) as a function
  14.225  of @{term "nodes t"} and @{text n}. Prove your equation.
  14.226 @@ -569,11 +569,11 @@
  14.227  \exercise
  14.228  Define arithmetic expressions in one variable over integers (type @{typ int})
  14.229  as a data type:
  14.230 -*}
  14.231 +\<close>
  14.232  
  14.233  datatype exp = Var | Const int | Add exp exp | Mult exp exp
  14.234  
  14.235 -text{*
  14.236 +text\<open>
  14.237  Define a function \noquotes{@{term [source]"eval :: exp \<Rightarrow> int \<Rightarrow> int"}}
  14.238  such that @{term"eval e x"} evaluates @{text e} at the value
  14.239  @{text x}.
  14.240 @@ -589,7 +589,7 @@
  14.241  \mbox{@{prop"evalp (coeffs e) x = eval e x"}.}
  14.242  Hint: consider the hint in Exercise~\ref{exe:tree0}.
  14.243  \endexercise
  14.244 -*}
  14.245 +\<close>
  14.246  (*<*)
  14.247  end
  14.248  (*>*)
    15.1 --- a/src/Doc/Sugar/Sugar.thy	Thu Jan 11 13:48:17 2018 +0100
    15.2 +++ b/src/Doc/Sugar/Sugar.thy	Fri Jan 12 14:08:53 2018 +0100
    15.3 @@ -7,7 +7,7 @@
    15.4  no_translations
    15.5    ("prop") "P \<and> Q \<Longrightarrow> R" <= ("prop") "P \<Longrightarrow> Q \<Longrightarrow> R"
    15.6  (*>*)
    15.7 -text{*
    15.8 +text\<open>
    15.9  \section{Introduction}
   15.10  
   15.11  This document is for those Isabelle users who have mastered
   15.12 @@ -142,13 +142,13 @@
   15.13  \end{quote}
   15.14  into the relevant \texttt{ROOT} file, just before the \texttt{theories} for that session.
   15.15  The rest of this document is produced with this flag set to \texttt{false}.
   15.16 -*}
   15.17 +\<close>
   15.18  
   15.19  (*<*)declare [[show_question_marks = false]](*>*)
   15.20  
   15.21 -subsection {*Qualified names*}
   15.22 +subsection \<open>Qualified names\<close>
   15.23  
   15.24 -text{* If there are multiple declarations of the same name, Isabelle prints
   15.25 +text\<open>If there are multiple declarations of the same name, Isabelle prints
   15.26  the qualified name, for example @{text "T.length"}, where @{text T} is the
   15.27  theory it is defined in, to distinguish it from the predefined @{const[source]
   15.28  "List.length"}. In case there is no danger of confusion, you can insist on
   15.29 @@ -201,10 +201,10 @@
   15.30  \end{quote}
   15.31  
   15.32  Sometimes Isabelle $\eta$-contracts terms, for example in the following definition:
   15.33 -*}
   15.34 +\<close>
   15.35  fun eta where
   15.36  "eta (x \<cdot> xs) = (\<forall>y \<in> set xs. x < y)"
   15.37 -text{*
   15.38 +text\<open>
   15.39  \noindent
   15.40  If you now print the defining equation, the result is not what you hoped for:
   15.41  \begin{quote}
   15.42 @@ -287,12 +287,12 @@
   15.43  When displaying theorems with the \texttt{display} option, for example as in
   15.44  \verb!@!\verb!{thm[display] refl}! @{thm[display] refl} the theorem is
   15.45  set in small font. It uses the \LaTeX-macro \verb!\isastyle!,
   15.46 -which is also the style that regular theory text is set in, e.g. *}
   15.47 +which is also the style that regular theory text is set in, e.g.\<close>
   15.48  
   15.49  lemma "t = t"
   15.50  (*<*)oops(*>*)
   15.51  
   15.52 -text{* \noindent Otherwise \verb!\isastyleminor! is used,
   15.53 +text\<open>\noindent Otherwise \verb!\isastyleminor! is used,
   15.54  which does not modify the font size (assuming you stick to the default
   15.55  \verb!\isabellestyle{it}! in \texttt{root.tex}). If you prefer
   15.56  normal font size throughout your text, include
   15.57 @@ -447,23 +447,23 @@
   15.58  papers, but some key lemmas might be of interest.
   15.59  It is usually easiest to put them in figures like the one in Fig.\
   15.60  \ref{fig:proof}. This was achieved with the \isakeyword{text\_raw} command:
   15.61 -*}
   15.62 -text_raw {*
   15.63 +\<close>
   15.64 +text_raw \<open>
   15.65    \begin{figure}
   15.66    \begin{center}\begin{minipage}{0.6\textwidth}  
   15.67    \isastyleminor\isamarkuptrue
   15.68 -*}
   15.69 +\<close>
   15.70  lemma True
   15.71  proof -
   15.72 -  -- "pretty trivial"
   15.73 +  \<comment> "pretty trivial"
   15.74    show True by force
   15.75  qed
   15.76 -text_raw {*    
   15.77 +text_raw \<open>
   15.78    \end{minipage}\end{center}
   15.79    \caption{Example proof in a figure.}\label{fig:proof}
   15.80    \end{figure}
   15.81 -*}
   15.82 -text {*
   15.83 +\<close>
   15.84 +text \<open>
   15.85  
   15.86  \begin{quote}
   15.87  \small
   15.88 @@ -574,7 +574,7 @@
   15.89  \texttt{const\_typ} defined in \texttt{LaTeXsugar}. For example,
   15.90  \verb!@!\verb!{const_typ length}! produces @{const_typ length}.
   15.91  
   15.92 -*}
   15.93 +\<close>
   15.94  
   15.95  (*<*)
   15.96  end
    16.1 --- a/src/Doc/Tutorial/Advanced/Partial.thy	Thu Jan 11 13:48:17 2018 +0100
    16.2 +++ b/src/Doc/Tutorial/Advanced/Partial.thy	Fri Jan 12 14:08:53 2018 +0100
    16.3 @@ -1,6 +1,6 @@
    16.4  (*<*)theory Partial imports While_Combinator begin(*>*)
    16.5  
    16.6 -text{*\noindent Throughout this tutorial, we have emphasized
    16.7 +text\<open>\noindent Throughout this tutorial, we have emphasized
    16.8  that all functions in HOL are total.  We cannot hope to define
    16.9  truly partial functions, but must make them total.  A straightforward
   16.10  method is to lift the result type of the function from $\tau$ to
   16.11 @@ -23,29 +23,29 @@
   16.12  We have already seen an instance of underdefinedness by means of
   16.13  non-exhaustive pattern matching: the definition of @{term last} in
   16.14  \S\ref{sec:fun}. The same is allowed for \isacommand{primrec}
   16.15 -*}
   16.16 +\<close>
   16.17  
   16.18  consts hd :: "'a list \<Rightarrow> 'a"
   16.19  primrec "hd (x#xs) = x"
   16.20  
   16.21 -text{*\noindent
   16.22 +text\<open>\noindent
   16.23  although it generates a warning.
   16.24  Even ordinary definitions allow underdefinedness, this time by means of
   16.25  preconditions:
   16.26 -*}
   16.27 +\<close>
   16.28  
   16.29  definition subtract :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
   16.30  "n \<le> m \<Longrightarrow> subtract m n \<equiv> m - n"
   16.31  
   16.32 -text{*
   16.33 +text\<open>
   16.34  The rest of this section is devoted to the question of how to define
   16.35  partial recursive functions by other means than non-exhaustive pattern
   16.36  matching.
   16.37 -*}
   16.38 +\<close>
   16.39  
   16.40 -subsubsection{*Guarded Recursion*}
   16.41 +subsubsection\<open>Guarded Recursion\<close>
   16.42  
   16.43 -text{* 
   16.44 +text\<open>
   16.45  \index{recursion!guarded}%
   16.46  Neither \isacommand{primrec} nor \isacommand{recdef} allow to
   16.47  prefix an equation with a condition in the way ordinary definitions do
   16.48 @@ -59,14 +59,14 @@
   16.49  which is ideal for specifying underdefined functions on top of it.
   16.50  
   16.51  As a simple example we define division on @{typ nat}:
   16.52 -*}
   16.53 +\<close>
   16.54  
   16.55  consts divi :: "nat \<times> nat \<Rightarrow> nat"
   16.56  recdef divi "measure(\<lambda>(m,n). m)"
   16.57    "divi(m,0) = arbitrary"
   16.58    "divi(m,n) = (if m < n then 0 else divi(m-n,n)+1)"
   16.59  
   16.60 -text{*\noindent Of course we could also have defined
   16.61 +text\<open>\noindent Of course we could also have defined
   16.62  @{term"divi(m,0)"} to be some specific number, for example 0. The
   16.63  latter option is chosen for the predefined @{text div} function, which
   16.64  simplifies proofs at the expense of deviating from the
   16.65 @@ -83,14 +83,14 @@
   16.66  known \emph{Union-Find} algorithm.
   16.67  The snag is that it may not terminate if @{term f} has non-trivial cycles.
   16.68  Phrased differently, the relation
   16.69 -*}
   16.70 +\<close>
   16.71  
   16.72  definition step1 :: "('a \<Rightarrow> 'a) \<Rightarrow> ('a \<times> 'a)set" where
   16.73    "step1 f \<equiv> {(y,x). y = f x \<and> y \<noteq> x}"
   16.74  
   16.75 -text{*\noindent
   16.76 +text\<open>\noindent
   16.77  must be well-founded. Thus we make the following definition:
   16.78 -*}
   16.79 +\<close>
   16.80  
   16.81  consts find :: "('a \<Rightarrow> 'a) \<times> 'a \<Rightarrow> 'a"
   16.82  recdef find "same_fst (\<lambda>f. wf(step1 f)) step1"
   16.83 @@ -99,7 +99,7 @@
   16.84                  else arbitrary)"
   16.85  (hints recdef_simp: step1_def)
   16.86  
   16.87 -text{*\noindent
   16.88 +text\<open>\noindent
   16.89  The recursion equation itself should be clear enough: it is our aborted
   16.90  first attempt augmented with a check that there are no non-trivial loops.
   16.91  To express the required well-founded relation we employ the
   16.92 @@ -122,29 +122,29 @@
   16.93  
   16.94  Normally you will then derive the following conditional variant from
   16.95  the recursion equation:
   16.96 -*}
   16.97 +\<close>
   16.98  
   16.99  lemma [simp]:
  16.100    "wf(step1 f) \<Longrightarrow> find(f,x) = (if f x = x then x else find(f, f x))"
  16.101  by simp
  16.102  
  16.103 -text{*\noindent Then you should disable the original recursion equation:*}
  16.104 +text\<open>\noindent Then you should disable the original recursion equation:\<close>
  16.105  
  16.106  declare find.simps[simp del]
  16.107  
  16.108 -text{*
  16.109 +text\<open>
  16.110  Reasoning about such underdefined functions is like that for other
  16.111  recursive functions.  Here is a simple example of recursion induction:
  16.112 -*}
  16.113 +\<close>
  16.114  
  16.115  lemma "wf(step1 f) \<longrightarrow> f(find(f,x)) = find(f,x)"
  16.116  apply(induct_tac f x rule: find.induct)
  16.117  apply simp
  16.118  done
  16.119  
  16.120 -subsubsection{*The {\tt\slshape while} Combinator*}
  16.121 +subsubsection\<open>The {\tt\slshape while} Combinator\<close>
  16.122  
  16.123 -text{*If the recursive function happens to be tail recursive, its
  16.124 +text\<open>If the recursive function happens to be tail recursive, its
  16.125  definition becomes a triviality if based on the predefined \cdx{while}
  16.126  combinator.  The latter lives in the Library theory \thydx{While_Combinator}.
  16.127  % which is not part of {text Main} but needs to
  16.128 @@ -158,13 +158,13 @@
  16.129  \end{verbatim}
  16.130  In general, @{term s} will be a tuple or record.  As an example
  16.131  consider the following definition of function @{const find}:
  16.132 -*}
  16.133 +\<close>
  16.134  
  16.135  definition find2 :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a" where
  16.136    "find2 f x \<equiv>
  16.137     fst(while (\<lambda>(x,x'). x' \<noteq> x) (\<lambda>(x,x'). (x',f x')) (x,f x))"
  16.138  
  16.139 -text{*\noindent
  16.140 +text\<open>\noindent
  16.141  The loop operates on two ``local variables'' @{term x} and @{term x'}
  16.142  containing the ``current'' and the ``next'' value of function @{term f}.
  16.143  They are initialized with the global @{term x} and @{term"f x"}. At the
  16.144 @@ -185,7 +185,7 @@
  16.145  of induction we apply the above while rule, suitably instantiated.
  16.146  Only the final premise of @{thm[source]while_rule} is left unproved
  16.147  by @{text auto} but falls to @{text simp}:
  16.148 -*}
  16.149 +\<close>
  16.150  
  16.151  lemma lem: "wf(step1 f) \<Longrightarrow>
  16.152    \<exists>y. while (\<lambda>(x,x'). x' \<noteq> x) (\<lambda>(x,x'). (x',f x')) (x,f x) = (y,y) \<and>
  16.153 @@ -196,16 +196,16 @@
  16.154  apply(simp add: inv_image_def step1_def)
  16.155  done
  16.156  
  16.157 -text{*
  16.158 +text\<open>
  16.159  The theorem itself is a simple consequence of this lemma:
  16.160 -*}
  16.161 +\<close>
  16.162  
  16.163  theorem "wf(step1 f) \<Longrightarrow> f(find2 f x) = find2 f x"
  16.164  apply(drule_tac x = x in lem)
  16.165  apply(auto simp add: find2_def)
  16.166  done
  16.167  
  16.168 -text{* Let us conclude this section on partial functions by a
   16.169 +text\<open>Let us conclude this section on partial functions with a
  16.170  discussion of the merits of the @{term while} combinator. We have
  16.171  already seen that the advantage of not having to
  16.172  provide a termination argument when defining a function via @{term
  16.173 @@ -219,6 +219,6 @@
  16.174  definition that is impossible to execute or prohibitively slow.
  16.175  Thus, if you are aiming for an efficiently executable definition
  16.176  of a partial function, you are likely to need @{term while}.
  16.177 -*}
  16.178 +\<close>
  16.179  
  16.180  (*<*)end(*>*)
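
text\<open>
To make the while-based style above concrete, here is a minimal sketch of
division on @{typ nat} written directly with the @{term while} combinator, in
the style of @{const find2}. The constant name @{text divi2} and the theory
wrapper are illustrative assumptions, not part of the tutorial sources; the
import brings in the While_Combinator library mentioned above. Like
@{term divi}, the function is underdefined for divisor 0, because the loop
test then never becomes false:
\<close>

(*<*)theory Divi2 imports "HOL-Library.While_Combinator" begin(*>*)
(* the pair (q,r) holds the quotient so far and the remaining dividend *)
definition divi2 :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
"divi2 m n \<equiv> fst (while (\<lambda>(q,r). n \<le> r) (\<lambda>(q,r). (q+1, r-n)) (0, m))"
(*<*)end(*>*)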
    17.1 --- a/src/Doc/Tutorial/Advanced/WFrec.thy	Thu Jan 11 13:48:17 2018 +0100
    17.2 +++ b/src/Doc/Tutorial/Advanced/WFrec.thy	Fri Jan 12 14:08:53 2018 +0100
    17.3 @@ -1,12 +1,12 @@
    17.4  (*<*)theory WFrec imports Main begin(*>*)
    17.5  
    17.6 -text{*\noindent
    17.7 +text\<open>\noindent
    17.8  So far, all recursive definitions were shown to terminate via measure
    17.9  functions. Sometimes this can be inconvenient or
   17.10  impossible. Fortunately, \isacommand{recdef} supports much more
   17.11  general definitions. For example, termination of Ackermann's function
   17.12  can be shown by means of the \rmindex{lexicographic product} @{text"<*lex*>"}:
   17.13 -*}
   17.14 +\<close>
   17.15  
   17.16  consts ack :: "nat\<times>nat \<Rightarrow> nat"
   17.17  recdef ack "measure(\<lambda>m. m) <*lex*> measure(\<lambda>n. n)"
   17.18 @@ -14,7 +14,7 @@
   17.19    "ack(Suc m,0)     = ack(m, 1)"
   17.20    "ack(Suc m,Suc n) = ack(m,ack(Suc m,n))"
   17.21  
   17.22 -text{*\noindent
   17.23 +text\<open>\noindent
   17.24  The lexicographic product decreases if either its first component
   17.25  decreases (as in the second equation and in the outer call in the
   17.26  third equation) or its first component stays the same and the second
   17.27 @@ -39,7 +39,7 @@
   17.28  product of two well-founded relations is again well-founded, which we relied
   17.29  on when defining Ackermann's function above.
   17.30  Of course the lexicographic product can also be iterated:
   17.31 -*}
   17.32 +\<close>
   17.33  
   17.34  consts contrived :: "nat \<times> nat \<times> nat \<Rightarrow> nat"
   17.35  recdef contrived
   17.36 @@ -49,7 +49,7 @@
   17.37  "contrived(Suc i,0,0) = contrived(i,i,i)"
   17.38  "contrived(0,0,0)     = 0"
   17.39  
   17.40 -text{*
   17.41 +text\<open>
   17.42  Lexicographic products of measure functions already go a long
   17.43  way. Furthermore, you may embed a type in an
   17.44  existing well-founded relation via the inverse image construction @{term
   17.45 @@ -64,42 +64,42 @@
   17.46  \isacommand{recdef}.  For example, the greater-than relation can be made
   17.47  well-founded by cutting it off at a certain point.  Here is an example
   17.48  of a recursive function that calls itself with increasing values up to ten:
   17.49 -*}
   17.50 +\<close>
   17.51  
   17.52  consts f :: "nat \<Rightarrow> nat"
   17.53  recdef (*<*)(permissive)(*>*)f "{(i,j). j<i \<and> i \<le> (10::nat)}"
   17.54  "f i = (if 10 \<le> i then 0 else i * f(Suc i))"
   17.55  
   17.56 -text{*\noindent
   17.57 +text\<open>\noindent
   17.58  Since \isacommand{recdef} is not prepared for the relation supplied above,
   17.59  Isabelle rejects the definition.  We should first have proved that
   17.60  our relation was well-founded:
   17.61 -*}
   17.62 +\<close>
   17.63  
   17.64  lemma wf_greater: "wf {(i,j). j<i \<and> i \<le> (N::nat)}"
   17.65  
   17.66 -txt{*\noindent
   17.67 +txt\<open>\noindent
   17.68  The proof is by showing that our relation is a subset of another well-founded
   17.69  relation: one given by a measure function.\index{*wf_subset (theorem)}
   17.70 -*}
   17.71 +\<close>
   17.72  
   17.73  apply (rule wf_subset [of "measure (\<lambda>k::nat. N-k)"], blast)
   17.74  
   17.75 -txt{*
   17.76 +txt\<open>
   17.77  @{subgoals[display,indent=0,margin=65]}
   17.78  
   17.79  \noindent
   17.80  The inclusion remains to be proved. After unfolding some definitions, 
   17.81  we are left with simple arithmetic that is dispatched automatically.
   17.82 -*}
   17.83 +\<close>
   17.84  
   17.85  by (clarify, simp add: measure_def inv_image_def)
   17.86  
   17.87 -text{*\noindent
   17.88 +text\<open>\noindent
   17.89  
   17.90  Armed with this lemma, we use the \attrdx{recdef_wf} attribute to attach a
   17.91  crucial hint\cmmdx{hints} to our definition:
   17.92 -*}
   17.93 +\<close>
   17.94  (*<*)
   17.95  consts g :: "nat \<Rightarrow> nat"
   17.96  recdef g "{(i,j). j<i \<and> i \<le> (10::nat)}"
   17.97 @@ -107,13 +107,13 @@
   17.98  (*>*)
   17.99  (hints recdef_wf: wf_greater)
  17.100  
  17.101 -text{*\noindent
  17.102 +text\<open>\noindent
  17.103  Alternatively, we could have given @{text "measure (\<lambda>k::nat. 10-k)"} for the
  17.104  well-founded relation in our \isacommand{recdef}.  However, the arithmetic
  17.105  goal in the lemma above would have arisen instead in the \isacommand{recdef}
  17.106  termination proof, where we have less control.  A tailor-made termination
  17.107  relation makes even more sense when it can be used in several function
  17.108  declarations.
  17.109 -*}
  17.110 +\<close>
  17.111  
  17.112  (*<*)end(*>*)
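
text\<open>
A hedged side note, not part of the tutorial sources: the newer
\isacommand{fun} package, which has since superseded \isacommand{recdef},
proves termination of Ackermann's function fully automatically by searching
for a lexicographic combination of size measures, so no relation needs to be
supplied at all. A sketch in curried style (the name @{text ack'} is ours):
\<close>

(* termination is found automatically: first argument decreases, or it
   stays the same and the second argument decreases *)
fun ack' :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
"ack' 0 n = Suc n" |
"ack' (Suc m) 0 = ack' m 1" |
"ack' (Suc m) (Suc n) = ack' m (ack' (Suc m) n)"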
    18.1 --- a/src/Doc/Tutorial/Advanced/simp2.thy	Thu Jan 11 13:48:17 2018 +0100
    18.2 +++ b/src/Doc/Tutorial/Advanced/simp2.thy	Fri Jan 12 14:08:53 2018 +0100
    18.3 @@ -2,19 +2,19 @@
    18.4  theory simp2 imports Main begin
    18.5  (*>*)
    18.6  
    18.7 -section{*Simplification*}
    18.8 +section\<open>Simplification\<close>
    18.9  
   18.10 -text{*\label{sec:simplification-II}\index{simplification|(}
   18.11 +text\<open>\label{sec:simplification-II}\index{simplification|(}
   18.12  This section describes features not covered until now.  It also
   18.13  outlines the simplification process itself, which can be helpful
   18.14  when the simplifier does not do what you expect of it.
   18.15 -*}
   18.16 +\<close>
   18.17  
   18.18 -subsection{*Advanced Features*}
   18.19 +subsection\<open>Advanced Features\<close>
   18.20  
   18.21 -subsubsection{*Congruence Rules*}
   18.22 +subsubsection\<open>Congruence Rules\<close>
   18.23  
   18.24 -text{*\label{sec:simp-cong}
   18.25 +text\<open>\label{sec:simp-cong}
   18.26  While simplifying the conclusion $Q$
   18.27  of $P \Imp Q$, it is legal to use the assumption $P$.
   18.28  For $\Imp$ this policy is hardwired, but 
   18.29 @@ -62,11 +62,11 @@
   18.30  \par\noindent
   18.31  is occasionally useful but is not a default rule; you have to declare it explicitly.
   18.32  \end{warn}
   18.33 -*}
   18.34 +\<close>
   18.35  
   18.36 -subsubsection{*Permutative Rewrite Rules*}
   18.37 +subsubsection\<open>Permutative Rewrite Rules\<close>
   18.38  
   18.39 -text{*
   18.40 +text\<open>
   18.41  \index{rewrite rules!permutative|bold}%
   18.42  An equation is a \textbf{permutative rewrite rule} if the left-hand
   18.43  side and right-hand side are the same up to renaming of variables.  The most
   18.44 @@ -105,20 +105,20 @@
   18.45  Note that ordered rewriting for @{text"+"} and @{text"*"} on numbers is rarely
   18.46  necessary because the built-in arithmetic prover often succeeds without
   18.47  such tricks.
   18.48 -*}
   18.49 +\<close>
   18.50  
   18.51 -subsection{*How the Simplifier Works*}
   18.52 +subsection\<open>How the Simplifier Works\<close>
   18.53  
   18.54 -text{*\label{sec:SimpHow}
   18.55 +text\<open>\label{sec:SimpHow}
   18.56  Roughly speaking, the simplifier proceeds bottom-up: subterms are simplified
   18.57  first.  A conditional equation is only applied if its condition can be
   18.58  proved, again by simplification.  Below we explain some special features of
   18.59  the rewriting process. 
   18.60 -*}
   18.61 +\<close>
   18.62  
   18.63 -subsubsection{*Higher-Order Patterns*}
   18.64 +subsubsection\<open>Higher-Order Patterns\<close>
   18.65  
   18.66 -text{*\index{simplification rule|(}
   18.67 +text\<open>\index{simplification rule|(}
   18.68  So far we have pretended the simplifier can deal with arbitrary
   18.69  rewrite rules. This is not quite true.  For reasons of feasibility,
   18.70  the simplifier expects the
   18.71 @@ -153,11 +153,11 @@
   18.72    
   18.73  There is no restriction on the form of the right-hand
   18.74  sides.  They must not, however, contain extraneous term or type variables.
   18.75 -*}
   18.76 +\<close>
   18.77  
   18.78 -subsubsection{*The Preprocessor*}
   18.79 +subsubsection\<open>The Preprocessor\<close>
   18.80  
   18.81 -text{*\label{sec:simp-preprocessor}
   18.82 +text\<open>\label{sec:simp-preprocessor}
   18.83  When a theorem is declared a simplification rule, it need not be a
   18.84  conditional equation already.  The simplifier will turn it into a set of
   18.85  conditional equations automatically.  For example, @{prop"f x =
   18.86 @@ -183,7 +183,7 @@
   18.87  \end{center}
   18.88  \index{simplification rule|)}
   18.89  \index{simplification|)}
   18.90 -*}
   18.91 +\<close>
   18.92  (*<*)
   18.93  end
   18.94  (*>*)
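
text\<open>
To see ordered rewriting in action, here is a small demonstration lemma of
ours (not from the tutorial): with the usual associativity-commutativity
rules supplied, the simplifier normalizes both sides to the same term
instead of looping on the permutative rules:
\<close>

(* ac_simps collects associativity-commutativity rules such as
   add.assoc, add.commute and add.left_commute *)
lemma "(a::nat) + b + c = c + b + a"
by (simp add: ac_simps)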
    19.1 --- a/src/Doc/Tutorial/CTL/Base.thy	Thu Jan 11 13:48:17 2018 +0100
    19.2 +++ b/src/Doc/Tutorial/CTL/Base.thy	Fri Jan 12 14:08:53 2018 +0100
    19.3 @@ -1,8 +1,8 @@
    19.4  (*<*)theory Base imports Main begin(*>*)
    19.5  
    19.6 -section{*Case Study: Verified Model Checking*}
    19.7 +section\<open>Case Study: Verified Model Checking\<close>
    19.8  
    19.9 -text{*\label{sec:VMC}
   19.10 +text\<open>\label{sec:VMC}
   19.11  This chapter ends with a case study concerning model checking for 
   19.12  Computation Tree Logic (CTL), a temporal logic.
   19.13  Model checking is a popular technique for the verification of finite
   19.14 @@ -54,11 +54,11 @@
   19.15  
   19.16  Abstracting from this concrete example, we assume there is a type of
   19.17  states:
   19.18 -*}
   19.19 +\<close>
   19.20  
   19.21  typedecl state
   19.22  
   19.23 -text{*\noindent
   19.24 +text\<open>\noindent
   19.25  Command \commdx{typedecl} merely declares a new type without
   19.26  defining it (see \S\ref{sec:typedecl}). Thus we know nothing
   19.27  about the type other than its existence. That is exactly what we need
   19.28 @@ -67,25 +67,25 @@
   19.29  parameter of everything but declaring @{typ state} globally as above
   19.30  reduces clutter.  Similarly we declare an arbitrary but fixed
   19.31  transition system, i.e.\ a relation between states:
   19.32 -*}
   19.33 +\<close>
   19.34  
   19.35  consts M :: "(state \<times> state)set"
   19.36  
   19.37 -text{*\noindent
   19.38 +text\<open>\noindent
   19.39  This is Isabelle's way of declaring a constant without defining it.
   19.40  Finally we introduce a type of atomic propositions
   19.41 -*}
   19.42 +\<close>
   19.43  
   19.44  typedecl "atom"
   19.45  
   19.46 -text{*\noindent
   19.47 +text\<open>\noindent
   19.48  and a \emph{labelling function}
   19.49 -*}
   19.50 +\<close>
   19.51  
   19.52  consts L :: "state \<Rightarrow> atom set"
   19.53  
   19.54 -text{*\noindent
   19.55 +text\<open>\noindent
   19.56  telling us which atomic propositions are true in each state.
   19.57 -*}
   19.58 +\<close>
   19.59  
   19.60  (*<*)end(*>*)
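
text\<open>
For intuition only, a concrete instance of such an abstract setup could look
as follows in a separate theory; all names below are illustrative
assumptions, not part of the tutorial sources:
\<close>

datatype state' = S0 | S1 | S2
(* a tiny transition system: S0 to S1 to S2, with a self-loop at S2 *)
definition M' :: "(state' \<times> state') set" where
"M' \<equiv> {(S0,S1), (S1,S2), (S2,S2)}"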
    20.1 --- a/src/Doc/Tutorial/CTL/CTL.thy	Thu Jan 11 13:48:17 2018 +0100
    20.2 +++ b/src/Doc/Tutorial/CTL/CTL.thy	Fri Jan 12 14:08:53 2018 +0100
    20.3 @@ -1,14 +1,14 @@
    20.4  (*<*)theory CTL imports Base begin(*>*)
    20.5  
    20.6 -subsection{*Computation Tree Logic --- CTL*}
    20.7 +subsection\<open>Computation Tree Logic --- CTL\<close>
    20.8  
    20.9 -text{*\label{sec:CTL}
   20.10 +text\<open>\label{sec:CTL}
   20.11  \index{CTL|(}%
   20.12  The semantics of PDL only needs reflexive transitive closure.
   20.13  Let us be adventurous and introduce a more expressive temporal operator.
   20.14  We extend the datatype
   20.15  @{text formula} by a new constructor
   20.16 -*}
   20.17 +\<close>
   20.18  (*<*)
   20.19  datatype formula = Atom "atom"
   20.20                    | Neg formula
   20.21 @@ -17,23 +17,23 @@
   20.22                    | EF formula(*>*)
   20.23                    | AF formula
   20.24  
   20.25 -text{*\noindent
   20.26 +text\<open>\noindent
   20.27  which stands for ``\emph{A}lways in the \emph{F}uture'':
   20.28  on all infinite paths, at some point the formula holds.
   20.29  Formalizing the notion of an infinite path is easy
   20.30  in HOL: it is simply a function from @{typ nat} to @{typ state}.
   20.31 -*}
   20.32 +\<close>
   20.33  
   20.34  definition Paths :: "state \<Rightarrow> (nat \<Rightarrow> state)set" where
   20.35  "Paths s \<equiv> {p. s = p 0 \<and> (\<forall>i. (p i, p(i+1)) \<in> M)}"
   20.36  
   20.37 -text{*\noindent
   20.38 +text\<open>\noindent
   20.39  This definition allows a succinct statement of the semantics of @{const AF}:
   20.40  \footnote{Do not be misled: neither datatypes nor recursive functions can be
   20.41  extended by new constructors or equations. This is just a trick of the
   20.42  presentation (see \S\ref{sec:doc-prep-suppress}). In reality one has to define
   20.43  a new datatype and a new function.}
   20.44 -*}
   20.45 +\<close>
   20.46  (*<*)
   20.47  primrec valid :: "state \<Rightarrow> formula \<Rightarrow> bool" ("(_ \<Turnstile> _)" [80,80] 80) where
   20.48  "s \<Turnstile> Atom a  =  (a \<in> L s)" |
   20.49 @@ -44,18 +44,18 @@
   20.50  (*>*)
   20.51  "s \<Turnstile> AF f    = (\<forall>p \<in> Paths s. \<exists>i. p i \<Turnstile> f)"
   20.52  
   20.53 -text{*\noindent
   20.54 +text\<open>\noindent
   20.55  Model checking @{const AF} involves a function which
   20.56  is just complicated enough to warrant a separate definition:
   20.57 -*}
   20.58 +\<close>
   20.59  
   20.60  definition af :: "state set \<Rightarrow> state set \<Rightarrow> state set" where
   20.61  "af A T \<equiv> A \<union> {s. \<forall>t. (s, t) \<in> M \<longrightarrow> t \<in> T}"
   20.62  
   20.63 -text{*\noindent
   20.64 +text\<open>\noindent
   20.65  Now we define @{term "mc(AF f)"} as the least set @{term T} that includes
   20.66  @{term"mc f"} and all states all of whose direct successors are in @{term T}:
   20.67 -*}
   20.68 +\<close>
   20.69  (*<*)
   20.70  primrec mc :: "formula \<Rightarrow> state set" where
   20.71  "mc(Atom a)  = {s. a \<in> L s}" |
   20.72 @@ -65,10 +65,10 @@
   20.73  "mc(EF f)    = lfp(\<lambda>T. mc f \<union> M\<inverse> `` T)"|(*>*)
   20.74  "mc(AF f)    = lfp(af(mc f))"
   20.75  
   20.76 -text{*\noindent
   20.77 +text\<open>\noindent
   20.78  Because @{const af} is monotone in its second argument (and also its first, but
   20.79  that is irrelevant), @{term"af A"} has a least fixed point:
   20.80 -*}
   20.81 +\<close>
   20.82  
   20.83  lemma mono_af: "mono(af A)"
   20.84  apply(simp add: mono_def af_def)
   20.85 @@ -96,16 +96,16 @@
   20.86  apply(subst lfp_unfold[OF mono_ef])
   20.87  by(blast)
   20.88  (*>*)
   20.89 -text{*
   20.90 +text\<open>
   20.91  All we need to prove now is  @{prop"mc(AF f) = {s. s \<Turnstile> AF f}"}, which states
   20.92  that @{term mc} and @{text"\<Turnstile>"} agree for @{const AF}\@.
   20.93  This time we prove the two inclusions separately, starting
   20.94  with the easy one:
   20.95 -*}
   20.96 +\<close>
   20.97  
   20.98  theorem AF_lemma1: "lfp(af A) \<subseteq> {s. \<forall>p \<in> Paths s. \<exists>i. p i \<in> A}"
   20.99  
  20.100 -txt{*\noindent
  20.101 +txt\<open>\noindent
  20.102  In contrast to the analogous proof for @{const EF}, and just
  20.103  for a change, we do not use fixed point induction.  Park-induction,
  20.104  named after David Park, is weaker but sufficient for this proof:
  20.105 @@ -114,24 +114,24 @@
  20.106  \end{center}
  20.107  The instance of the premise @{prop"f S \<subseteq> S"} is proved pointwise,
  20.108  a decision that \isa{auto} takes for us:
  20.109 -*}
  20.110 +\<close>
  20.111  apply(rule lfp_lowerbound)
  20.112  apply(auto simp add: af_def Paths_def)
  20.113  
  20.114 -txt{*
  20.115 +txt\<open>
  20.116  @{subgoals[display,indent=0,margin=70,goals_limit=1]}
  20.117  In this remaining case, we set @{term t} to @{term"p(1::nat)"}.
  20.118  The rest is automatic, which is surprising because it involves
  20.119  finding the instantiation @{term"\<lambda>i::nat. p(i+1)"}
  20.120  for @{text"\<forall>p"}.
  20.121 -*}
  20.122 +\<close>
  20.123  
  20.124  apply(erule_tac x = "p 1" in allE)
  20.125  apply(auto)
  20.126  done
  20.127  
  20.128  
  20.129 -text{*
  20.130 +text\<open>
  20.131  The opposite inclusion is proved by contradiction: if some state
  20.132  @{term s} is not in @{term"lfp(af A)"}, then we can construct an
  20.133  infinite @{term A}-avoiding path starting from~@{term s}. The reason is
  20.134 @@ -143,7 +143,7 @@
  20.135  
  20.136  The one-step argument in the sketch above
  20.137  is proved by a variant of contraposition:
  20.138 -*}
  20.139 +\<close>
  20.140  
  20.141  lemma not_in_lfp_afD:
  20.142   "s \<notin> lfp(af A) \<Longrightarrow> s \<notin> A \<and> (\<exists> t. (s,t) \<in> M \<and> t \<notin> lfp(af A))"
  20.143 @@ -152,20 +152,20 @@
  20.144  apply(simp add: af_def)
  20.145  done
  20.146  
  20.147 -text{*\noindent
  20.148 +text\<open>\noindent
  20.149  We assume the negation of the conclusion and prove @{term"s : lfp(af A)"}.
  20.150  Unfolding @{const lfp} once and
  20.151  simplifying with the definition of @{const af} finishes the proof.
  20.152  
  20.153  Now we iterate this process. The following construction of the desired
  20.154  path is parameterized by a predicate @{term Q} that should hold along the path:
  20.155 -*}
  20.156 +\<close>
  20.157  
  20.158  primrec path :: "state \<Rightarrow> (state \<Rightarrow> bool) \<Rightarrow> (nat \<Rightarrow> state)" where
  20.159  "path s Q 0 = s" |
  20.160  "path s Q (Suc n) = (SOME t. (path s Q n,t) \<in> M \<and> Q t)"
  20.161  
  20.162 -text{*\noindent
  20.163 +text\<open>\noindent
  20.164  Element @{term"n+1::nat"} on this path is some arbitrary successor
  20.165  @{term t} of element @{term n} such that @{term"Q t"} holds.  Remember that @{text"SOME t. R t"}
  20.166  is some arbitrary but fixed @{term t} such that @{prop"R t"} holds (see \S\ref{sec:SOME}). Of
  20.167 @@ -175,43 +175,43 @@
  20.168  
  20.169  Let us show that if each state @{term s} that satisfies @{term Q}
  20.170  has a successor that again satisfies @{term Q}, then there exists an infinite @{term Q}-path:
  20.171 -*}
  20.172 +\<close>
  20.173  
  20.174  lemma infinity_lemma:
  20.175    "\<lbrakk> Q s; \<forall>s. Q s \<longrightarrow> (\<exists> t. (s,t) \<in> M \<and> Q t) \<rbrakk> \<Longrightarrow>
  20.176     \<exists>p\<in>Paths s. \<forall>i. Q(p i)"
  20.177  
  20.178 -txt{*\noindent
  20.179 +txt\<open>\noindent
  20.180  First we rephrase the conclusion slightly because we need to prove simultaneously
  20.181  both the path property and the fact that @{term Q} holds:
  20.182 -*}
  20.183 +\<close>
  20.184  
  20.185  apply(subgoal_tac
  20.186    "\<exists>p. s = p 0 \<and> (\<forall>i::nat. (p i, p(i+1)) \<in> M \<and> Q(p i))")
  20.187  
  20.188 -txt{*\noindent
  20.189 +txt\<open>\noindent
  20.190  From this proposition the original goal follows easily:
  20.191 -*}
  20.192 +\<close>
  20.193  
  20.194   apply(simp add: Paths_def, blast)
  20.195  
  20.196 -txt{*\noindent
  20.197 +txt\<open>\noindent
  20.198  The new subgoal is proved by providing the witness @{term "path s Q"} for @{term p}:
  20.199 -*}
  20.200 +\<close>
  20.201  
  20.202  apply(rule_tac x = "path s Q" in exI)
  20.203  apply(clarsimp)
  20.204  
  20.205 -txt{*\noindent
  20.206 +txt\<open>\noindent
  20.207  After simplification and clarification, the subgoal has the following form:
  20.208  @{subgoals[display,indent=0,margin=70,goals_limit=1]}
  20.209  It invites a proof by induction on @{term i}:
  20.210 -*}
  20.211 +\<close>
  20.212  
  20.213  apply(induct_tac i)
  20.214   apply(simp)
  20.215  
  20.216 -txt{*\noindent
  20.217 +txt\<open>\noindent
  20.218  After simplification, the base case boils down to
  20.219  @{subgoals[display,indent=0,margin=70,goals_limit=1]}
  20.220  The conclusion looks exceedingly trivial: after all, @{term t} is chosen such that @{prop"(s,t):M"}
  20.221 @@ -223,11 +223,11 @@
  20.222  two subgoals: @{prop"EX a. (s, a) : M & Q a"}, which follows from the assumptions, and
  20.223  @{prop"(s, x) : M & Q x ==> (s,x) : M"}, which is trivial. Thus it is not surprising that
  20.224  @{text fast} can prove the base case quickly:
  20.225 -*}
  20.226 +\<close>
  20.227  
  20.228   apply(fast intro: someI2_ex)
  20.229  
  20.230 -txt{*\noindent
  20.231 +txt\<open>\noindent
  20.232  What is worth noting here is that we have used \methdx{fast} rather than
  20.233  @{text blast}.  The reason is that @{text blast} would fail because it cannot
  20.234  cope with @{thm[source]someI2_ex}: unifying its conclusion with the current
  20.235 @@ -242,7 +242,7 @@
  20.236  occurrences of @{text SOME}. As a result, @{text fast} is no longer able to
  20.237  solve the subgoal and we apply @{thm[source]someI2_ex} by hand.  We merely
  20.238  show the proof commands but do not describe the details:
  20.239 -*}
  20.240 +\<close>
  20.241  
  20.242  apply(simp)
  20.243  apply(rule someI2_ex)
  20.244 @@ -252,7 +252,7 @@
  20.245  apply(blast)
  20.246  done
  20.247  
  20.248 -text{*
  20.249 +text\<open>
  20.250  Function @{const path} has fulfilled its purpose now and can be forgotten.
  20.251  It was merely defined to provide the witness in the proof of the
  20.252  @{thm[source]infinity_lemma}. Aficionados of minimal proofs might like to know
  20.253 @@ -261,7 +261,7 @@
  20.254  @{term[display]"rec_nat s (\<lambda>n t. SOME u. (t,u)\<in>M \<and> Q u)"}
  20.255  is extensionally equal to @{term"path s Q"},
  20.256  where @{term rec_nat} is the predefined primitive recursor on @{typ nat}.
  20.257 -*}
  20.258 +\<close>
  20.259  (*<*)
  20.260  lemma
  20.261  "\<lbrakk> Q s; \<forall> s. Q s \<longrightarrow> (\<exists> t. (s,t)\<in>M \<and> Q t) \<rbrakk> \<Longrightarrow>
  20.262 @@ -284,37 +284,37 @@
  20.263  by(blast)
  20.264  (*>*)
  20.265  
  20.266 -text{*
  20.267 +text\<open>
  20.268  At last we can prove the opposite direction of @{thm[source]AF_lemma1}:
  20.269 -*}
  20.270 +\<close>
  20.271  
  20.272  theorem AF_lemma2: "{s. \<forall>p \<in> Paths s. \<exists>i. p i \<in> A} \<subseteq> lfp(af A)"
  20.273  
  20.274 -txt{*\noindent
  20.275 +txt\<open>\noindent
  20.276  The proof is again pointwise and then by contraposition:
  20.277 -*}
  20.278 +\<close>
  20.279  
  20.280  apply(rule subsetI)
  20.281  apply(erule contrapos_pp)
  20.282  apply simp
  20.283  
  20.284 -txt{*
  20.285 +txt\<open>
  20.286  @{subgoals[display,indent=0,goals_limit=1]}
  20.287  Applying the @{thm[source]infinity_lemma} as a destruction rule leaves two subgoals, the second
  20.288  premise of @{thm[source]infinity_lemma} and the original subgoal:
  20.289 -*}
  20.290 +\<close>
  20.291  
  20.292  apply(drule infinity_lemma)
  20.293  
  20.294 -txt{*
  20.295 +txt\<open>
  20.296  @{subgoals[display,indent=0,margin=65]}
  20.297  Both are solved automatically:
  20.298 -*}
  20.299 +\<close>
  20.300  
  20.301   apply(auto dest: not_in_lfp_afD)
  20.302  done
  20.303  
  20.304 -text{*
  20.305 +text\<open>
  20.306  If you find these proofs too complicated, we recommend that you read
  20.307  \S\ref{sec:CTL-revisited}, where we show how inductive definitions lead to
  20.308  simpler arguments.
  20.309 @@ -322,20 +322,20 @@
  20.310  The main theorem is proved as for PDL, except that we also derive the
  20.311  necessary equality @{text"lfp(af A) = ..."} by combining
  20.312  @{thm[source]AF_lemma1} and @{thm[source]AF_lemma2} on the spot:
  20.313 -*}
  20.314 +\<close>
  20.315  
  20.316  theorem "mc f = {s. s \<Turnstile> f}"
  20.317  apply(induct_tac f)
  20.318  apply(auto simp add: EF_lemma equalityI[OF AF_lemma1 AF_lemma2])
  20.319  done
  20.320  
  20.321 -text{*
  20.322 +text\<open>
  20.323  
  20.324  The language defined above is not quite CTL\@. The latter also includes an
  20.325  until-operator @{term"EU f g"} with semantics ``there \emph{E}xists a path
  20.326  where @{term f} is true \emph{U}ntil @{term g} becomes true''.  We need
  20.327  an auxiliary function:
  20.328 -*}
  20.329 +\<close>
  20.330  
  20.331  primrec
  20.332  until:: "state set \<Rightarrow> state set \<Rightarrow> state \<Rightarrow> state list \<Rightarrow> bool" where
  20.333 @@ -345,7 +345,7 @@
  20.334   eusem :: "state set \<Rightarrow> state set \<Rightarrow> state set" where
  20.335  "eusem A B \<equiv> {s. \<exists>p. until A B s p}"(*>*)
  20.336  
  20.337 -text{*\noindent
  20.338 +text\<open>\noindent
  20.339  Expressing the semantics of @{term EU} is now straightforward:
  20.340  @{prop[display]"s \<Turnstile> EU f g = (\<exists>p. until {t. t \<Turnstile> f} {t. t \<Turnstile> g} s p)"}
  20.341  Note that @{term EU} is not definable in terms of the other operators!
  20.342 @@ -362,7 +362,7 @@
  20.343  %which enables you to read and write {text"E[f U g]"} instead of {term"EU f g"}.
  20.344  \end{exercise}
  20.345  For more CTL exercises see, for example, Huth and Ryan @{cite "Huth-Ryan-book"}.
  20.346 -*}
  20.347 +\<close>
  20.348  
  20.349  (*<*)
  20.350  definition eufix :: "state set \<Rightarrow> state set \<Rightarrow> state set \<Rightarrow> state set" where
  20.351 @@ -435,7 +435,7 @@
  20.352  *)
  20.353  (*>*)
  20.354  
  20.355 -text{* Let us close this section with a few words about the executability of
  20.356 +text\<open>Let us close this section with a few words about the executability of
  20.357  our model checkers.  It is clear that if all sets are finite, they can be
  20.358  represented as lists and the usual set operations are easily
  20.359  implemented. Only @{const lfp} requires a little thought.  Fortunately, theory
  20.360 @@ -445,5 +445,5 @@
  20.361  iterated application of @{term F} to~@{term"{}"} until a fixed point is
  20.362  reached. It is actually possible to generate executable functional programs
  20.363  from HOL definitions, but that is beyond the scope of the tutorial.%
  20.364 -\index{CTL|)} *}
  20.365 +\index{CTL|)}\<close>
  20.366  (*<*)end(*>*)
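
text\<open>
A hedged sketch of the iteration idea just described, phrased once more with
the @{term while} combinator and assuming the While_Combinator library is
imported (the name @{text lfp_iter} is ours; the Library's actual interface
may differ): starting from @{term"{}"} and applying @{term F} until nothing
changes computes the least fixed point of a monotone @{term F} whenever the
iteration terminates, e.g.\ on a finite state space:
\<close>

definition lfp_iter :: "('a set \<Rightarrow> 'a set) \<Rightarrow> 'a set" where
"lfp_iter F \<equiv> while (\<lambda>S. F S \<noteq> S) F {}"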
    21.1 --- a/src/Doc/Tutorial/CTL/CTLind.thy	Thu Jan 11 13:48:17 2018 +0100
    21.2 +++ b/src/Doc/Tutorial/CTL/CTLind.thy	Fri Jan 12 14:08:53 2018 +0100
    21.3 @@ -1,8 +1,8 @@
    21.4  (*<*)theory CTLind imports CTL begin(*>*)
    21.5  
    21.6 -subsection{*CTL Revisited*}
    21.7 +subsection\<open>CTL Revisited\<close>
    21.8  
    21.9 -text{*\label{sec:CTL-revisited}
   21.10 +text\<open>\label{sec:CTL-revisited}
   21.11  \index{CTL|(}%
   21.12  The purpose of this section is twofold: to demonstrate
   21.13  some of the induction principles and heuristics discussed above and to
   21.14 @@ -22,7 +22,7 @@
   21.15  A}-avoiding path:
   21.16  % Second proof of opposite direction, directly by well-founded induction
   21.17  % on the initial segment of M that avoids A.
   21.18 -*}
   21.19 +\<close>
   21.20  
   21.21  inductive_set
   21.22    Avoid :: "state \<Rightarrow> state set \<Rightarrow> state set"
   21.23 @@ -31,7 +31,7 @@
   21.24      "s \<in> Avoid s A"
   21.25    | "\<lbrakk> t \<in> Avoid s A; t \<notin> A; (t,u) \<in> M \<rbrakk> \<Longrightarrow> u \<in> Avoid s A"
   21.26  
   21.27 -text{*
   21.28 +text\<open>
   21.29  It is easy to see that for any infinite @{term A}-avoiding path @{term f}
   21.30  with @{prop"f(0::nat) \<in> Avoid s A"} there is an infinite @{term A}-avoiding path
   21.31  starting with @{term s} because (by definition of @{const Avoid}) there is a
   21.32 @@ -40,7 +40,7 @@
   21.33  this requires the following
   21.34  reformulation, as explained in \S\ref{sec:ind-var-in-prems} above;
   21.35  the @{text rule_format} directive undoes the reformulation after the proof.
   21.36 -*}
   21.37 +\<close>
   21.38  
   21.39  lemma ex_infinite_path[rule_format]:
   21.40    "t \<in> Avoid s A  \<Longrightarrow>
   21.41 @@ -52,7 +52,7 @@
   21.42  apply(simp_all add: Paths_def split: nat.split)
   21.43  done
   21.44  
   21.45 -text{*\noindent
   21.46 +text\<open>\noindent
   21.47  The base case (@{prop"t = s"}) is trivial and proved by @{text blast}.
   21.48  In the induction step, we have an infinite @{term A}-avoiding path @{term f}
   21.49  starting from @{term u}, a successor of @{term t}. Now we simply instantiate
   21.50 @@ -66,12 +66,12 @@
   21.51  inductive proof this must be generalized to the statement that every point @{term t}
   21.52  ``between'' @{term s} and @{term A}, in other words all of @{term"Avoid s A"},
   21.53  is contained in @{term"lfp(af A)"}:
   21.54 -*}
   21.55 +\<close>
   21.56  
   21.57  lemma Avoid_in_lfp[rule_format(no_asm)]:
   21.58    "\<forall>p\<in>Paths s. \<exists>i. p i \<in> A \<Longrightarrow> t \<in> Avoid s A \<longrightarrow> t \<in> lfp(af A)"
   21.59  
   21.60 -txt{*\noindent
   21.61 +txt\<open>\noindent
   21.62  The proof is by induction on the ``distance'' between @{term t} and @{term
   21.63  A}. Remember that @{prop"lfp(af A) = A \<union> M\<inverse> `` lfp(af A)"}.
   21.64  If @{term t} is already in @{term A}, then @{prop"t \<in> lfp(af A)"} is
   21.65 @@ -85,14 +85,14 @@
   21.66  As we shall see presently, the absence of infinite @{term A}-avoiding paths
   21.67  starting from @{term s} implies well-foundedness of this relation. For the
   21.68  moment we assume this and proceed with the induction:
   21.69 -*}
   21.70 +\<close>
   21.71  
   21.72  apply(subgoal_tac "wf{(y,x). (x,y) \<in> M \<and> x \<in> Avoid s A \<and> x \<notin> A}")
   21.73   apply(erule_tac a = t in wf_induct)
   21.74   apply(clarsimp)
   21.75  (*<*)apply(rename_tac t)(*>*)
   21.76  
   21.77 -txt{*\noindent
   21.78 +txt\<open>\noindent
   21.79  @{subgoals[display,indent=0,margin=65]}
   21.80  Now the induction hypothesis states that if @{prop"t \<notin> A"}
   21.81  then all successors of @{term t} that are in @{term"Avoid s A"} are in
   21.82 @@ -104,13 +104,13 @@
   21.83  @{term"Avoid s A"}, because we also assume @{prop"t \<in> Avoid s A"}.
   21.84  Hence, by the induction hypothesis, all successors of @{term t} are indeed in
   21.85  @{term"lfp(af A)"}. Mechanically:
   21.86 -*}
   21.87 +\<close>
   21.88  
   21.89   apply(subst lfp_unfold[OF mono_af])
   21.90   apply(simp (no_asm) add: af_def)
   21.91   apply(blast intro: Avoid.intros)
   21.92  
   21.93 -txt{*
   21.94 +txt\<open>
   21.95  Having proved the main goal, we return to the proof obligation that the 
   21.96  relation used above is indeed well-founded. This is proved by contradiction: if
   21.97  the relation is not well-founded then there exists an infinite @{term
   21.98 @@ -119,7 +119,7 @@
   21.99  @{thm[display]wf_iff_no_infinite_down_chain[no_vars]}
  21.100  From lemma @{thm[source]ex_infinite_path} the existence of an infinite
  21.101  @{term A}-avoiding path starting in @{term s} follows, contradiction.
  21.102 -*}
  21.103 +\<close>
  21.104  
  21.105  apply(erule contrapos_pp)
  21.106  apply(simp add: wf_iff_no_infinite_down_chain)
  21.107 @@ -128,7 +128,7 @@
  21.108  apply(auto simp add: Paths_def)
  21.109  done
  21.110  
  21.111 -text{*
  21.112 +text\<open>
  21.113  The @{text"(no_asm)"} modifier of the @{text"rule_format"} directive in the
  21.114  statement of the lemma means
  21.115  that the assumption is left unchanged; otherwise the @{text"\<forall>p"} 
  21.116 @@ -139,7 +139,7 @@
  21.117  The main theorem is simply the corollary where @{prop"t = s"},
  21.118  when the assumption @{prop"t \<in> Avoid s A"} is trivially true
  21.119  by the first @{const Avoid}-rule. Isabelle confirms this:%
  21.120 -\index{CTL|)}*}
  21.121 +\index{CTL|)}\<close>
  21.122  
  21.123  theorem AF_lemma2:  "{s. \<forall>p \<in> Paths s. \<exists> i. p i \<in> A} \<subseteq> lfp(af A)"
  21.124  by(auto elim: Avoid_in_lfp intro: Avoid.intros)
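
text\<open>
As a closing aside (ours, not from the tutorial): in the context of this
theory the same construction also goes through as an inductive predicate
instead of a set, which some readers may find more natural in Isar proofs:
\<close>

inductive avoid :: "state \<Rightarrow> state set \<Rightarrow> state \<Rightarrow> bool" where
"avoid s A s" |
"\<lbrakk> avoid s A t; t \<notin> A; (t,u) \<in> M \<rbrakk> \<Longrightarrow> avoid s A u"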
    22.1 --- a/src/Doc/Tutorial/CTL/PDL.thy	Thu Jan 11 13:48:17 2018 +0100
    22.2 +++ b/src/Doc/Tutorial/CTL/PDL.thy	Fri Jan 12 14:08:53 2018 +0100
    22.3 @@ -1,8 +1,8 @@
    22.4  (*<*)theory PDL imports Base begin(*>*)
    22.5  
    22.6 -subsection{*Propositional Dynamic Logic --- PDL*}
    22.7 +subsection\<open>Propositional Dynamic Logic --- PDL\<close>
    22.8  
    22.9 -text{*\index{PDL|(}
   22.10 +text\<open>\index{PDL|(}
   22.11  The formulae of PDL are built up from atomic propositions via
   22.12  negation and conjunction and the two temporal
   22.13  connectives @{text AX} and @{text EF}\@. Since formulae are essentially
   22.14 @@ -10,7 +10,7 @@
   22.15  \footnote{The customary definition of PDL
   22.16  @{cite "HarelKT-DL"} looks quite different from ours, but the two are easily
   22.17  shown to be equivalent.}
   22.18 -*}
   22.19 +\<close>
   22.20  
   22.21  datatype formula = Atom "atom"
   22.22                    | Neg formula
   22.23 @@ -18,13 +18,13 @@
   22.24                    | AX formula
   22.25                    | EF formula
   22.26  
   22.27 -text{*\noindent
   22.28 +text\<open>\noindent
   22.29  This resembles the boolean expression case study in
   22.30  \S\ref{sec:boolex}.
   22.31  A validity relation between states and formulae specifies the semantics.
   22.32  The syntax annotation allows us to write @{text"s \<Turnstile> f"} instead of
   22.33  \hbox{@{text"valid s f"}}. The definition is by recursion over the syntax:
   22.34 -*}
   22.35 +\<close>
   22.36  
   22.37  primrec valid :: "state \<Rightarrow> formula \<Rightarrow> bool"   ("(_ \<Turnstile> _)" [80,80] 80)
   22.38  where
   22.39 @@ -34,7 +34,7 @@
   22.40  "s \<Turnstile> AX f    = (\<forall>t. (s,t) \<in> M \<longrightarrow> t \<Turnstile> f)" |
   22.41  "s \<Turnstile> EF f    = (\<exists>t. (s,t) \<in> M\<^sup>* \<and> t \<Turnstile> f)"
   22.42  
   22.43 -text{*\noindent
   22.44 +text\<open>\noindent
   22.45  The first three equations should be self-explanatory. The temporal formula
   22.46  @{term"AX f"} means that @{term f} is true in \emph{A}ll ne\emph{X}t states whereas
   22.47  @{term"EF f"} means that there \emph{E}xists some \emph{F}uture state in which @{term f} is
   22.48 @@ -43,7 +43,7 @@
   22.49  
   22.50  Now we come to the model checker itself. It maps a formula into the
   22.51  set of states where the formula is true.  It too is defined by
   22.52 -recursion over the syntax: *}
   22.53 +recursion over the syntax:\<close>
   22.54  
   22.55  primrec mc :: "formula \<Rightarrow> state set" where
   22.56  "mc(Atom a)  = {s. a \<in> L s}" |
   22.57 @@ -52,7 +52,7 @@
   22.58  "mc(AX f)    = {s. \<forall>t. (s,t) \<in> M  \<longrightarrow> t \<in> mc f}" |
   22.59  "mc(EF f)    = lfp(\<lambda>T. mc f \<union> (M\<inverse> `` T))"
   22.60  
   22.61 -text{*\noindent
   22.62 +text\<open>\noindent
   22.63  Only the equation for @{term EF} deserves some comments. Remember that the
   22.64  postfix @{text"\<inverse>"} and the infix @{text"``"} are predefined and denote the
   22.65  converse of a relation and the image of a set under a relation.  Thus
   22.66 @@ -65,40 +65,40 @@
   22.67  
   22.68  First we prove monotonicity of the function inside @{term lfp}
   22.69  in order to make sure it really has a least fixed point.
   22.70 -*}
   22.71 +\<close>
   22.72  
   22.73  lemma mono_ef: "mono(\<lambda>T. A \<union> (M\<inverse> `` T))"
   22.74  apply(rule monoI)
   22.75  apply blast
   22.76  done
   22.77  
   22.78 -text{*\noindent
   22.79 +text\<open>\noindent
   22.80  Now we can relate model checking and semantics. For the @{text EF} case we need
   22.81  a separate lemma:
   22.82 -*}
   22.83 +\<close>
   22.84  
   22.85  lemma EF_lemma:
   22.86    "lfp(\<lambda>T. A \<union> (M\<inverse> `` T)) = {s. \<exists>t. (s,t) \<in> M\<^sup>* \<and> t \<in> A}"
   22.87  
   22.88 -txt{*\noindent
   22.89 +txt\<open>\noindent
   22.90  The equality is proved in the canonical fashion by proving that each set
   22.91  includes the other; the inclusion is shown pointwise:
   22.92 -*}
   22.93 +\<close>
   22.94  
   22.95  apply(rule equalityI)
   22.96   apply(rule subsetI)
   22.97   apply(simp)(*<*)apply(rename_tac s)(*>*)
   22.98  
   22.99 -txt{*\noindent
  22.100 +txt\<open>\noindent
  22.101  Simplification leaves us with the following first subgoal
  22.102  @{subgoals[display,indent=0,goals_limit=1]}
  22.103  which is proved by @{term lfp}-induction:
  22.104 -*}
  22.105 +\<close>
  22.106  
  22.107   apply(erule lfp_induct_set)
  22.108    apply(rule mono_ef)
  22.109   apply(simp)
  22.110 -txt{*\noindent
  22.111 +txt\<open>\noindent
  22.112  Having disposed of the monotonicity subgoal,
  22.113  simplification leaves us with the following goal:
  22.114  \begin{isabelle}
  22.115 @@ -108,19 +108,19 @@
  22.116  \end{isabelle}
  22.117  It is proved by @{text blast}, using the transitivity of 
  22.118  \isa{M\isactrlsup {\isacharasterisk}}.
  22.119 -*}
  22.120 +\<close>
  22.121  
  22.122   apply(blast intro: rtrancl_trans)
  22.123  
  22.124 -txt{*
  22.125 +txt\<open>
  22.126  We now return to the second set inclusion subgoal, which is again proved
  22.127  pointwise:
  22.128 -*}
  22.129 +\<close>
  22.130  
  22.131  apply(rule subsetI)
  22.132  apply(simp, clarify)
  22.133  
  22.134 -txt{*\noindent
  22.135 +txt\<open>\noindent
  22.136  After simplification and clarification we are left with
  22.137  @{subgoals[display,indent=0,goals_limit=1]}
  22.138  This goal is proved by induction on @{term"(s,t)\<in>M\<^sup>*"}. But since the model
  22.139 @@ -132,44 +132,44 @@
  22.140  It says that if @{prop"(a,b):r\<^sup>*"} and we know @{prop"P b"} then we can infer
  22.141  @{prop"P a"} provided each step backwards from a predecessor @{term z} of
  22.142  @{term b} preserves @{term P}.
  22.143 -*}
  22.144 +\<close>
  22.145  
  22.146  apply(erule converse_rtrancl_induct)
  22.147  
  22.148 -txt{*\noindent
  22.149 +txt\<open>\noindent
  22.150  The base case
  22.151  @{subgoals[display,indent=0,goals_limit=1]}
  22.152  is solved by unrolling @{term lfp} once
  22.153 -*}
  22.154 +\<close>
  22.155  
  22.156   apply(subst lfp_unfold[OF mono_ef])
  22.157  
  22.158 -txt{*
  22.159 +txt\<open>
  22.160  @{subgoals[display,indent=0,goals_limit=1]}
  22.161  and disposing of the resulting trivial subgoal automatically:
  22.162 -*}
  22.163 +\<close>
  22.164  
  22.165   apply(blast)
  22.166  
  22.167 -txt{*\noindent
  22.168 +txt\<open>\noindent
  22.169  The proof of the induction step is identical to the one for the base case:
  22.170 -*}
  22.171 +\<close>
  22.172  
  22.173  apply(subst lfp_unfold[OF mono_ef])
  22.174  apply(blast)
  22.175  done
  22.176  
  22.177 -text{*
  22.178 +text\<open>
  22.179  The main theorem is proved in the familiar manner: induction followed by
  22.180  @{text auto} augmented with the lemma as a simplification rule.
  22.181 -*}
  22.182 +\<close>
  22.183  
  22.184  theorem "mc f = {s. s \<Turnstile> f}"
  22.185  apply(induct_tac f)
  22.186  apply(auto simp add: EF_lemma)
  22.187  done
  22.188  
  22.189 -text{*
  22.190 +text\<open>
  22.191  \begin{exercise}
  22.192  @{term AX} has a dual operator @{term EN} 
  22.193  (``there exists a next state such that'')%
  22.194 @@ -183,7 +183,7 @@
  22.195  @{prop[display]"(s \<Turnstile> EF f) = (s \<Turnstile> f | s \<Turnstile> EN(EF f))"}
  22.196  \end{exercise}
  22.197  \index{PDL|)}
  22.198 -*}
  22.199 +\<close>
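
text\<open>
For readers attempting the exercise, one plausible shape for the @{term EN}
clauses (a sketch of ours, not the official solution) is
@{text[display]"s \<Turnstile> EN f = (\<exists>t. (s,t) \<in> M \<and> t \<Turnstile> f)"}
and, on the model checking side,
@{text[display]"mc(EN f) = {s. \<exists>t. (s,t) \<in> M \<and> t \<in> mc f}"}
which denotes the same set as @{text"M\<inverse> `` mc f"}.
\<close>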
  22.200  (*<*)
  22.201  theorem main: "mc f = {s. s \<Turnstile> f}"
  22.202  apply(induct_tac f)
    23.1 --- a/src/Doc/Tutorial/CodeGen/CodeGen.thy	Thu Jan 11 13:48:17 2018 +0100
    23.2 +++ b/src/Doc/Tutorial/CodeGen/CodeGen.thy	Fri Jan 12 14:08:53 2018 +0100
    23.3 @@ -2,9 +2,9 @@
    23.4  theory CodeGen imports Main begin
    23.5  (*>*)
    23.6  
    23.7 -section{*Case Study: Compiling Expressions*}
    23.8 +section\<open>Case Study: Compiling Expressions\<close>
    23.9  
   23.10 -text{*\label{sec:ExprCompiler}
   23.11 +text\<open>\label{sec:ExprCompiler}
   23.12  \index{compiling expressions example|(}%
   23.13  The task is to develop a compiler from a generic type of expressions (built
   23.14  from variables, constants and binary operations) to a stack machine.  This
   23.15 @@ -13,45 +13,45 @@
   23.16  type of variables or values but make them type parameters.  Neither is there
   23.17  a fixed set of binary operations: instead the expression contains the
   23.18  appropriate function itself.
   23.19 -*}
   23.20 +\<close>
   23.21  
   23.22  type_synonym 'v binop = "'v \<Rightarrow> 'v \<Rightarrow> 'v"
   23.23  datatype (dead 'a, 'v) expr = Cex 'v
   23.24                        | Vex 'a
   23.25                        | Bex "'v binop"  "('a,'v)expr"  "('a,'v)expr"
   23.26  
   23.27 -text{*\noindent
   23.28 +text\<open>\noindent
   23.29  The three constructors represent constants, variables and the application of
   23.30  a binary operation to two subexpressions.
   23.31  
   23.32  The value of an expression with respect to an environment that maps variables to
   23.33  values is easily defined:
   23.34 -*}
   23.35 +\<close>
   23.36  
   23.37  primrec "value" :: "('a,'v)expr \<Rightarrow> ('a \<Rightarrow> 'v) \<Rightarrow> 'v" where
   23.38  "value (Cex v) env = v" |
   23.39  "value (Vex a) env = env a" |
   23.40  "value (Bex f e1 e2) env = f (value e1 env) (value e2 env)"
   23.41  
   23.42 -text{*
   23.43 +text\<open>
   23.44  The stack machine has three instructions: load a constant value onto the
   23.45  stack, load the contents of an address onto the stack, and apply a
   23.46  binary operation to the two topmost elements of the stack, replacing them by
   23.47  the result. As for @{text"expr"}, addresses and values are type parameters:
   23.48 -*}
   23.49 +\<close>
   23.50  
   23.51  datatype (dead 'a, 'v) instr = Const 'v
   23.52                         | Load 'a
   23.53                         | Apply "'v binop"
   23.54  
   23.55 -text{*
   23.56 +text\<open>
   23.57  The execution of the stack machine is modelled by a function
   23.58  @{text"exec"} that takes a list of instructions, a store (modelled as a
   23.59  function from addresses to values, just like the environment for
   23.60  evaluating expressions), and a stack (modelled as a list) of values,
   23.61  and returns the stack at the end of the execution --- the store remains
   23.62  unchanged:
   23.63 -*}
   23.64 +\<close>
   23.65  
   23.66  primrec exec :: "('a,'v)instr list \<Rightarrow> ('a\<Rightarrow>'v) \<Rightarrow> 'v list \<Rightarrow> 'v list"
   23.67  where
   23.68 @@ -61,7 +61,7 @@
   23.69    | Load a   \<Rightarrow> exec is s ((s a)#vs)
   23.70    | Apply f  \<Rightarrow> exec is s ((f (hd vs) (hd(tl vs)))#(tl(tl vs))))"
   23.71  
   23.72 -text{*\noindent
   23.73 +text\<open>\noindent
   23.74  Recall that @{term"hd"} and @{term"tl"}
   23.75  return the first element and the remainder of a list.
   23.76  Because all functions are total, \cdx{hd} is defined even for the empty
   23.77 @@ -72,54 +72,54 @@
   23.78  
   23.79  The compiler is a function from expressions to a list of instructions. Its
   23.80  definition is obvious:
   23.81 -*}
   23.82 +\<close>
   23.83  
   23.84  primrec compile :: "('a,'v)expr \<Rightarrow> ('a,'v)instr list" where
   23.85  "compile (Cex v)       = [Const v]" |
   23.86  "compile (Vex a)       = [Load a]" |
   23.87  "compile (Bex f e1 e2) = (compile e2) @ (compile e1) @ [Apply f]"
   23.88  
   23.89 -text{*
   23.90 +text\<open>
   23.91  Now we have to prove the correctness of the compiler, i.e.\ that the
   23.92  execution of a compiled expression results in the value of the expression:
   23.93 -*}
   23.94 +\<close>
   23.95  theorem "exec (compile e) s [] = [value e s]"
   23.96  (*<*)oops(*>*)
   23.97 -text{*\noindent
   23.98 +text\<open>\noindent
   23.99  This theorem needs to be generalized:
  23.100 -*}
  23.101 +\<close>
  23.102  
  23.103  theorem "\<forall>vs. exec (compile e) s vs = (value e s) # vs"
  23.104  
  23.105 -txt{*\noindent
  23.106 +txt\<open>\noindent
  23.107  It will be proved by induction on @{term"e"} followed by simplification.  
  23.108  First, we must prove a lemma about executing the concatenation of two
  23.109  instruction sequences:
  23.110 -*}
  23.111 +\<close>
  23.112  (*<*)oops(*>*)
  23.113  lemma exec_app[simp]:
  23.114    "\<forall>vs. exec (xs@ys) s vs = exec ys s (exec xs s vs)" 
  23.115  
  23.116 -txt{*\noindent
  23.117 +txt\<open>\noindent
  23.118  This requires induction on @{term"xs"} and ordinary simplification for the
  23.119  base cases. In the induction step, simplification leaves us with a formula
  23.120  that contains two @{text"case"}-expressions over instructions. Thus we add
  23.121  automatic case splitting, which finishes the proof:
  23.122 -*}
  23.123 +\<close>
  23.124  apply(induct_tac xs, simp, simp split: instr.split)
  23.125  (*<*)done(*>*)
  23.126 -text{*\noindent
  23.127 +text\<open>\noindent
  23.128  Note that because both \methdx{simp_all} and \methdx{auto} perform simplification, they can
  23.129  be modified in the same way as @{text simp}.  Thus the proof can be
  23.130  rewritten as
  23.131 -*}
  23.132 +\<close>
  23.133  (*<*)
  23.134  declare exec_app[simp del]
  23.135  lemma [simp]: "\<forall>vs. exec (xs@ys) s vs = exec ys s (exec xs s vs)" 
  23.136  (*>*)
  23.137  apply(induct_tac xs, simp_all split: instr.split)
  23.138  (*<*)done(*>*)
  23.139 -text{*\noindent
  23.140 +text\<open>\noindent
  23.141  Although this is more compact, it is less clear for the reader of the proof.
  23.142  
  23.143  We could now go back and prove @{prop"exec (compile e) s [] = [value e s]"}
  23.144 @@ -127,7 +127,7 @@
  23.145  However, this is unnecessary because the generalized version fully subsumes
  23.146  its instance.%
  23.147  \index{compiling expressions example|)}
  23.148 -*}
  23.149 +\<close>
  23.150  (*<*)
  23.151  theorem "\<forall>vs. exec (compile e) s vs = (value e s) # vs"
  23.152  by(induct_tac e, auto)
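
text\<open>
A hedged sketch (the theorem name @{text exec_compile} is our own; the
tutorial leaves the generalized theorem unnamed) of how the original
statement is recovered from the generalized one by simplification:
\<close>

theorem exec_compile: "\<forall>vs. exec (compile e) s vs = (value e s) # vs"
by(induct_tac e, auto)

corollary "exec (compile e) s [] = [value e s]"
by(simp add: exec_compile)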
    24.1 --- a/src/Doc/Tutorial/Datatype/ABexpr.thy	Thu Jan 11 13:48:17 2018 +0100
    24.2 +++ b/src/Doc/Tutorial/Datatype/ABexpr.thy	Fri Jan 12 14:08:53 2018 +0100
    24.3 @@ -2,7 +2,7 @@
    24.4  theory ABexpr imports Main begin
    24.5  (*>*)
    24.6  
    24.7 -text{*
    24.8 +text\<open>
    24.9  \index{datatypes!mutually recursive}%
   24.10  Sometimes it is necessary to define two datatypes that depend on each
   24.11  other. This is called \textbf{mutual recursion}. As an example consider a
   24.12 @@ -15,7 +15,7 @@
   24.13    comparisons like ``$m<n$''.
   24.14  \end{itemize}
   24.15  In Isabelle this becomes
   24.16 -*}
   24.17 +\<close>
   24.18  
   24.19  datatype 'a aexp = IF   "'a bexp" "'a aexp" "'a aexp"
   24.20                   | Sum  "'a aexp" "'a aexp"
   24.21 @@ -26,14 +26,14 @@
   24.22                   | And  "'a bexp" "'a bexp"
   24.23                   | Neg  "'a bexp"
   24.24  
   24.25 -text{*\noindent
   24.26 +text\<open>\noindent
   24.27  Type @{text"aexp"} is similar to @{text"expr"} in \S\ref{sec:ExprCompiler},
   24.28  except that we have added an @{text IF} constructor,
   24.29  fixed the values to be of type @{typ"nat"} and declared the two binary
   24.30  operations @{text Sum} and @{term"Diff"}.  Boolean
   24.31  expressions can be arithmetic comparisons, conjunctions and negations.
   24.32  The semantics is given by two evaluation functions:
   24.33 -*}
   24.34 +\<close>
   24.35  
   24.36  primrec evala :: "'a aexp \<Rightarrow> ('a \<Rightarrow> nat) \<Rightarrow> nat" and
   24.37           evalb :: "'a bexp \<Rightarrow> ('a \<Rightarrow> nat) \<Rightarrow> bool" where
   24.38 @@ -48,7 +48,7 @@
   24.39  "evalb (And b1 b2) env = (evalb b1 env \<and> evalb b2 env)" |
   24.40  "evalb (Neg b) env = (\<not> evalb b env)"
   24.41  
   24.42 -text{*\noindent
   24.43 +text\<open>\noindent
   24.44  
   24.45  Both take an expression and an environment (a mapping from variables
   24.46  @{typ"'a"} to values @{typ"nat"}) and return its arithmetic/boolean
   24.47 @@ -60,7 +60,7 @@
   24.48  the empty line is purely for readability.
   24.49  
   24.50  In the same fashion we also define two functions that perform substitution:
   24.51 -*}
   24.52 +\<close>
   24.53  
   24.54  primrec substa :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a aexp \<Rightarrow> 'b aexp" and
   24.55           substb :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a bexp \<Rightarrow> 'b bexp" where
   24.56 @@ -75,7 +75,7 @@
   24.57  "substb s (And b1 b2) = And (substb s b1) (substb s b2)" |
   24.58  "substb s (Neg b) = Neg (substb s b)"
   24.59  
   24.60 -text{*\noindent
   24.61 +text\<open>\noindent
   24.62  Their first argument is a function mapping variables to expressions, the
   24.63  substitution. It is applied to all variables in the second argument. As a
   24.64  result, the type of variables in the expression may change from @{typ"'a"}
   24.65 @@ -89,19 +89,19 @@
   24.66  boolean expressions (by induction), you find that you always need the other
   24.67  theorem in the induction step. Therefore you need to state and prove both
   24.68  theorems simultaneously:
   24.69 -*}
   24.70 +\<close>
   24.71  
   24.72  lemma "evala (substa s a) env = evala a (\<lambda>x. evala (s x) env) \<and>
   24.73          evalb (substb s b) env = evalb b (\<lambda>x. evala (s x) env)"
   24.74  apply(induct_tac a and b)
   24.75  
   24.76 -txt{*\noindent The resulting 8 goals (one for each constructor) are proved in one fell swoop:
   24.77 -*}
   24.78 +txt\<open>\noindent The resulting 8 goals (one for each constructor) are proved in one fell swoop:
   24.79 +\<close>
   24.80  
   24.81  apply simp_all
   24.82  (*<*)done(*>*)
   24.83  
   24.84 -text{*
   24.85 +text\<open>
   24.86  In general, given $n$ mutually recursive datatypes $\tau@1$, \dots, $\tau@n$,
   24.87  an inductive proof expects a goal of the form
   24.88  \[ P@1(x@1)\ \land \dots \land P@n(x@n) \]
   24.89 @@ -121,7 +121,7 @@
   24.90    it.  ({\em Hint:} proceed as in \S\ref{sec:boolex} and read the discussion
   24.91    of type annotations following lemma @{text subst_id} below).
   24.92  \end{exercise}
   24.93 -*}
   24.94 +\<close>
   24.95  (*<*)
   24.96  primrec norma :: "'a aexp \<Rightarrow> 'a aexp" and
   24.97          normb :: "'a bexp \<Rightarrow> 'a aexp \<Rightarrow> 'a aexp \<Rightarrow> 'a aexp" where
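
text\<open>
The conjunctive pattern above applies to any mutually recursive datatypes. A
self-contained toy sketch of ours (none of these names occur in the
tutorial): mutually recursive ``even'' and ``odd'' numerals together with a
simultaneous lemma about them:
\<close>

datatype evn = Z | ESuc od and od = OSuc evn

(* the two conversion functions recurse into each other *)
primrec evn2nat :: "evn \<Rightarrow> nat" and od2nat :: "od \<Rightarrow> nat" where
"evn2nat Z = 0" |
"evn2nat (ESuc x) = Suc (od2nat x)" |
"od2nat (OSuc x) = Suc (evn2nat x)"

lemma "even (evn2nat e) \<and> odd (od2nat d)"
by(induct_tac e and d, simp_all)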
    25.1 --- a/src/Doc/Tutorial/Datatype/Fundata.thy	Thu Jan 11 13:48:17 2018 +0100
    25.2 +++ b/src/Doc/Tutorial/Datatype/Fundata.thy	Fri Jan 12 14:08:53 2018 +0100
    25.3 @@ -3,7 +3,7 @@
    25.4  (*>*)
    25.5  datatype (dead 'a,'i) bigtree = Tip | Br 'a "'i \<Rightarrow> ('a,'i)bigtree"
    25.6  
    25.7 -text{*\noindent
    25.8 +text\<open>\noindent
    25.9  Parameter @{typ"'a"} is the type of values stored in
   25.10  the @{term Br}anches of the tree, whereas @{typ"'i"} is the index
   25.11  type over which the tree branches. If @{typ"'i"} is instantiated to
   25.12 @@ -17,14 +17,14 @@
   25.13  has merely @{term"Tip"}s as further subtrees.
   25.14  
   25.15  Function @{term"map_bt"} applies a function to all labels in a @{text"bigtree"}:
   25.16 -*}
   25.17 +\<close>
   25.18  
   25.19  primrec map_bt :: "('a \<Rightarrow> 'b) \<Rightarrow> ('a,'i)bigtree \<Rightarrow> ('b,'i)bigtree"
   25.20  where
   25.21  "map_bt f Tip      = Tip" |
   25.22  "map_bt f (Br a F) = Br (f a) (\<lambda>i. map_bt f (F i))"
   25.23  
   25.24 -text{*\noindent This is a valid \isacommand{primrec} definition because the
   25.25 +text\<open>\noindent This is a valid \isacommand{primrec} definition because the
   25.26  recursive calls of @{term"map_bt"} involve only subtrees of
   25.27  @{term"F"}, which is itself a subterm of the left-hand side. Thus termination
   25.28  is assured.  The seasoned functional programmer might try expressing
   25.29 @@ -32,18 +32,18 @@
   25.30  however will reject.  Applying @{term"map_bt"} to only one of its arguments
   25.31  makes the termination proof less obvious.
   25.32  
   25.33 -The following lemma has a simple proof by induction:  *}
   25.34 +The following lemma has a simple proof by induction:\<close>
   25.35  
   25.36  lemma "map_bt (g o f) T = map_bt g (map_bt f T)"
   25.37  apply(induct_tac T, simp_all)
   25.38  done
   25.39  (*<*)lemma "map_bt (g o f) T = map_bt g (map_bt f T)"
   25.40  apply(induct_tac T, rename_tac[2] F)(*>*)
   25.41 -txt{*\noindent
   25.42 +txt\<open>\noindent
   25.43  Because of the function type, the proof state after induction looks unusual.
   25.44  Notice the quantified induction hypothesis:
   25.45  @{subgoals[display,indent=0]}
   25.46 -*}
   25.47 +\<close>
   25.48  (*<*)
   25.49  oops
   25.50  end
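
text\<open>
Two further hedged illustrations of ours for this datatype (not in the
tutorial sources): instantiating the index type to @{typ nat} gives
infinitely branching trees, and the identity analogue of the composition
lemma is proved by the very same one-line induction:
\<close>

type_synonym 'a inftree = "('a, nat) bigtree"

lemma "map_bt (\<lambda>x. x) T = T"
apply(induct_tac T, simp_all)
done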
    26.1 --- a/src/Doc/Tutorial/Datatype/Nested.thy	Thu Jan 11 13:48:17 2018 +0100
    26.2 +++ b/src/Doc/Tutorial/Datatype/Nested.thy	Fri Jan 12 14:08:53 2018 +0100
    26.3 @@ -2,7 +2,7 @@
    26.4  theory Nested imports ABexpr begin
    26.5  (*>*)
    26.6  
    26.7 -text{*
    26.8 +text\<open>
    26.9  \index{datatypes!and nested recursion}%
   26.10  So far, all datatypes had the property that on the right-hand side of their
   26.11  definition they occurred only at the top-level: directly below a
   26.12 @@ -10,11 +10,11 @@
   26.13  datatype occurs nested in some other datatype (but not inside itself!).
   26.14  Consider the following model of terms
   26.15  where function symbols can be applied to a list of arguments:
   26.16 -*}
   26.17 +\<close>
   26.18  (*<*)hide_const Var(*>*)
   26.19  datatype ('v,'f)"term" = Var 'v | App 'f "('v,'f)term list"
   26.20  
   26.21 -text{*\noindent
   26.22 +text\<open>\noindent
   26.23  Note that we need to quote @{text term} on the left to avoid confusion with
   26.24  the Isabelle command \isacommand{term}.
   26.25  Parameter @{typ"'v"} is the type of variables and @{typ"'f"} the type of
   26.26 @@ -41,7 +41,7 @@
   26.27  
   26.28  Let us define a substitution function on terms. Because terms involve term
   26.29  lists, we need to define two substitution functions simultaneously:
   26.30 -*}
   26.31 +\<close>
   26.32  
   26.33  primrec
   26.34  subst :: "('v\<Rightarrow>('v,'f)term) \<Rightarrow> ('v,'f)term      \<Rightarrow> ('v,'f)term" and
   26.35 @@ -54,7 +54,7 @@
   26.36  "substs s [] = []" |
   26.37  "substs s (t # ts) = subst s t # substs s ts"
   26.38  
   26.39 -text{*\noindent
   26.40 +text\<open>\noindent
   26.41  Individual equations in a \commdx{primrec} definition may be
   26.42  named as shown for @{thm[source]subst_App}.
   26.43  The significance of this device will become apparent below.
   26.44 @@ -63,14 +63,14 @@
   26.45  to prove a related statement about term lists simultaneously. For example,
   26.46  the fact that the identity substitution does not change a term needs to be
   26.47  strengthened and proved as follows:
   26.48 -*}
   26.49 +\<close>
   26.50  
   26.51  lemma subst_id(*<*)(*referred to from ABexpr*)(*>*): "subst  Var t  = (t ::('v,'f)term)  \<and>
   26.52                    substs Var ts = (ts::('v,'f)term list)"
   26.53  apply(induct_tac t and ts rule: subst.induct substs.induct, simp_all)
   26.54  done
   26.55  
   26.56 -text{*\noindent
   26.57 +text\<open>\noindent
   26.58  Note that @{term Var} is the identity substitution because by definition it
   26.59  leaves variables unchanged: @{prop"subst Var (Var x) = Var x"}. Note also
   26.60  that the type annotations are necessary because otherwise there is nothing in
   26.61 @@ -100,7 +100,7 @@
   26.62  @{text"map f [x1,...,xn] = [f x1,...,f xn]"}. This is true, but Isabelle
   26.63  insists on the conjunctive format. Fortunately, we can easily \emph{prove}
   26.64  that the suggested equation holds:
   26.65 -*}
   26.66 +\<close>
   26.67  (*<*)
   26.68  (* Exercise 1: *)
   26.69  lemma "subst  ((subst f) \<circ> g) t  = subst  f (subst g t) \<and>
   26.70 @@ -133,14 +133,14 @@
   26.71  apply(induct_tac ts, simp_all)
   26.72  done
   26.73  
   26.74 -text{*\noindent
   26.75 +text\<open>\noindent
   26.76  What is more, we can now disable the old defining equation as a
   26.77  simplification rule:
   26.78 -*}
   26.79 +\<close>
   26.80  
   26.81  declare subst_App [simp del]
   26.82  
   26.83 -text{*\noindent The advantage is that now we have replaced @{const
   26.84 +text\<open>\noindent The advantage is that, having replaced @{const
   26.85  substs} by @{const map}, we can profit from the large number of
   26.86  pre-proved lemmas about @{const map}.  Unfortunately, inductive proofs
   26.87  about type @{text term} are still awkward because they expect a
   26.88 @@ -155,5 +155,5 @@
   26.89  Of course, you may also combine mutual and nested recursion of datatypes. For example,
   26.90  constructor @{text Sum} in \S\ref{sec:datatype-mut-rec} could take a list of
   26.91  expressions as its argument: @{text Sum}~@{typ[quotes]"'a aexp list"}.
   26.92 -*}
   26.93 +\<close>
   26.94  (*<*)end(*>*)
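
The nested-recursion example can likewise be replayed. In the sketch below the theory name is invented, and the two defining equations of subst, which the hunks above elide, are reconstructed in the obvious way (including the subst_App name that the surrounding text relies on); check them against the original file before depending on them:

theory Nested_Sketch imports Main begin

datatype ('v,'f)"term" = Var 'v | App 'f "('v,'f)term list"

primrec
subst :: "('v\<Rightarrow>('v,'f)term) \<Rightarrow> ('v,'f)term      \<Rightarrow> ('v,'f)term" and
substs:: "('v\<Rightarrow>('v,'f)term) \<Rightarrow> ('v,'f)term list \<Rightarrow> ('v,'f)term list"
where
"subst s (Var x) = s x" |
subst_App: "subst s (App f ts) = App f (substs s ts)" |
"substs s [] = []" |
"substs s (t # ts) = subst s t # substs s ts"

(* the identity statement must be strengthened to a conjunction,
   mirroring the mutual recursion of subst and substs *)
lemma subst_id: "subst  Var t  = (t ::('v,'f)term)  \<and>
                 substs Var ts = (ts::('v,'f)term list)"
apply(induct_tac t and ts rule: subst.induct substs.induct, simp_all)
done

end
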
    27.1 --- a/src/Doc/Tutorial/Documents/Documents.thy	Thu Jan 11 13:48:17 2018 +0100
    27.2 +++ b/src/Doc/Tutorial/Documents/Documents.thy	Fri Jan 12 14:08:53 2018 +0100
    27.3 @@ -2,9 +2,9 @@
    27.4  theory Documents imports Main begin
    27.5  (*>*)
    27.6  
    27.7 -section {* Concrete Syntax \label{sec:concrete-syntax} *}
    27.8 +section \<open>Concrete Syntax \label{sec:concrete-syntax}\<close>
    27.9  
   27.10 -text {*
   27.11 +text \<open>
   27.12    The core concept of Isabelle's framework for concrete syntax is that
   27.13    of \bfindex{mixfix annotations}.  Associated with any kind of
   27.14    constant declaration, mixfixes affect both the grammar productions
   27.15 @@ -19,12 +19,12 @@
   27.16  
   27.17    Below we introduce a few simple syntax declaration
   27.18    forms that already cover many common situations fairly well.
   27.19 -*}
   27.20 +\<close>
   27.21  
   27.22  
   27.23 -subsection {* Infix Annotations *}
   27.24 +subsection \<open>Infix Annotations\<close>
   27.25  
   27.26 -text {*
   27.27 +text \<open>
   27.28    Syntax annotations may be included wherever constants are declared,
   27.29    such as \isacommand{definition} and \isacommand{primrec} --- and also
   27.30    \isacommand{datatype}, which declares constructor operations.
   27.31 @@ -35,12 +35,12 @@
   27.32    Infix declarations\index{infix annotations} provide a useful special
   27.33    case of mixfixes.  The following example of the exclusive-or
   27.34    operation on boolean values illustrates typical infix declarations.
   27.35 -*}
   27.36 +\<close>
   27.37  
   27.38  definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool"    (infixl "[+]" 60)
   27.39  where "A [+] B \<equiv> (A \<and> \<not> B) \<or> (\<not> A \<and> B)"
   27.40  
   27.41 -text {*
   27.42 +text \<open>
   27.43    \noindent Now @{text "xor A B"} and @{text "A [+] B"} refer to the
   27.44    same expression internally.  Any curried function with at least two
   27.45    arguments may be given infix syntax.  For partial applications with
   27.46 @@ -75,12 +75,12 @@
   27.47    below 50; algebraic ones (like @{text "+"} and @{text "*"}) are
   27.48    above 50.  User syntax should strive to coexist with common HOL
   27.49    forms, or use the mostly unused range 100--900.
   27.50 -*}
   27.51 +\<close>
   27.52  
   27.53  
   27.54 -subsection {* Mathematical Symbols \label{sec:syntax-symbols} *}
   27.55 +subsection \<open>Mathematical Symbols \label{sec:syntax-symbols}\<close>
   27.56  
   27.57 -text {*
   27.58 +text \<open>
   27.59    Concrete syntax based on ASCII characters has inherent limitations.
   27.60    Mathematical notation demands a larger repertoire of glyphs.
   27.61    Several standards of extended character sets have been proposed over
   27.62 @@ -133,39 +133,39 @@
   27.63  
   27.64    Replacing our previous definition of @{text xor} by the
   27.65    following specifies an Isabelle symbol for the new operator:
   27.66 -*}
   27.67 +\<close>
   27.68  
   27.69  (*<*)
   27.70  hide_const xor
   27.71 -setup {* Sign.add_path "version1" *}
   27.72 +setup \<open>Sign.add_path "version1"\<close>
   27.73  (*>*)
   27.74  definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool"    (infixl "\<oplus>" 60)
   27.75  where "A \<oplus> B \<equiv> (A \<and> \<not> B) \<or> (\<not> A \<and> B)"
   27.76  (*<*)
   27.77 -setup {* Sign.local_path *}
   27.78 +setup \<open>Sign.local_path\<close>
   27.79  (*>*)
   27.80  
   27.81 -text {*
   27.82 +text \<open>
   27.83    It is possible to provide alternative syntax forms
   27.84    through the \bfindex{print mode} concept~@{cite "isabelle-isar-ref"}.  By
   27.85    convention, the mode of ``$xsymbols$'' is enabled whenever
   27.86    Proof~General's X-Symbol mode or {\LaTeX} output is active.  Now
   27.87    consider the following hybrid declaration of @{text xor}:
   27.88 -*}
   27.89 +\<close>
   27.90  
   27.91  (*<*)
   27.92  hide_const xor
   27.93 -setup {* Sign.add_path "version2" *}
   27.94 +setup \<open>Sign.add_path "version2"\<close>
   27.95  (*>*)
   27.96  definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool"    (infixl "[+]\<ignore>" 60)
   27.97  where "A [+]\<ignore> B \<equiv> (A \<and> \<not> B) \<or> (\<not> A \<and> B)"
   27.98  
   27.99  notation (xsymbols) xor (infixl "\<oplus>\<ignore>" 60)
  27.100  (*<*)
  27.101 -setup {* Sign.local_path *}
  27.102 +setup \<open>Sign.local_path\<close>
  27.103  (*>*)
  27.104  
  27.105 -text {*\noindent
  27.106 +text \<open>\noindent
  27.107  The \commdx{notation} command associates a mixfix
  27.108  annotation with a known constant.  The print mode specification,
  27.109  here @{text "(xsymbols)"}, is optional.
  27.110 @@ -174,17 +174,17 @@
  27.111  output uses the nicer syntax of $xsymbols$ whenever that print mode is
  27.112  active.  Such an arrangement is particularly useful for interactive
  27.113  development, where users may type ASCII text and see mathematical
  27.114 -symbols displayed during proofs.  *}
  27.115 +symbols displayed during proofs.\<close>
  27.116  
  27.117  
  27.118 -subsection {* Prefix Annotations *}
  27.119 +subsection \<open>Prefix Annotations\<close>
  27.120  
  27.121 -text {*
  27.122 +text \<open>
  27.123    Prefix syntax annotations\index{prefix annotation} are another form
  27.124    of mixfixes @{cite "isabelle-isar-ref"}, without any template arguments or
  27.125    priorities --- just some literal syntax.  The following example
  27.126    associates common symbols with the constructors of a datatype.
  27.127 -*}
  27.128 +\<close>
  27.129  
  27.130  datatype currency =
  27.131      Euro nat    ("\<euro>")
  27.132 @@ -192,7 +192,7 @@
  27.133    | Yen nat     ("\<yen>")
  27.134    | Dollar nat  ("$")
  27.135  
  27.136 -text {*
  27.137 +text \<open>
  27.138    \noindent Here the mixfix annotations on the rightmost column happen
  27.139    to consist of a single Isabelle symbol each: \verb,\,\verb,<euro>,,
  27.140    \verb,\,\verb,<pounds>,, \verb,\,\verb,<yen>,, and \verb,$,.  Recall
  27.141 @@ -204,12 +204,12 @@
  27.142    Commission.
  27.143  
  27.144    Prefix syntax works the same way for other commands that introduce new constants, e.g. \isakeyword{primrec}.
  27.145 -*}
  27.146 +\<close>
  27.147  
  27.148  
  27.149 -subsection {* Abbreviations \label{sec:abbreviations} *}
  27.150 +subsection \<open>Abbreviations \label{sec:abbreviations}\<close>
  27.151  
  27.152 -text{* Mixfix syntax annotations merely decorate particular constant
  27.153 +text\<open>Mixfix syntax annotations merely decorate particular constant
  27.154  application forms with concrete syntax, for instance replacing
  27.155  @{text "xor A B"} by @{text "A \<oplus> B"}.  Occasionally, the relationship
  27.156  between some piece of notation and its internal form is more
  27.157 @@ -223,12 +223,12 @@
  27.158  A typical use of abbreviations is to introduce relational notation for
  27.159  membership in a set of pairs, replacing @{text "(x, y) \<in> sim"} by
  27.160  @{text "x \<approx> y"}. We assume that a constant @{text sim} of type
  27.161 -@{typ"('a \<times> 'a) set"} has been introduced at this point. *}
  27.162 +@{typ"('a \<times> 'a) set"} has been introduced at this point.\<close>
  27.163  (*<*)consts sim :: "('a \<times> 'a) set"(*>*)
  27.164  abbreviation sim2 :: "'a \<Rightarrow> 'a \<Rightarrow> bool"   (infix "\<approx>" 50)
  27.165  where "x \<approx> y  \<equiv>  (x, y) \<in> sim"
  27.166  
  27.167 -text {* \noindent The given meta-equality is used as a rewrite rule
  27.168 +text \<open>\noindent The given meta-equality is used as a rewrite rule
  27.169  after parsing (replacing \mbox{@{prop"x \<approx> y"}} by @{text"(x,y) \<in>
  27.170  sim"}) and before printing (turning @{text"(x,y) \<in> sim"} back into
  27.171  \mbox{@{prop"x \<approx> y"}}). The name of the dummy constant @{text "sim2"}
  27.172 @@ -238,14 +238,14 @@
  27.173  provide variant versions of fundamental relational expressions, such
  27.174  as @{text \<noteq>} for negated equalities.  The following declaration
  27.175  stems from Isabelle/HOL itself:
  27.176 -*}
  27.177 +\<close>
  27.178  
  27.179  abbreviation not_equal :: "'a \<Rightarrow> 'a \<Rightarrow> bool"    (infixl "~=\<ignore>" 50)
  27.180  where "x ~=\<ignore> y  \<equiv>  \<not> (x = y)"
  27.181  
  27.182  notation (xsymbols) not_equal (infix "\<noteq>\<ignore>" 50)
  27.183  
  27.184 -text {* \noindent The notation @{text \<noteq>} is introduced separately to restrict it
  27.185 +text \<open>\noindent The notation @{text \<noteq>} is introduced separately to restrict it
  27.186  to the \emph{xsymbols} mode.
  27.187  
  27.188  Abbreviations are appropriate when the defined concept is a
  27.189 @@ -257,12 +257,12 @@
  27.190  Abbreviations are a simplified form of the general concept of
  27.191  \emph{syntax translations}; even heavier transformations may be
  27.192  written in ML @{cite "isabelle-isar-ref"}.
  27.193 -*}
  27.194 +\<close>
  27.195  
  27.196  
  27.197 -section {* Document Preparation \label{sec:document-preparation} *}
  27.198 +section \<open>Document Preparation \label{sec:document-preparation}\<close>
  27.199  
  27.200 -text {*
  27.201 +text \<open>
  27.202    Isabelle/Isar is centered around the concept of \bfindex{formal
  27.203    proof documents}\index{documents|bold}.  The outcome of a formal
  27.204    development effort is meant to be a human-readable record, presented
  27.205 @@ -279,27 +279,27 @@
  27.206  
  27.207    Here is an example to illustrate the idea of Isabelle document
  27.208    preparation.
  27.209 -*}
  27.210 +\<close>
  27.211  
  27.212 -text_raw {* \begin{quotation} *}
  27.213 +text_raw \<open>\begin{quotation}\<close>
  27.214  
  27.215 -text {*
  27.216 +text \<open>
  27.217    The following datatype definition of @{text "'a bintree"} models
  27.218    binary trees with nodes being decorated by elements of type @{typ
  27.219    'a}.
  27.220 -*}
  27.221 +\<close>
  27.222  
  27.223  datatype 'a bintree =
  27.224       Leaf | Branch 'a  "'a bintree"  "'a bintree"
  27.225  
  27.226 -text {*
  27.227 +text \<open>
  27.228    \noindent The datatype induction rule generated here is of the form
  27.229    @{thm [indent = 1, display] bintree.induct [no_vars]}
  27.230 -*}
  27.231 +\<close>
  27.232  
  27.233 -text_raw {* \end{quotation} *}
  27.234 +text_raw \<open>\end{quotation}\<close>
  27.235  
  27.236 -text {*
  27.237 +text \<open>
  27.238    \noindent The above document output has been produced as follows:
  27.239  
  27.240    \begin{ttbox}
  27.241 @@ -324,12 +324,12 @@
  27.242    to formal entities by means of ``antiquotations'' (such as
  27.243    \texttt{\at}\verb,{text "'a bintree"}, or
  27.244    \texttt{\at}\verb,{typ 'a},), see also \S\ref{sec:doc-prep-text}.
  27.245 -*}
  27.246 +\<close>
  27.247  
  27.248  
  27.249 -subsection {* Isabelle Sessions *}
  27.250 +subsection \<open>Isabelle Sessions\<close>
  27.251  
  27.252 -text {*
  27.253 +text \<open>
  27.254    In contrast to the highly interactive mode of Isabelle/Isar theory
  27.255    development, the document preparation stage essentially works in
  27.256    batch-mode.  An Isabelle \bfindex{session} consists of a collection
  27.257 @@ -412,12 +412,12 @@
  27.258    Isabelle batch session leaves the generated sources in their target
  27.259    location, identified by the accompanying error message.  This lets
  27.260    you trace {\LaTeX} problems with the generated files at hand.
  27.261 -*}
  27.262 +\<close>
  27.263  
  27.264  
  27.265 -subsection {* Structure Markup *}
  27.266 +subsection \<open>Structure Markup\<close>
  27.267  
  27.268 -text {*
  27.269 +text \<open>
  27.270    The large-scale structure of Isabelle documents follows existing
  27.271    {\LaTeX} conventions, with chapters, sections, subsubsections etc.
  27.272    The Isar language includes separate \bfindex{markup commands}, which
  27.273 @@ -460,12 +460,12 @@
  27.274  
  27.275    end
  27.276    \end{ttbox}
  27.277 -*}
  27.278 +\<close>
  27.279  
  27.280  
  27.281 -subsection {* Formal Comments and Antiquotations \label{sec:doc-prep-text} *}
  27.282 +subsection \<open>Formal Comments and Antiquotations \label{sec:doc-prep-text}\<close>
  27.283  
  27.284 -text {*
  27.285 +text \<open>
  27.286    Isabelle \bfindex{source comments}, which are of the form
  27.287    \verb,(,\verb,*,~@{text \<dots>}~\verb,*,\verb,),, essentially act like
  27.288    white space and do not really contribute to the content.  They
  27.289 @@ -481,14 +481,14 @@
  27.290    \verb,{,\verb,*,~@{text \<dots>}~\verb,*,\verb,}, as before.  Multiple
  27.291    marginal comments may be given at the same time.  Here is a simple
  27.292    example:
  27.293 -*}
  27.294 +\<close>
  27.295  
  27.296  lemma "A --> A"
  27.297 -  -- "a triviality of propositional logic"
  27.298 -  -- "(should not really bother)"
  27.299 -  by (rule impI) -- "implicit assumption step involved here"
  27.300 +  \<comment> "a triviality of propositional logic"
  27.301 +  \<comment> "(should not really bother)"
  27.302 +  by (rule impI) \<comment> "implicit assumption step involved here"
  27.303  
  27.304 -text {*
  27.305 +text \<open>
  27.306    \noindent The above output has been produced as follows:
  27.307  
  27.308  \begin{verbatim}
  27.309 @@ -593,12 +593,12 @@
  27.310    document very easily, independently of the term language of
  27.311    Isabelle.  Manual {\LaTeX} code would leave more control over the
  27.312    typesetting, but is also slightly more tedious.
  27.313 -*}
  27.314 +\<close>
  27.315  
  27.316  
  27.317 -subsection {* Interpretation of Symbols \label{sec:doc-prep-symbols} *}
  27.318 +subsection \<open>Interpretation of Symbols \label{sec:doc-prep-symbols}\<close>
  27.319  
  27.320 -text {*
  27.321 +text \<open>
  27.322    As has been pointed out before (\S\ref{sec:syntax-symbols}),
  27.323    Isabelle symbols are the smallest syntactic entities --- a
  27.324    straightforward generalization of ASCII characters.  While Isabelle
  27.325 @@ -640,12 +640,12 @@
  27.326    quotes are not printed at all.  The resulting quality of typesetting
  27.327    is quite good, so this should be the default style for work that
  27.328    gets distributed to a broader audience.
  27.329 -*}
  27.330 +\<close>
  27.331  
  27.332  
  27.333 -subsection {* Suppressing Output \label{sec:doc-prep-suppress} *}
  27.334 +subsection \<open>Suppressing Output \label{sec:doc-prep-suppress}\<close>
  27.335  
  27.336 -text {*
  27.337 +text \<open>
  27.338    By default, Isabelle's document system generates a {\LaTeX} file for
  27.339    each theory that gets loaded while running the session.  The
  27.340    generated \texttt{session.tex} will include all of these in order of
  27.341 @@ -683,11 +683,11 @@
  27.342    commands involving ML code).  Users may add their own tags using the
  27.343    \verb,%,\emph{tag} notation right after a command name.  In the
  27.344    subsequent example we hide a particularly irrelevant proof:
  27.345 -*}
  27.346 +\<close>
  27.347  
  27.348  lemma "x = x" by %invisible (simp)
  27.349  
  27.350 -text {*
  27.351 +text \<open>
  27.352    The original source has been ``\verb,lemma "x = x" by %invisible (simp),''.
  27.353    Tags observe the structure of proofs; adjacent commands with the
  27.354    same tag are joined into a single region.  The Isabelle document
  27.355 @@ -705,12 +705,12 @@
  27.356    of the theory, of course.  For example, we may hide parts of a proof
  27.357    that seem unfit for general public inspection.  The following
  27.358    ``fully automatic'' proof is actually a fake:
  27.359 -*}
  27.360 +\<close>
  27.361  
  27.362  lemma "x \<noteq> (0::int) \<Longrightarrow> 0 < x * x"
  27.363    by (auto(*<*)simp add: zero_less_mult_iff(*>*))
  27.364  
  27.365 -text {*
  27.366 +text \<open>
  27.367    \noindent The real source of the proof has been as follows:
  27.368  
  27.369  \begin{verbatim}
  27.370 @@ -722,7 +722,7 @@
  27.371    should not misrepresent the underlying theory development.  It is
  27.372    easy to invalidate the visible text by hiding references to
  27.373    questionable axioms, for example.
  27.374 -*}
  27.375 +\<close>
  27.376  
  27.377  (*<*)
  27.378  end
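
A small executable summary of the notation machinery may be useful. In this sketch the theory and lemma names are invented; the [+] declaration, the sim constant and the sim2 abbreviation are taken from the hunks above. Since infixl at priority 60 binds more strongly than = at priority 50, the lemma needs no extra parentheses:

theory Documents_Sketch imports Main begin

definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool"    (infixl "[+]" 60)
where "A [+] B \<equiv> (A \<and> \<not> B) \<or> (\<not> A \<and> B)"

(* unfolding the definition lets auto settle the propositional logic *)
lemma xor_commute: "A [+] B = B [+] A"
by (auto simp add: xor_def)

consts sim :: "('a \<times> 'a) set"

(* relational notation for membership in a set of pairs, as described above *)
abbreviation sim2 :: "'a \<Rightarrow> 'a \<Rightarrow> bool"   (infix "\<approx>" 50)
where "x \<approx> y  \<equiv>  (x, y) \<in> sim"

end
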
    28.1 --- a/src/Doc/Tutorial/Fun/fun0.thy	Thu Jan 11 13:48:17 2018 +0100
    28.2 +++ b/src/Doc/Tutorial/Fun/fun0.thy	Fri Jan 12 14:08:53 2018 +0100
    28.3 @@ -2,19 +2,19 @@
    28.4  theory fun0 imports Main begin
    28.5  (*>*)
    28.6  
    28.7 -text{*
    28.8 +text\<open>
    28.9  \subsection{Definition}
   28.10  \label{sec:fun-examples}
   28.11  
   28.12  Here is a simple example, the \rmindex{Fibonacci function}:
   28.13 -*}
   28.14 +\<close>
   28.15  
   28.16  fun fib :: "nat \<Rightarrow> nat" where
   28.17  "fib 0 = 0" |
   28.18  "fib (Suc 0) = 1" |
   28.19  "fib (Suc(Suc x)) = fib x + fib (Suc x)"
   28.20  
   28.21 -text{*\noindent
   28.22 +text\<open>\noindent
   28.23  This resembles ordinary functional programming languages. Note the obligatory
   28.24  \isacommand{where} and \isa{|}. Command \isacommand{fun} declares and
   28.25  defines the function in one go. Isabelle establishes termination automatically
   28.26 @@ -22,35 +22,35 @@
   28.27  
   28.28  Slightly more interesting is the insertion of a fixed element
   28.29  between any two elements of a list:
   28.30 -*}
   28.31 +\<close>
   28.32  
   28.33  fun sep :: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list" where
   28.34  "sep a []     = []" |
   28.35  "sep a [x]    = [x]" |
   28.36  "sep a (x#y#zs) = x # a # sep a (y#zs)"
   28.37  
   28.38 -text{*\noindent
   28.39 +text\<open>\noindent
   28.40  This time the length of the list decreases with the
   28.41  recursive call; the first argument is irrelevant for termination.
   28.42  
   28.43  Pattern matching\index{pattern matching!and \isacommand{fun}}
   28.44  need not be exhaustive and may employ wildcards:
   28.45 -*}
   28.46 +\<close>
   28.47  
   28.48  fun last :: "'a list \<Rightarrow> 'a" where
   28.49  "last [x]      = x" |
   28.50  "last (_#y#zs) = last (y#zs)"
   28.51  
   28.52 -text{*
   28.53 +text\<open>
   28.54  Overlapping patterns are disambiguated by taking the order of equations into
   28.55  account, just as in functional programming:
   28.56 -*}
   28.57 +\<close>
   28.58  
   28.59  fun sep1 :: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list" where
   28.60  "sep1 a (x#y#zs) = x # a # sep1 a (y#zs)" |
   28.61  "sep1 _ xs       = xs"
   28.62  
   28.63 -text{*\noindent
   28.64 +text\<open>\noindent
   28.65  To guarantee that the second equation can only be applied if the first
   28.66  one does not match, Isabelle internally replaces the second equation
   28.67  by the two possibilities that are left: @{prop"sep1 a [] = []"} and
   28.68 @@ -59,13 +59,13 @@
   28.69  
   28.70  Because of its pattern matching syntax, \isacommand{fun} is also useful
   28.71  for the definition of non-recursive functions:
   28.72 -*}
   28.73 +\<close>
   28.74  
   28.75  fun swap12 :: "'a list \<Rightarrow> 'a list" where
   28.76  "swap12 (x#y#zs) = y#x#zs" |
   28.77  "swap12 zs       = zs"
   28.78  
   28.79 -text{*
   28.80 +text\<open>
   28.81  After a function~$f$ has been defined via \isacommand{fun},
   28.82  its defining equations (or variants derived from them) are available
   28.83  under the name $f$@{text".simps"} as theorems.
   28.84 @@ -87,14 +87,14 @@
   28.85  More generally, \isacommand{fun} allows any \emph{lexicographic
   28.86  combination} of size measures in case there are multiple
   28.87  arguments. For example, the following version of \rmindex{Ackermann's
   28.88 -function} is accepted: *}
   28.89 +function} is accepted:\<close>
   28.90  
   28.91  fun ack2 :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
   28.92  "ack2 n 0 = Suc n" |
   28.93  "ack2 0 (Suc m) = ack2 (Suc 0) m" |
   28.94  "ack2 (Suc n) (Suc m) = ack2 (ack2 n (Suc m)) m"
   28.95  
   28.96 -text{* The order of arguments has no influence on whether
   28.97 +text\<open>The order of arguments has no influence on whether
   28.98  \isacommand{fun} can prove termination of a function. For more details
   28.99  see elsewhere~@{cite bulwahnKN07}.
  28.100  
  28.101 @@ -108,12 +108,12 @@
  28.102  terminate because of automatic splitting of @{text "if"}.
  28.103  \index{*if expressions!splitting of}
  28.104  Let us look at an example:
  28.105 -*}
  28.106 +\<close>
  28.107  
  28.108  fun gcd :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
  28.109  "gcd m n = (if n=0 then m else gcd n (m mod n))"
  28.110  
  28.111 -text{*\noindent
  28.112 +text\<open>\noindent
  28.113  The second argument decreases with each recursive call.
  28.114  The termination condition
  28.115  @{prop[display]"n ~= (0::nat) ==> m mod n < n"}
  28.116 @@ -145,32 +145,32 @@
  28.117  If possible, the definition should be given by pattern matching on the left
  28.118  rather than @{text "if"} on the right. In the case of @{term gcd} the
  28.119  following alternative definition suggests itself:
  28.120 -*}
  28.121 +\<close>
  28.122  
  28.123  fun gcd1 :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
  28.124  "gcd1 m 0 = m" |
  28.125  "gcd1 m n = gcd1 n (m mod n)"
  28.126  
  28.127 -text{*\noindent
  28.128 +text\<open>\noindent
  28.129  The order of equations is important: it hides the side condition
  28.130  @{prop"n ~= (0::nat)"}.  Unfortunately, not all conditionals can be
  28.131  expressed by pattern matching.
  28.132  
  28.133  A simple alternative is to replace @{text "if"} by @{text case}, 
  28.134  which is also available for @{typ bool} and is not split automatically:
  28.135 -*}
  28.136 +\<close>
  28.137  
  28.138  fun gcd2 :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
  28.139  "gcd2 m n = (case n=0 of True \<Rightarrow> m | False \<Rightarrow> gcd2 n (m mod n))"
  28.140  
  28.141 -text{*\noindent
  28.142 +text\<open>\noindent
  28.143  This is probably the neatest solution next to pattern matching, and it is
  28.144  always available.
  28.145  
  28.146  A final alternative is to replace the offending simplification rules by
  28.147  derived conditional ones. For @{term gcd} it means we have to prove
  28.148  these lemmas:
  28.149 -*}
  28.150 +\<close>
  28.151  
  28.152  lemma [simp]: "gcd m 0 = m"
  28.153  apply(simp)
  28.154 @@ -180,15 +180,15 @@
  28.155  apply(simp)
  28.156  done
  28.157  
  28.158 -text{*\noindent
  28.159 +text\<open>\noindent
  28.160  Simplification terminates for these proofs because the condition of the @{text
  28.161  "if"} simplifies to @{term True} or @{term False}.
  28.162  Now we can disable the original simplification rule:
  28.163 -*}
  28.164 +\<close>
  28.165  
  28.166  declare gcd.simps [simp del]
  28.167  
  28.168 -text{*
  28.169 +text\<open>
  28.170  \index{induction!recursion|(}
  28.171  \index{recursion induction|(}
  28.172  
  28.173 @@ -207,29 +207,29 @@
  28.174  you are trying to establish holds for the left-hand side provided it holds
  28.175  for all recursive calls on the right-hand side. Here is a simple example
  28.176  involving the predefined @{term"map"} functional on lists:
  28.177 -*}
  28.178 +\<close>
  28.179  
  28.180  lemma "map f (sep x xs) = sep (f x) (map f xs)"
  28.181  
  28.182 -txt{*\noindent
  28.183 +txt\<open>\noindent
  28.184  Note that @{term"map f xs"}
  28.185  is the result of applying @{term"f"} to all elements of @{term"xs"}. We prove
  28.186  this lemma by recursion induction over @{term"sep"}:
  28.187 -*}
  28.188 +\<close>
  28.189  
  28.190  apply(induct_tac x xs rule: sep.induct)
  28.191  
  28.192 -txt{*\noindent
  28.193 +txt\<open>\noindent
  28.194  The resulting proof state has three subgoals corresponding to the three
  28.195  clauses for @{term"sep"}:
  28.196  @{subgoals[display,indent=0]}
  28.197  The rest is pure simplification:
  28.198 -*}
  28.199 +\<close>
  28.200  
  28.201  apply simp_all
  28.202  done
  28.203  
  28.204 -text{*\noindent The proof goes smoothly because the induction rule
  28.205 +text\<open>\noindent The proof goes smoothly because the induction rule
  28.206  follows the recursion of @{const sep}.  Try proving the above lemma by
  28.207  structural induction, and you find that you need an additional case
  28.208  distinction.
  28.209 @@ -255,7 +255,7 @@
  28.210  holds for the tail of that list.
  28.211  \index{induction!recursion|)}
  28.212  \index{recursion induction|)}
  28.213 -*}
  28.214 +\<close>
  28.215  (*<*)
  28.216  end
  28.217  (*>*)
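
The recursion-induction recipe above fits in a dozen lines. Apart from the invented theory name, this sketch repeats sep and the map/sep lemma verbatim, using the induction rule sep.induct that fun generates:

theory Fun_Sketch imports Main begin

fun sep :: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list" where
"sep a []       = []" |
"sep a [x]      = [x]" |
"sep a (x#y#zs) = x # a # sep a (y#zs)"

(* one subgoal per defining equation, each closed by simplification *)
lemma "map f (sep x xs) = sep (f x) (map f xs)"
apply(induct_tac x xs rule: sep.induct)
apply simp_all
done

end
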
    29.1 --- a/src/Doc/Tutorial/Ifexpr/Ifexpr.thy	Thu Jan 11 13:48:17 2018 +0100
    29.2 +++ b/src/Doc/Tutorial/Ifexpr/Ifexpr.thy	Fri Jan 12 14:08:53 2018 +0100
    29.3 @@ -2,26 +2,26 @@
    29.4  theory Ifexpr imports Main begin
    29.5  (*>*)
    29.6  
    29.7 -subsection{*Case Study: Boolean Expressions*}
    29.8 +subsection\<open>Case Study: Boolean Expressions\<close>
    29.9  
   29.10 -text{*\label{sec:boolex}\index{boolean expressions example|(}
   29.11 +text\<open>\label{sec:boolex}\index{boolean expressions example|(}
   29.12  The aim of this case study is twofold: it shows how to model boolean
   29.13  expressions and some algorithms for manipulating them, and it demonstrates
   29.14  the constructs introduced above.
   29.15 -*}
   29.16 +\<close>
   29.17  
   29.18 -subsubsection{*Modelling Boolean Expressions*}
   29.19 +subsubsection\<open>Modelling Boolean Expressions\<close>
   29.20  
   29.21 -text{*
   29.22 +text\<open>
   29.23  We want to represent boolean expressions built up from variables and
   29.24  constants by negation and conjunction. The following datatype serves exactly
   29.25  that purpose:
   29.26 -*}
   29.27 +\<close>
   29.28  
   29.29  datatype boolex = Const bool | Var nat | Neg boolex
   29.30                  | And boolex boolex
   29.31  
   29.32 -text{*\noindent
   29.33 +text\<open>\noindent
   29.34  The two constants are represented by @{term"Const True"} and
   29.35  @{term"Const False"}. Variables are represented by terms of the form
   29.36  @{term"Var n"}, where @{term"n"} is a natural number (type @{typ"nat"}).
   29.37 @@ -34,7 +34,7 @@
   29.38  Hence the function @{text"value"} takes an additional parameter, an
   29.39  \emph{environment} of type @{typ"nat => bool"}, which maps variables to their
   29.40  values:
   29.41 -*}
   29.42 +\<close>
   29.43  
   29.44  primrec "value" :: "boolex \<Rightarrow> (nat \<Rightarrow> bool) \<Rightarrow> bool" where
   29.45  "value (Const b) env = b" |
   29.46 @@ -42,20 +42,20 @@
   29.47  "value (Neg b)   env = (\<not> value b env)" |
   29.48  "value (And b c) env = (value b env \<and> value c env)"
   29.49  
   29.50 -text{*\noindent
   29.51 +text\<open>\noindent
   29.52  \subsubsection{If-Expressions}
   29.53  
   29.54  An alternative and often more efficient (because in a certain sense
   29.55  canonical) representation is given by so-called \emph{If-expressions} built up
   29.56  from constants (@{term"CIF"}), variables (@{term"VIF"}) and conditionals
   29.57  (@{term"IF"}):
   29.58 -*}
   29.59 +\<close>
   29.60  
   29.61  datatype ifex = CIF bool | VIF nat | IF ifex ifex ifex
   29.62  
   29.63 -text{*\noindent
   29.64 +text\<open>\noindent
   29.65  The evaluation of If-expressions proceeds as for @{typ"boolex"}:
   29.66 -*}
   29.67 +\<close>
   29.68  
   29.69  primrec valif :: "ifex \<Rightarrow> (nat \<Rightarrow> bool) \<Rightarrow> bool" where
   29.70  "valif (CIF b)    env = b" |
   29.71 @@ -63,13 +63,13 @@
   29.72  "valif (IF b t e) env = (if valif b env then valif t env
   29.73                                          else valif e env)"
   29.74  
   29.75 -text{*
   29.76 +text\<open>
   29.77  \subsubsection{Converting Boolean and If-Expressions}
   29.78  
   29.79  The type @{typ"boolex"} is close to the customary representation of logical
   29.80  formulae, whereas @{typ"ifex"} is designed for efficiency. It is easy to
   29.81  translate from @{typ"boolex"} into @{typ"ifex"}:
   29.82 -*}
   29.83 +\<close>
   29.84  
   29.85  primrec bool2if :: "boolex \<Rightarrow> ifex" where
   29.86  "bool2if (Const b) = CIF b" |
   29.87 @@ -77,22 +77,22 @@
   29.88  "bool2if (Neg b)   = IF (bool2if b) (CIF False) (CIF True)" |
   29.89  "bool2if (And b c) = IF (bool2if b) (bool2if c) (CIF False)"
   29.90  
   29.91 -text{*\noindent
   29.92 +text\<open>\noindent
   29.93  At last, we have something we can verify: that @{term"bool2if"} preserves the
   29.94  value of its argument:
   29.95 -*}
   29.96 +\<close>
   29.97  
   29.98  lemma "valif (bool2if b) env = value b env"
   29.99  
  29.100 -txt{*\noindent
  29.101 +txt\<open>\noindent
  29.102  The proof is canonical:
  29.103 -*}
  29.104 +\<close>
  29.105  
  29.106  apply(induct_tac b)
  29.107  apply(auto)
  29.108  done
  29.109  
  29.110 -text{*\noindent
  29.111 +text\<open>\noindent
  29.112  In fact, all proofs in this case study look exactly like this. Hence we do
  29.113  not show them below.
  29.114  
  29.115 @@ -102,7 +102,7 @@
  29.116  repeatedly replacing a subterm of the form @{term"IF (IF b x y) z u"} by
  29.117  @{term"IF b (IF x z u) (IF y z u)"}, which has the same value. The following
  29.118  primitive recursive functions perform this task:
  29.119 -*}
  29.120 +\<close>
  29.121  
  29.122  primrec normif :: "ifex \<Rightarrow> ifex \<Rightarrow> ifex \<Rightarrow> ifex" where
  29.123  "normif (CIF b)    t e = IF (CIF b) t e" |
  29.124 @@ -114,18 +114,18 @@
  29.125  "norm (VIF x)    = VIF x" |
  29.126  "norm (IF b t e) = normif b (norm t) (norm e)"
  29.127  
  29.128 -text{*\noindent
  29.129 +text\<open>\noindent
  29.130  Their interplay is tricky; we leave it to you to develop an
  29.131  intuitive understanding. Fortunately, Isabelle can help us to verify that the
  29.132  transformation preserves the value of the expression:
  29.133 -*}
  29.134 +\<close>
  29.135  
  29.136  theorem "valif (norm b) env = valif b env"(*<*)oops(*>*)
  29.137  
  29.138 -text{*\noindent
  29.139 +text\<open>\noindent
  29.140  The proof is canonical, provided we first show the following simplification
  29.141  lemma, which also helps to understand what @{term"normif"} does:
  29.142 -*}
  29.143 +\<close>
  29.144  
  29.145  lemma [simp]:
  29.146    "\<forall>t e. valif (normif b t e) env = valif (IF b t e) env"
  29.147 @@ -137,13 +137,13 @@
  29.148  apply(induct_tac b)
  29.149  by(auto)
  29.150  (*>*)
  29.151 -text{*\noindent
  29.152 +text\<open>\noindent
  29.153  Note that the lemma does not have a name, but is implicitly used in the proof
  29.154  of the theorem shown above because of the @{text"[simp]"} attribute.
  29.155  
  29.156  But how can we be sure that @{term"norm"} really produces a normal form in
  29.157  the above sense? We define a function that tests If-expressions for normality:
  29.158 -*}
  29.159 +\<close>
  29.160  
  29.161  primrec normal :: "ifex \<Rightarrow> bool" where
  29.162  "normal(CIF b) = True" |
  29.163 @@ -151,10 +151,10 @@
  29.164  "normal(IF b t e) = (normal t \<and> normal e \<and>
  29.165       (case b of CIF b \<Rightarrow> True | VIF x \<Rightarrow> True | IF x y z \<Rightarrow> False))"
  29.166  
  29.167 -text{*\noindent
  29.168 +text\<open>\noindent
  29.169  Now we prove @{term"normal(norm b)"}. Of course, this requires a lemma about
  29.170  normality of @{term"normif"}:
  29.171 -*}
  29.172 +\<close>
  29.173  
  29.174  lemma [simp]: "\<forall>t e. normal(normif b t e) = (normal t \<and> normal e)"
  29.175  (*<*)
  29.176 @@ -166,7 +166,7 @@
  29.177  by(auto)
  29.178  (*>*)
  29.179  
  29.180 -text{*\medskip
  29.181 +text\<open>\medskip
  29.182  How do we come up with the required lemmas? Try to prove the main theorems
  29.183  without them and study carefully what @{text auto} leaves unproved. This 
  29.184  can provide the clue.  The necessity of universal quantification
  29.185 @@ -181,7 +181,7 @@
  29.186    equalities (@{text"="}).)
  29.187  \end{exercise}
  29.188  \index{boolean expressions example|)}
  29.189 -*}
  29.190 +\<close>
  29.191  (*<*)
  29.192  
  29.193  primrec normif2 :: "ifex => ifex => ifex => ifex" where
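
A complete, runnable version of the If-expression normalisation can be assembled as follows. The theory name is invented, and the clauses that the hunks above elide, namely the VIF case of valif and the IF case of normif, are reconstructions of the standard definitions, to be checked against the original file. The auxiliary lemma is placed before the theorem so that its canonical proof actually goes through:

theory Ifexpr_Sketch imports Main begin

datatype ifex = CIF bool | VIF nat | IF ifex ifex ifex

primrec valif :: "ifex \<Rightarrow> (nat \<Rightarrow> bool) \<Rightarrow> bool" where
"valif (CIF b)    env = b" |
"valif (VIF x)    env = env x" |
"valif (IF b t e) env = (if valif b env then valif t env
                                        else valif e env)"

(* rotate conditionals to the right: IF (IF b x y) z u becomes
   IF b (IF x z u) (IF y z u), preserving the value *)
primrec normif :: "ifex \<Rightarrow> ifex \<Rightarrow> ifex \<Rightarrow> ifex" where
"normif (CIF b)    t e = IF (CIF b) t e" |
"normif (VIF x)    t e = IF (VIF x) t e" |
"normif (IF b x y) t e = normif b (normif x t e) (normif y t e)"

primrec norm :: "ifex \<Rightarrow> ifex" where
"norm (CIF b)    = CIF b" |
"norm (VIF x)    = VIF x" |
"norm (IF b t e) = normif b (norm t) (norm e)"

(* the universal quantifiers keep the induction hypothesis general
   enough, as the surrounding text explains *)
lemma [simp]: "\<forall>t e. valif (normif b t e) env = valif (IF b t e) env"
apply(induct_tac b)
apply(auto)
done

theorem "valif (norm b) env = valif b env"
apply(induct_tac b)
apply(auto)
done

end
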
    30.1 --- a/src/Doc/Tutorial/Inductive/AB.thy	Thu Jan 11 13:48:17 2018 +0100
    30.2 +++ b/src/Doc/Tutorial/Inductive/AB.thy	Fri Jan 12 14:08:53 2018 +0100
    30.3 @@ -1,8 +1,8 @@
    30.4  (*<*)theory AB imports Main begin(*>*)
    30.5  
    30.6 -section{*Case Study: A Context Free Grammar*}
    30.7 +section\<open>Case Study: A Context Free Grammar\<close>
    30.8  
    30.9 -text{*\label{sec:CFG}
   30.10 +text\<open>\label{sec:CFG}
   30.11  \index{grammars!defining inductively|(}%
   30.12  Grammars are nothing but shorthands for inductive definitions of nonterminals
   30.13  which represent sets of strings. For example, the production
   30.14 @@ -21,24 +21,24 @@
   30.15  
   30.16  We start by fixing the alphabet, which consists only of @{term a}'s
   30.17  and~@{term b}'s:
   30.18 -*}
   30.19 +\<close>
   30.20  
   30.21  datatype alfa = a | b
   30.22  
   30.23 -text{*\noindent
   30.24 +text\<open>\noindent
   30.25  For convenience we include the following easy lemmas as simplification rules:
   30.26 -*}
   30.27 +\<close>
   30.28  
   30.29  lemma [simp]: "(x \<noteq> a) = (x = b) \<and> (x \<noteq> b) = (x = a)"
   30.30  by (case_tac x, auto)
   30.31  
   30.32 -text{*\noindent
   30.33 +text\<open>\noindent
   30.34  Words over this alphabet are of type @{typ"alfa list"}, and
   30.35  the three nonterminals are declared as sets of such words.
   30.36  The productions above are recast as a \emph{mutual} inductive
   30.37  definition\index{inductive definition!simultaneous}
   30.38  of @{term S}, @{term A} and~@{term B}:
   30.39 -*}
   30.40 +\<close>
   30.41  
   30.42  inductive_set
   30.43    S :: "alfa list set" and
   30.44 @@ -55,31 +55,31 @@
   30.45  | "w \<in> S            \<Longrightarrow> b#w   \<in> B"
   30.46  | "\<lbrakk> v \<in> B; w \<in> B \<rbrakk> \<Longrightarrow> a#v@w \<in> B"
   30.47  
   30.48 -text{*\noindent
   30.49 +text\<open>\noindent
   30.50  First we show that all words in @{term S} contain the same number of @{term
   30.51  a}'s and @{term b}'s. Since the definition of @{term S} is by mutual
   30.52  induction, so is the proof: we show at the same time that all words in
   30.53  @{term A} contain one more @{term a} than @{term b} and all words in @{term
   30.54  B} contain one more @{term b} than @{term a}.
   30.55 -*}
   30.56 +\<close>
   30.57  
   30.58  lemma correctness:
   30.59    "(w \<in> S \<longrightarrow> size[x\<leftarrow>w. x=a] = size[x\<leftarrow>w. x=b])     \<and>
   30.60     (w \<in> A \<longrightarrow> size[x\<leftarrow>w. x=a] = size[x\<leftarrow>w. x=b] + 1) \<and>
   30.61     (w \<in> B \<longrightarrow> size[x\<leftarrow>w. x=b] = size[x\<leftarrow>w. x=a] + 1)"
   30.62  
   30.63 -txt{*\noindent
   30.64 +txt\<open>\noindent
   30.65  These propositions are expressed with the help of the predefined @{term
   30.66  filter} function on lists, which has the convenient syntax @{text"[x\<leftarrow>xs. P
   30.67  x]"}, the list of all elements @{term x} in @{term xs} such that @{prop"P x"}
   30.68  holds. Remember that on lists @{text size} and @{text length} are synonymous.
   30.69  
   30.70  The proof itself is by rule induction and afterwards automatic:
   30.71 -*}
   30.72 +\<close>
   30.73  
   30.74  by (rule S_A_B.induct, auto)
   30.75  
   30.76 -text{*\noindent
   30.77 +text\<open>\noindent
   30.78  This may seem surprising at first, and is indeed an indication of the power
   30.79  of inductive definitions. But it is also quite straightforward. For example,
   30.80  consider the production $A \to b A A$: if $v,w \in A$ and the elements of $A$
   30.81 @@ -109,13 +109,13 @@
   30.82  and @{term b}'s to an arbitrary property @{term P}. Otherwise we would have
   30.83  to prove the desired lemma twice, once as stated above and once with the
   30.84  roles of @{term a}'s and @{term b}'s interchanged.
   30.85 -*}
   30.86 +\<close>
   30.87  
   30.88  lemma step1: "\<forall>i < size w.
   30.89    \<bar>(int(size[x\<leftarrow>take (i+1) w. P x])-int(size[x\<leftarrow>take (i+1) w. \<not>P x]))
   30.90     - (int(size[x\<leftarrow>take i w. P x])-int(size[x\<leftarrow>take i w. \<not>P x]))\<bar> \<le> 1"
   30.91  
   30.92 -txt{*\noindent
   30.93 +txt\<open>\noindent
   30.94  The lemma is a bit hard to read because of the coercion function
   30.95  @{text"int :: nat \<Rightarrow> int"}. It is required because @{term size} returns
   30.96  a natural number, but subtraction on type~@{typ nat} will do the wrong thing.
   30.97 @@ -126,34 +126,34 @@
   30.98  The proof is by induction on @{term w}, with a trivial base case, and a not
   30.99  so trivial induction step. Since it is essentially just arithmetic, we do not
  30.100  discuss it.
  30.101 -*}
  30.102 +\<close>
  30.103  
  30.104  apply(induct_tac w)
  30.105  apply(auto simp add: abs_if take_Cons split: nat.split)
  30.106  done
  30.107  
  30.108 -text{*
  30.109 +text\<open>
  30.110  Finally we come to the above-mentioned lemma about cutting in half a word with two more elements of one sort than of the other sort:
  30.111 -*}
  30.112 +\<close>
  30.113  
  30.114  lemma part1:
  30.115   "size[x\<leftarrow>w. P x] = size[x\<leftarrow>w. \<not>P x]+2 \<Longrightarrow>
  30.116    \<exists>i\<le>size w. size[x\<leftarrow>take i w. P x] = size[x\<leftarrow>take i w. \<not>P x]+1"
  30.117  
  30.118 -txt{*\noindent
  30.119 +txt\<open>\noindent
  30.120  This is proved by @{text force} with the help of the intermediate value theorem,
  30.121  instantiated appropriately and with its first premise disposed of by lemma
  30.122  @{thm[source]step1}:
  30.123 -*}
  30.124 +\<close>
  30.125  
  30.126  apply(insert nat0_intermed_int_val[OF step1, of "P" "w" "1"])
  30.127  by force
  30.128  
  30.129 -text{*\noindent
  30.130 +text\<open>\noindent
  30.131  
  30.132  Lemma @{thm[source]part1} tells us only about the prefix @{term"take i w"}.
  30.133  An easy lemma deals with the suffix @{term"drop i w"}:
  30.134 -*}
  30.135 +\<close>
  30.136  
  30.137  
  30.138  lemma part2:
  30.139 @@ -163,7 +163,7 @@
  30.140     \<Longrightarrow> size[x\<leftarrow>drop i w. P x] = size[x\<leftarrow>drop i w. \<not>P x]+1"
  30.141  by(simp del: append_take_drop_id)
  30.142  
  30.143 -text{*\noindent
  30.144 +text\<open>\noindent
  30.145  In the proof we have disabled the normally useful lemma
  30.146  \begin{isabelle}
  30.147  @{thm append_take_drop_id[no_vars]}
  30.148 @@ -174,34 +174,34 @@
  30.149  
  30.150  To dispose of trivial cases automatically, the rules of the inductive
  30.151  definition are declared simplification rules:
  30.152 -*}
  30.153 +\<close>
  30.154  
  30.155  declare S_A_B.intros[simp]
  30.156  
  30.157 -text{*\noindent
  30.158 +text\<open>\noindent
  30.159  This could have been done earlier but was not necessary so far.
  30.160  
  30.161  The completeness theorem tells us that if a word has the same number of
  30.162  @{term a}'s and @{term b}'s, then it is in @{term S}, and similarly 
  30.163  for @{term A} and @{term B}:
  30.164 -*}
  30.165 +\<close>
  30.166  
  30.167  theorem completeness:
  30.168    "(size[x\<leftarrow>w. x=a] = size[x\<leftarrow>w. x=b]     \<longrightarrow> w \<in> S) \<and>
  30.169     (size[x\<leftarrow>w. x=a] = size[x\<leftarrow>w. x=b] + 1 \<longrightarrow> w \<in> A) \<and>
  30.170     (size[x\<leftarrow>w. x=b] = size[x\<leftarrow>w. x=a] + 1 \<longrightarrow> w \<in> B)"
  30.171  
  30.172 -txt{*\noindent
  30.173 +txt\<open>\noindent
  30.174  The proof is by induction on @{term w}. Structural induction would fail here
  30.175  because, as we can see from the grammar, we need to make bigger steps than
  30.176  merely appending a single letter at the front. Hence we induct on the length
  30.177  of @{term w}, using the induction rule @{thm[source]length_induct}:
  30.178 -*}
  30.179 +\<close>
  30.180  
  30.181  apply(induct_tac w rule: length_induct)
  30.182  apply(rename_tac w)
  30.183  
  30.184 -txt{*\noindent
  30.185 +txt\<open>\noindent
  30.186  The @{text rule} parameter tells @{text induct_tac} explicitly which induction
  30.187  rule to use. For details see \S\ref{sec:complete-ind} below.
   30.188  In this case the result is that we may assume the lemma already holds for all words shorter than @{term w}.
  30.189 @@ -210,13 +210,13 @@
  30.190  
  30.191  The proof continues with a case distinction on @{term w},
  30.192  on whether @{term w} is empty or not.
  30.193 -*}
  30.194 +\<close>
  30.195  
  30.196  apply(case_tac w)
  30.197   apply(simp_all)
  30.198  (*<*)apply(rename_tac x v)(*>*)
  30.199  
  30.200 -txt{*\noindent
  30.201 +txt\<open>\noindent
  30.202  Simplification disposes of the base case and leaves only a conjunction
  30.203  of two step cases to be proved:
  30.204  if @{prop"w = a#v"} and @{prop[display]"size[x\<in>v. x=a] = size[x\<in>v. x=b]+2"} then
  30.205 @@ -226,49 +226,49 @@
  30.206  After breaking the conjunction up into two cases, we can apply
  30.207  @{thm[source]part1} to the assumption that @{term w} contains two more @{term
  30.208  a}'s than @{term b}'s.
  30.209 -*}
  30.210 +\<close>
  30.211  
  30.212  apply(rule conjI)
  30.213   apply(clarify)
  30.214   apply(frule part1[of "\<lambda>x. x=a", simplified])
  30.215   apply(clarify)
  30.216 -txt{*\noindent
  30.217 +txt\<open>\noindent
  30.218  This yields an index @{prop"i \<le> length v"} such that
  30.219  @{prop[display]"length [x\<leftarrow>take i v . x = a] = length [x\<leftarrow>take i v . x = b] + 1"}
  30.220  With the help of @{thm[source]part2} it follows that
  30.221  @{prop[display]"length [x\<leftarrow>drop i v . x = a] = length [x\<leftarrow>drop i v . x = b] + 1"}
  30.222 -*}
  30.223 +\<close>
  30.224  
  30.225   apply(drule part2[of "\<lambda>x. x=a", simplified])
  30.226    apply(assumption)
  30.227  
  30.228 -txt{*\noindent
  30.229 +txt\<open>\noindent
  30.230  Now it is time to decompose @{term v} in the conclusion @{prop"b#v \<in> A"}
  30.231  into @{term"take i v @ drop i v"},
  30.232 -*}
  30.233 +\<close>
  30.234  
  30.235   apply(rule_tac n1=i and t=v in subst[OF append_take_drop_id])
  30.236  
  30.237 -txt{*\noindent
  30.238 +txt\<open>\noindent
  30.239  (the variables @{term n1} and @{term t} are the result of composing the
  30.240  theorems @{thm[source]subst} and @{thm[source]append_take_drop_id})
  30.241  after which the appropriate rule of the grammar reduces the goal
  30.242  to the two subgoals @{prop"take i v \<in> A"} and @{prop"drop i v \<in> A"}:
  30.243 -*}
  30.244 +\<close>
  30.245  
  30.246   apply(rule S_A_B.intros)
  30.247  
  30.248 -txt{*
  30.249 +txt\<open>
  30.250  Both subgoals follow from the induction hypothesis because both @{term"take i
  30.251  v"} and @{term"drop i v"} are shorter than @{term w}:
  30.252 -*}
  30.253 +\<close>
  30.254  
  30.255    apply(force simp add: min_less_iff_disj)
  30.256   apply(force split: nat_diff_split)
  30.257  
  30.258 -txt{*
  30.259 +txt\<open>
  30.260  The case @{prop"w = b#v"} is proved analogously:
  30.261 -*}
  30.262 +\<close>
  30.263  
  30.264  apply(clarify)
  30.265  apply(frule part1[of "\<lambda>x. x=b", simplified])
  30.266 @@ -280,7 +280,7 @@
  30.267   apply(force simp add: min_less_iff_disj)
  30.268  by(force simp add: min_less_iff_disj split: nat_diff_split)
  30.269  
  30.270 -text{*
  30.271 +text\<open>
  30.272  We conclude this section with a comparison of our proof with 
  30.273  Hopcroft\index{Hopcroft, J. E.} and Ullman's\index{Ullman, J. D.}
  30.274  @{cite \<open>p.\ts81\<close> HopcroftUllman}.
  30.275 @@ -304,6 +304,6 @@
  30.276  cases.  Such errors are found in many pen-and-paper proofs when they
  30.277  are scrutinized formally.%
  30.278  \index{grammars!defining inductively|)}
  30.279 -*}
  30.280 +\<close>
  30.281  
  30.282  (*<*)end(*>*)
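
The proof pattern used for S, A and B, a mutual inductive definition attacked with a single conjunction of implications and the joint induction rule, shows up already in miniature. Everything below is invented for illustration: the sets ev and od, the theory name, and the use of the library lemma mod_Suc, whose name should be checked against the Isabelle version at hand:

theory AB_Sketch imports Main begin

inductive_set
  ev :: "nat set" and
  od :: "nat set"
where
  "0 \<in> ev"
| "n \<in> ev \<Longrightarrow> Suc n \<in> od"
| "n \<in> od \<Longrightarrow> Suc n \<in> ev"

(* one conjunct per set, discharged simultaneously, just like the
   correctness lemma for S, A and B above *)
lemma "(n \<in> ev \<longrightarrow> n mod 2 = 0) \<and> (n \<in> od \<longrightarrow> n mod 2 = 1)"
by (rule ev_od.induct, auto simp add: mod_Suc)

end
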
    31.1 --- a/src/Doc/Tutorial/Inductive/Advanced.thy	Thu Jan 11 13:48:17 2018 +0100
    31.2 +++ b/src/Doc/Tutorial/Inductive/Advanced.thy	Fri Jan 12 14:08:53 2018 +0100
    31.3 @@ -2,7 +2,7 @@
    31.4  ML_file "../../antiquote_setup.ML"
    31.5  (*>*)
    31.6  
    31.7 -text {*
    31.8 +text \<open>
    31.9  The premises of introduction rules may contain universal quantifiers and
   31.10  monotone functions.  A universal quantifier lets the rule 
   31.11  refer to any number of instances of 
   31.12 @@ -10,11 +10,11 @@
   31.13  to existing constructions (such as ``list of'') over the inductively defined
   31.14  set.  The examples below show how to use the additional expressiveness
   31.15  and how to reason from the resulting definitions.
   31.16 -*}
   31.17 +\<close>
   31.18  
   31.19 -subsection{* Universal Quantifiers in Introduction Rules \label{sec:gterm-datatype} *}
   31.20 +subsection\<open>Universal Quantifiers in Introduction Rules \label{sec:gterm-datatype}\<close>
   31.21  
   31.22 -text {*
   31.23 +text \<open>
   31.24  \index{ground terms example|(}%
   31.25  \index{quantifiers!and inductive definitions|(}%
   31.26  As a running example, this section develops the theory of \textbf{ground terms}: terms constructed from constant and function symbols but not variables.  To simplify matters further, we regard a
   31.27 @@ -23,19 +23,19 @@
   31.28  constant as a function applied to the null argument  list.  Let us declare a
   31.29  datatype @{text gterm} for the type of ground  terms. It is a type constructor
   31.30  whose argument is a type of  function symbols. 
   31.31 -*}
   31.32 +\<close>
   31.33  
   31.34  datatype 'f gterm = Apply 'f "'f gterm list"
   31.35  
   31.36 -text {*
   31.37 +text \<open>
   31.38  To try it out, we declare a datatype of some integer operations: 
   31.39  integer constants, the unary minus operator and the addition 
   31.40  operator.
   31.41 -*}
   31.42 +\<close>
   31.43  
   31.44  datatype integer_op = Number int | UnaryMinus | Plus
   31.45  
   31.46 -text {*
   31.47 +text \<open>
   31.48  Now the type @{typ "integer_op gterm"} denotes the ground 
   31.49  terms built over those symbols.
   31.50  
   31.51 @@ -56,7 +56,7 @@
   31.52  to our inductively defined set: is a ground term 
   31.53  over~@{text F}.  The function @{term set} denotes the set of elements in a given 
   31.54  list. 
   31.55 -*}
   31.56 +\<close>
   31.57  
   31.58  inductive_set
   31.59    gterms :: "'f set \<Rightarrow> 'f gterm set"
   31.60 @@ -65,11 +65,11 @@
   31.61  step[intro!]: "\<lbrakk>\<forall>t \<in> set args. t \<in> gterms F;  f \<in> F\<rbrakk>
   31.62                 \<Longrightarrow> (Apply f args) \<in> gterms F"
   31.63  
   31.64 -text {*
   31.65 +text \<open>
   31.66  To demonstrate a proof from this definition, let us 
   31.67  show that the function @{term gterms}
   31.68  is \textbf{monotone}.  We shall need this concept shortly.
   31.69 -*}
   31.70 +\<close>
   31.71  
   31.72  lemma gterms_mono: "F\<subseteq>G \<Longrightarrow> gterms F \<subseteq> gterms G"
   31.73  apply clarify
   31.74 @@ -81,7 +81,7 @@
   31.75  apply clarify
   31.76  apply (erule gterms.induct)
   31.77  (*>*)
   31.78 -txt{*
   31.79 +txt\<open>
   31.80  Intuitively, this theorem says that
   31.81  enlarging the set of function symbols enlarges the set of ground 
   31.82  terms. The proof is a trivial rule induction.
   31.83 @@ -92,9 +92,9 @@
   31.84  The assumptions state that @{text f} belongs 
   31.85  to~@{text F}, which is included in~@{text G}, and that every element of the list @{text args} is
   31.86  a ground term over~@{text G}.  The @{text blast} method finds this chain of reasoning easily.  
   31.87 -*}
   31.88 +\<close>
   31.89  (*<*)oops(*>*)
   31.90 -text {*
   31.91 +text \<open>
   31.92  \begin{warn}
   31.93  Why do we call this function @{text gterms} instead 
   31.94  of @{text gterm}?  A constant may have the same name as a type.  However,
   31.95 @@ -113,7 +113,7 @@
   31.96  terms and a function  symbol~@{text f}. If the length of the list matches the
   31.97  function's arity  then applying @{text f} to @{text args} yields a well-formed
   31.98  term.
   31.99 -*}
  31.100 +\<close>
  31.101  
  31.102  inductive_set
  31.103    well_formed_gterm :: "('f \<Rightarrow> nat) \<Rightarrow> 'f gterm set"
  31.104 @@ -123,16 +123,16 @@
  31.105                  length args = arity f\<rbrakk>
  31.106                 \<Longrightarrow> (Apply f args) \<in> well_formed_gterm arity"
  31.107  
  31.108 -text {*
  31.109 +text \<open>
  31.110  The inductive definition neatly captures the reasoning above.
  31.111  The universal quantification over the
  31.112  @{text set} of arguments expresses that all of them are well-formed.%
  31.113  \index{quantifiers!and inductive definitions|)}
  31.114 -*}
  31.115 +\<close>
  31.116  
  31.117 -subsection{* Alternative Definition Using a Monotone Function *}
  31.118 +subsection\<open>Alternative Definition Using a Monotone Function\<close>
  31.119  
  31.120 -text {*
  31.121 +text \<open>
  31.122  \index{monotone functions!and inductive definitions|(}% 
  31.123  An inductive definition may refer to the
  31.124  inductively defined  set through an arbitrary monotone function.  To
  31.125 @@ -148,7 +148,7 @@
  31.126  introduction rule.  The first premise states that @{text args} belongs to
  31.127  the @{text lists} of well-formed terms.  This formulation is more
  31.128  direct, if more obscure, than using a universal quantifier.
  31.129 -*}
  31.130 +\<close>
  31.131  
  31.132  inductive_set
  31.133    well_formed_gterm' :: "('f \<Rightarrow> nat) \<Rightarrow> 'f gterm set"
  31.134 @@ -159,7 +159,7 @@
  31.135                 \<Longrightarrow> (Apply f args) \<in> well_formed_gterm' arity"
  31.136  monos lists_mono
  31.137  
  31.138 -text {*
  31.139 +text \<open>
  31.140  We cite the theorem @{text lists_mono} to justify 
  31.141  using the function @{term lists}.%
  31.142  \footnote{This particular theorem is installed by default already, but we
  31.143 @@ -194,15 +194,15 @@
  31.144  Further lists of well-formed
  31.145  terms become available and none are taken away.%
  31.146  \index{monotone functions!and inductive definitions|)} 
  31.147 -*}
  31.148 +\<close>
  31.149  
  31.150 -subsection{* A Proof of Equivalence *}
  31.151 +subsection\<open>A Proof of Equivalence\<close>
  31.152  
  31.153 -text {*
  31.154 +text \<open>
  31.155  We naturally hope that these two inductive definitions of ``well-formed'' 
  31.156  coincide.  The equality can be proved by separate inclusions in 
  31.157  each direction.  Each is a trivial rule induction. 
  31.158 -*}
  31.159 +\<close>
  31.160  
  31.161  lemma "well_formed_gterm arity \<subseteq> well_formed_gterm' arity"
  31.162  apply clarify
  31.163 @@ -214,7 +214,7 @@
  31.164  apply clarify
  31.165  apply (erule well_formed_gterm.induct)
  31.166  (*>*)
  31.167 -txt {*
  31.168 +txt \<open>
  31.169  The @{text clarify} method gives
  31.170  us an element of @{term "well_formed_gterm arity"} on which to perform 
  31.171  induction.  The resulting subgoal can be proved automatically:
  31.172 @@ -222,7 +222,7 @@
  31.173  This proof resembles the one given in
  31.174  {\S}\ref{sec:gterm-datatype} above, especially in the form of the
  31.175  induction hypothesis.  Next, we consider the opposite inclusion:
  31.176 -*}
  31.177 +\<close>
  31.178  (*<*)oops(*>*)
  31.179  lemma "well_formed_gterm' arity \<subseteq> well_formed_gterm arity"
  31.180  apply clarify
  31.181 @@ -234,7 +234,7 @@
  31.182  apply clarify
  31.183  apply (erule well_formed_gterm'.induct)
  31.184  (*>*)
  31.185 -txt {*
  31.186 +txt \<open>
  31.187  The proof script is virtually identical,
  31.188  but the subgoal after applying induction may be surprising:
  31.189  @{subgoals[display,indent=0,margin=65]}
  31.190 @@ -257,13 +257,13 @@
  31.191  distribute over intersection.  Monotonicity implies one direction of
  31.192  this set equality; we have this theorem:
  31.193  @{named_thms [display,indent=0] mono_Int [no_vars] (mono_Int)}
  31.194 -*}
  31.195 +\<close>
  31.196  (*<*)oops(*>*)
  31.197  
  31.198  
  31.199 -subsection{* Another Example of Rule Inversion *}
  31.200 +subsection\<open>Another Example of Rule Inversion\<close>
  31.201  
  31.202 -text {*
  31.203 +text \<open>
  31.204  \index{rule inversion|(}%
  31.205  Does @{term gterms} distribute over intersection?  We have proved that this
  31.206  function is monotone, so @{text mono_Int} gives one of the inclusions.  The
  31.207 @@ -271,20 +271,20 @@
  31.208  sets
  31.209  @{term F} and~@{term G} then it is also a ground term over their intersection,
  31.210  @{term "F \<inter> G"}.
  31.211 -*}
  31.212 +\<close>
  31.213  
  31.214  lemma gterms_IntI:
  31.215       "t \<in> gterms F \<Longrightarrow> t \<in> gterms G \<longrightarrow> t \<in> gterms (F\<inter>G)"
  31.216  (*<*)oops(*>*)
  31.217 -text {*
  31.218 +text \<open>
  31.219  Attempting this proof, we get the assumption 
  31.220  @{term "Apply f args \<in> gterms G"}, which cannot be broken down. 
  31.221  It looks like a job for rule inversion:\cmmdx{inductive\protect\_cases}
  31.222 -*}
  31.223 +\<close>
  31.224  
  31.225  inductive_cases gterm_Apply_elim [elim!]: "Apply f args \<in> gterms F"
  31.226  
  31.227 -text {*
  31.228 +text \<open>
  31.229  Here is the result.
  31.230  @{named_thms [display,indent=0,margin=50] gterm_Apply_elim [no_vars] (gterm_Apply_elim)}
  31.231  This rule replaces an assumption about @{term "Apply f args"} by 
  31.232 @@ -295,7 +295,7 @@
  31.233  have given the @{text "elim!"} attribute. 
  31.234  
  31.235  Now we can prove the other half of that distributive law.
  31.236 -*}
  31.237 +\<close>
  31.238  
  31.239  lemma gterms_IntI [rule_format, intro!]:
  31.240       "t \<in> gterms F \<Longrightarrow> t \<in> gterms G \<longrightarrow> t \<in> gterms (F\<inter>G)"
  31.241 @@ -306,7 +306,7 @@
  31.242  lemma "t \<in> gterms F \<Longrightarrow> t \<in> gterms G \<longrightarrow> t \<in> gterms (F\<inter>G)"
  31.243  apply (erule gterms.induct)
  31.244  (*>*)
  31.245 -txt {*
  31.246 +txt \<open>
  31.247  The proof begins with rule induction over the definition of
  31.248  @{term gterms}, which leaves a single subgoal:  
  31.249  @{subgoals[display,indent=0,margin=65]}
  31.250 @@ -320,13 +320,13 @@
  31.251  
  31.252  \smallskip
  31.253  Our distributive law is a trivial consequence of previously-proved results:
  31.254 -*}
  31.255 +\<close>
  31.256  (*<*)oops(*>*)
  31.257  lemma gterms_Int_eq [simp]:
  31.258       "gterms (F \<inter> G) = gterms F \<inter> gterms G"
  31.259  by (blast intro!: mono_Int monoI gterms_mono)
  31.260  
  31.261 -text_raw {*
  31.262 +text_raw \<open>
  31.263  \index{rule inversion|)}%
  31.264  \index{ground terms example|)}
  31.265  
  31.266 @@ -339,7 +339,7 @@
  31.267  list of argument types paired with the result type. 
  31.268  Complete this inductive definition:
  31.269  \begin{isabelle}
  31.270 -*}
  31.271 +\<close>
  31.272  
  31.273  inductive_set
  31.274    well_typed_gterm :: "('f \<Rightarrow> 't list * 't) \<Rightarrow> ('f gterm * 't)set"
  31.275 @@ -352,15 +352,15 @@
  31.276       \<Longrightarrow> (Apply f (map fst args), rtype) 
  31.277           \<in> well_typed_gterm sig"
  31.278  (*>*)
  31.279 -text_raw {*
  31.280 +text_raw \<open>
  31.281  \end{isabelle}
  31.282  \end{exercise}
  31.283  \end{isamarkuptext}
  31.284 -*}
  31.285 +\<close>
  31.286  
  31.287  (*<*)
  31.288  
  31.289 -text{*the following declaration isn't actually used*}
  31.290 +text\<open>the following declaration isn't actually used\<close>
  31.291  primrec
  31.292    integer_arity :: "integer_op \<Rightarrow> nat"
  31.293  where
  31.294 @@ -368,7 +368,7 @@
  31.295  | "integer_arity UnaryMinus        = 1"
  31.296  | "integer_arity Plus              = 2"
  31.297  
  31.298 -text{* the rest isn't used: too complicated.  OK for an exercise though.*}
  31.299 +text\<open>the rest isn't used: too complicated.  OK for an exercise though.\<close>
  31.300  
  31.301  inductive_set
  31.302    integer_signature :: "(integer_op * (unit list * unit)) set"
    32.1 --- a/src/Doc/Tutorial/Inductive/Even.thy	Thu Jan 11 13:48:17 2018 +0100
    32.2 +++ b/src/Doc/Tutorial/Inductive/Even.thy	Fri Jan 12 14:08:53 2018 +0100
    32.3 @@ -2,9 +2,9 @@
    32.4  ML_file "../../antiquote_setup.ML" 
    32.5  (*>*)
    32.6  
    32.7 -section{* The Set of Even Numbers *}
    32.8 +section\<open>The Set of Even Numbers\<close>
    32.9  
   32.10 -text {*
   32.11 +text \<open>
   32.12  \index{even numbers!defining inductively|(}%
   32.13  The set of even numbers can be inductively defined as the least set
   32.14  containing 0 and closed under the operation $+2$.  Obviously,
   32.15 @@ -12,20 +12,20 @@
   32.16  We shall prove below that the two formulations coincide.  On the way we
   32.17  shall examine the primary means of reasoning about inductively defined
   32.18  sets: rule induction.
   32.19 -*}
   32.20 +\<close>
   32.21  
   32.22 -subsection{* Making an Inductive Definition *}
   32.23 +subsection\<open>Making an Inductive Definition\<close>
   32.24  
   32.25 -text {*
   32.26 +text \<open>
   32.27  Using \commdx{inductive\protect\_set}, we declare the constant @{text even} to be
   32.28  a set of natural numbers with the desired properties.
   32.29 -*}
   32.30 +\<close>
   32.31  
   32.32  inductive_set even :: "nat set" where
   32.33  zero[intro!]: "0 \<in> even" |
   32.34  step[intro!]: "n \<in> even \<Longrightarrow> (Suc (Suc n)) \<in> even"
   32.35  
   32.36 -text {*
   32.37 +text \<open>
   32.38  An inductive definition consists of introduction rules.  The first one
   32.39  above states that 0 is even; the second states that if $n$ is even, then so
   32.40  is~$n+2$.  Given this declaration, Isabelle generates a fixed point
   32.41 @@ -44,16 +44,16 @@
   32.42  apply them aggressively. Obviously, regarding 0 as even is safe.  The
   32.43  @{text step} rule is also safe because $n+2$ is even if and only if $n$ is
   32.44  even.  We prove this equivalence later.
   32.45 -*}
   32.46 +\<close>
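[Editorial aside, not part of the changeset: a quick check of the rules this
declaration generates. The names even.zero and even.step are the ones the
surrounding text itself refers to; the snippet is a sketch added for
illustration only.]

(* derive a concrete membership from the two introduction rules *)
lemma "Suc (Suc 0) \<in> even"
by (rule even.step [OF even.zero])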
   32.47  
   32.48 -subsection{*Using Introduction Rules*}
   32.49 +subsection\<open>Using Introduction Rules\<close>
   32.50  
   32.51 -text {*
   32.52 +text \<open>
   32.53  Our first lemma states that numbers of the form $2\times k$ are even.
   32.54  Introduction rules are used to show that specific values belong to the
   32.55  inductive set.  Such proofs typically involve 
   32.56  induction, perhaps over some other inductive set.
   32.57 -*}
   32.58 +\<close>
   32.59  
   32.60  lemma two_times_even[intro!]: "2*k \<in> even"
   32.61  apply (induct_tac k)
   32.62 @@ -63,7 +63,7 @@
   32.63  lemma "2*k \<in> even"
   32.64  apply (induct_tac k)
   32.65  (*>*)
   32.66 -txt {*
   32.67 +txt \<open>
   32.68  \noindent
   32.69  The first step is induction on the natural number @{text k}, which leaves
   32.70  two subgoals:
   32.71 @@ -75,14 +75,14 @@
   32.72  definition of @{text even} (using the divides relation) and our inductive
   32.73  definition.  One direction of this equivalence is immediate by the lemma
   32.74  just proved, whose @{text "intro!"} attribute ensures it is applied automatically.
   32.75 -*}
   32.76 +\<close>
   32.77  (*<*)oops(*>*)
   32.78  lemma dvd_imp_even: "2 dvd n \<Longrightarrow> n \<in> even"
   32.79  by (auto simp add: dvd_def)
   32.80  
   32.81 -subsection{* Rule Induction \label{sec:rule-induction} *}
   32.82 +subsection\<open>Rule Induction \label{sec:rule-induction}\<close>
   32.83  
   32.84 -text {*
   32.85 +text \<open>
   32.86  \index{rule induction|(}%
   32.87  From the definition of the set
   32.88  @{term even}, Isabelle has
   32.89 @@ -102,56 +102,56 @@
   32.90  Induction is the usual way of proving a property of the elements of an
   32.91  inductively defined set.  Let us prove that all members of the set
   32.92  @{term even} are multiples of two.
   32.93 -*}
   32.94 +\<close>
   32.95  
   32.96  lemma even_imp_dvd: "n \<in> even \<Longrightarrow> 2 dvd n"
   32.97 -txt {*
   32.98 +txt \<open>
   32.99  We begin by applying induction.  Note that @{text even.induct} has the form
  32.100  of an elimination rule, so we use the method @{text erule}.  We get two
  32.101  subgoals:
  32.102 -*}
  32.103 +\<close>
  32.104  apply (erule even.induct)
  32.105 -txt {*
  32.106 +txt \<open>
  32.107  @{subgoals[display,indent=0]}
  32.108  We unfold the definition of @{text dvd} in both subgoals, proving the first
  32.109  one and simplifying the second:
  32.110 -*}
  32.111 +\<close>
  32.112  apply (simp_all add: dvd_def)
  32.113 -txt {*
  32.114 +txt \<open>
  32.115  @{subgoals[display,indent=0]}
  32.116  The next command eliminates the existential quantifier from the assumption
  32.117  and replaces @{text n} by @{text "2 * k"}.
  32.118 -*}
  32.119 +\<close>
  32.120  apply clarify
  32.121 -txt {*
  32.122 +txt \<open>
  32.123  @{subgoals[display,indent=0]}
  32.124  To conclude, we tell Isabelle that the desired value is
  32.125  @{term "Suc k"}.  With this hint, the subgoal falls to @{text simp}.
  32.126 -*}
  32.127 +\<close>
  32.128  apply (rule_tac x = "Suc k" in exI, simp)
  32.129  (*<*)done(*>*)
  32.130  
  32.131 -text {*
  32.132 +text \<open>
  32.133  Combining the previous two results yields our objective, the
  32.134  equivalence relating @{term even} and @{text dvd}. 
  32.135  %
  32.136  %we don't want [iff]: discuss?
  32.137 -*}
  32.138 +\<close>
  32.139  
  32.140  theorem even_iff_dvd: "(n \<in> even) = (2 dvd n)"
  32.141  by (blast intro: dvd_imp_even even_imp_dvd)
  32.142  
  32.143  
  32.144 -subsection{* Generalization and Rule Induction \label{sec:gen-rule-induction} *}
  32.145 +subsection\<open>Generalization and Rule Induction \label{sec:gen-rule-induction}\<close>
  32.146  
  32.147 -text {*
  32.148 +text \<open>
  32.149  \index{generalizing for induction}%
  32.150  Before applying induction, we typically must generalize
  32.151  the induction formula.  With rule induction, the required generalization
  32.152  can be hard to find and sometimes requires a complete reformulation of the
  32.153  problem.  In this  example, our first attempt uses the obvious statement of
  32.154  the result.  It fails:
  32.155 -*}
  32.156 +\<close>
  32.157  
  32.158  lemma "Suc (Suc n) \<in> even \<Longrightarrow> n \<in> even"
  32.159  apply (erule even.induct)
  32.160 @@ -160,7 +160,7 @@
  32.161  lemma "Suc (Suc n) \<in> even \<Longrightarrow> n \<in> even"
  32.162  apply (erule even.induct)
  32.163  (*>*)
  32.164 -txt {*
  32.165 +txt \<open>
  32.166  Rule induction finds no occurrences of @{term "Suc(Suc n)"} in the
  32.167  conclusion, which it therefore leaves unchanged.  (Look at
  32.168  @{text even.induct} to see why this happens.)  We have these subgoals:
  32.169 @@ -171,7 +171,7 @@
  32.170  in general is described in {\S}\ref{sec:ind-var-in-prems} below.
  32.171  In the current case the solution is easy because
  32.172  we have the necessary inverse, subtraction:
  32.173 -*}
  32.174 +\<close>
  32.175  (*<*)oops(*>*)
  32.176  lemma even_imp_even_minus_2: "n \<in> even \<Longrightarrow> n - 2 \<in> even"
  32.177  apply (erule even.induct)
  32.178 @@ -181,7 +181,7 @@
  32.179  lemma "n \<in>  even \<Longrightarrow> n - 2 \<in> even"
  32.180  apply (erule even.induct)
  32.181  (*>*)
  32.182 -txt {*
  32.183 +txt \<open>
  32.184  This lemma is trivially inductive.  Here are the subgoals:
  32.185  @{subgoals[display,indent=0]}
  32.186  The first is trivial because @{text "0 - 2"} simplifies to @{text 0}, which is
  32.187 @@ -191,24 +191,24 @@
  32.188  
  32.189  \medskip
  32.190  Using our lemma, we can easily prove the result we originally wanted:
  32.191 -*}
  32.192 +\<close>
  32.193  (*<*)oops(*>*)
  32.194  lemma Suc_Suc_even_imp_even: "Suc (Suc n) \<in> even \<Longrightarrow> n \<in> even"
  32.195  by (drule even_imp_even_minus_2, simp)
  32.196  
  32.197 -text {*
  32.198 +text \<open>
  32.199  We have just proved the converse of the introduction rule @{text even.step}.
  32.200  This suggests proving the following equivalence.  We give it the
  32.201  \attrdx{iff} attribute because of its obvious value for simplification.
  32.202 -*}
  32.203 +\<close>
  32.204  
  32.205  lemma [iff]: "((Suc (Suc n)) \<in> even) = (n \<in> even)"
  32.206  by (blast dest: Suc_Suc_even_imp_even)
  32.207  
  32.208  
  32.209 -subsection{* Rule Inversion \label{sec:rule-inversion} *}
  32.210 +subsection\<open>Rule Inversion \label{sec:rule-inversion}\<close>
  32.211  
  32.212 -text {*
  32.213 +text \<open>
  32.214  \index{rule inversion|(}%
  32.215  Case analysis on an inductive definition is called \textbf{rule
  32.216  inversion}.  It is frequently used in proofs about operational
  32.217 @@ -232,11 +232,11 @@
  32.218  @{term "Suc(Suc n)"} then the first case becomes irrelevant, while the second
  32.219  case tells us that @{term n} belongs to @{term even}.  Isabelle will generate
  32.220  this instance for us:
  32.221 -*}
  32.222 +\<close>
  32.223  
  32.224  inductive_cases Suc_Suc_cases [elim!]: "Suc(Suc n) \<in> even"
  32.225  
  32.226 -text {*
  32.227 +text \<open>
  32.228  The \commdx{inductive\protect\_cases} command generates an instance of
  32.229  the @{text cases} rule for the supplied pattern and gives it the supplied name:
  32.230  @{named_thms [display,indent=0] Suc_Suc_cases [no_vars] (Suc_Suc_cases)}
  32.231 @@ -265,13 +265,13 @@
  32.232  
  32.233  For one-off applications of rule inversion, use the \methdx{ind_cases} method. 
  32.234  Here is an example:
  32.235 -*}
  32.236 +\<close>
  32.237  
  32.238  (*<*)lemma "Suc(Suc n) \<in> even \<Longrightarrow> P"(*>*)
  32.239  apply (ind_cases "Suc(Suc n) \<in> even")
  32.240  (*<*)oops(*>*)
  32.241  
  32.242 -text {*
  32.243 +text \<open>
  32.244  The specified instance of the @{text cases} rule is generated, then applied
  32.245  as an elimination rule.
  32.246  
  32.247 @@ -285,6 +285,6 @@
  32.248  used.  Later examples will show that they are actually worth using.%
  32.249  \index{rule inversion|)}%
  32.250  \index{even numbers!defining inductively|)}
  32.251 -*}
  32.252 +\<close>
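[Editorial aside, not part of the changeset: because Suc_Suc_cases is declared
elim! above, blast performs this inversion step automatically. A minimal
sketch:]

(* blast applies the generated cases rule as a safe elimination rule *)
lemma "Suc (Suc n) \<in> even \<Longrightarrow> n \<in> even"
by blast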
  32.253  
  32.254  (*<*)end(*>*)
    33.1 --- a/src/Doc/Tutorial/Inductive/Mutual.thy	Thu Jan 11 13:48:17 2018 +0100
    33.2 +++ b/src/Doc/Tutorial/Inductive/Mutual.thy	Fri Jan 12 14:08:53 2018 +0100
    33.3 @@ -1,12 +1,12 @@
    33.4  (*<*)theory Mutual imports Main begin(*>*)
    33.5  
    33.6 -subsection{*Mutually Inductive Definitions*}
    33.7 +subsection\<open>Mutually Inductive Definitions\<close>
    33.8  
    33.9 -text{*
   33.10 +text\<open>
   33.11  Just as there are datatypes defined by mutual recursion, there are sets defined
   33.12  by mutual induction. As a trivial example we consider the even and odd
   33.13  natural numbers:
   33.14 -*}
   33.15 +\<close>
   33.16  
   33.17  inductive_set
   33.18    Even :: "nat set" and
   33.19 @@ -16,7 +16,7 @@
   33.20  | EvenI: "n \<in> Odd \<Longrightarrow> Suc n \<in> Even"
   33.21  | OddI:  "n \<in> Even \<Longrightarrow> Suc n \<in> Odd"
   33.22  
   33.23 -text{*\noindent
   33.24 +text\<open>\noindent
   33.25  The mutually inductive definition of multiple sets is no different from
   33.26  that of a single set, except for induction: just as for mutually recursive
   33.27  datatypes, induction needs to involve all the simultaneously defined sets. In
   33.28 @@ -26,25 +26,25 @@
   33.29  
   33.30  If we want to prove that all even numbers are divisible by two, we have to
   33.31  generalize the statement as follows:
   33.32 -*}
   33.33 +\<close>
   33.34  
   33.35  lemma "(m \<in> Even \<longrightarrow> 2 dvd m) \<and> (n \<in> Odd \<longrightarrow> 2 dvd (Suc n))"
   33.36  
   33.37 -txt{*\noindent
   33.38 +txt\<open>\noindent
   33.39  The proof is by rule induction. Because of the form of the induction theorem,
   33.40  it is applied by @{text rule} rather than @{text erule} as for ordinary
   33.41  inductive definitions:
   33.42 -*}
   33.43 +\<close>
   33.44  
   33.45  apply(rule Even_Odd.induct)
   33.46  
   33.47 -txt{*
   33.48 +txt\<open>
   33.49  @{subgoals[display,indent=0]}
   33.50  The first two subgoals are proved by simplification and the final one can be
   33.51  proved in the same manner as in \S\ref{sec:rule-induction}
   33.52  where the same subgoal was encountered before.
   33.53  We do not show the proof script.
   33.54 -*}
   33.55 +\<close>
   33.56  (*<*)
   33.57    apply simp
   33.58   apply simp
   33.59 @@ -55,17 +55,17 @@
   33.60  done
   33.61  (*>*)
   33.62  
   33.63 -subsection{*Inductively Defined Predicates\label{sec:ind-predicates}*}
   33.64 +subsection\<open>Inductively Defined Predicates\label{sec:ind-predicates}\<close>
   33.65  
   33.66 -text{*\index{inductive predicates|(}
   33.67 +text\<open>\index{inductive predicates|(}
   33.68  Instead of a set of even numbers one can also define a predicate on @{typ nat}:
   33.69 -*}
   33.70 +\<close>
   33.71  
   33.72  inductive evn :: "nat \<Rightarrow> bool" where
   33.73  zero: "evn 0" |
   33.74  step: "evn n \<Longrightarrow> evn(Suc(Suc n))"
   33.75  
   33.76 -text{*\noindent Everything works as before, except that
   33.77 +text\<open>\noindent Everything works as before, except that
   33.78  you write \commdx{inductive} instead of \isacommand{inductive\_set} and
   33.79  @{prop"evn n"} instead of @{prop"n : Even"}.
   33.80  When defining an n-ary relation as a predicate, it is recommended to curry
   33.81 @@ -75,6 +75,6 @@
   33.82  
   33.83  When should you choose sets and when predicates? If you intend to combine your notion with set-theoretic notation, define it as an inductive set. If not, define it as an inductive predicate, thus avoiding the @{text"\<in>"} notation. But note that predicates of more than one argument cannot be combined with the usual set-theoretic operators: @{term"P \<union> Q"} is not well-typed if @{text"P, Q :: \<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> bool"}; you have to write @{term"%x y. P x y & Q x y"} instead (see the sketch below).
   33.84  \index{inductive predicates|)}
   33.85 -*}
   33.86 +\<close>
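[Editorial sketch, not part of the changeset: the pointwise combination the
paragraph above asks for, spelled out with two hypothetical binary predicates
P and Q.]

consts
  P :: "nat \<Rightarrow> nat \<Rightarrow> bool"
  Q :: "nat \<Rightarrow> nat \<Rightarrow> bool"

(* "P \<union> Q" does not type-check at nat \<Rightarrow> nat \<Rightarrow> bool,
   so the combination is written pointwise: *)
definition PQ :: "nat \<Rightarrow> nat \<Rightarrow> bool" where
"PQ = (\<lambda>x y. P x y \<and> Q x y)"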
   33.87  
   33.88  (*<*)end(*>*)
    34.1 --- a/src/Doc/Tutorial/Inductive/Star.thy	Thu Jan 11 13:48:17 2018 +0100
    34.2 +++ b/src/Doc/Tutorial/Inductive/Star.thy	Fri Jan 12 14:08:53 2018 +0100
    34.3 @@ -1,8 +1,8 @@
    34.4  (*<*)theory Star imports Main begin(*>*)
    34.5  
    34.6 -section{*The Reflexive Transitive Closure*}
    34.7 +section\<open>The Reflexive Transitive Closure\<close>
    34.8  
    34.9 -text{*\label{sec:rtc}
   34.10 +text\<open>\label{sec:rtc}
   34.11  \index{reflexive transitive closure!defining inductively|(}%
   34.12  An inductive definition may accept parameters, so it can express 
   34.13  functions that yield sets.
   34.14 @@ -12,7 +12,7 @@
   34.15  introduced in \S\ref{sec:Relations}, where the operator @{text"\<^sup>*"} was
   34.16  defined as a least fixed point because inductive definitions were not yet
   34.17  available. But now they are:
   34.18 -*}
   34.19 +\<close>
   34.20  
   34.21  inductive_set
   34.22    rtc :: "('a \<times> 'a)set \<Rightarrow> ('a \<times> 'a)set"   ("_*" [1000] 999)
   34.23 @@ -21,7 +21,7 @@
   34.24    rtc_refl[iff]:  "(x,x) \<in> r*"
   34.25  | rtc_step:       "\<lbrakk> (x,y) \<in> r; (y,z) \<in> r* \<rbrakk> \<Longrightarrow> (x,z) \<in> r*"
   34.26  
   34.27 -text{*\noindent
   34.28 +text\<open>\noindent
   34.29  The function @{term rtc} is annotated with concrete syntax: instead of
   34.30  @{text"rtc r"} we can write @{term"r*"}. The actual definition
   34.31  consists of two rules. Reflexivity is obvious and is immediately given the
   34.32 @@ -36,12 +36,12 @@
   34.33  for a start, it does not even mention transitivity.
   34.34  The rest of this section is devoted to proving that it is equivalent to
   34.35  the standard definition. We start with a simple lemma:
   34.36 -*}
   34.37 +\<close>
   34.38  
   34.39  lemma [intro]: "(x,y) \<in> r \<Longrightarrow> (x,y) \<in> r*"
   34.40  by(blast intro: rtc_step)
   34.41  
   34.42 -text{*\noindent
   34.43 +text\<open>\noindent
   34.44  Although the lemma itself is an unremarkable consequence of the basic rules,
   34.45  it has the advantage that it can be declared an introduction rule without the
   34.46  danger of killing the automatic tactics because @{term"r*"} occurs only in
   34.47 @@ -61,12 +61,12 @@
   34.48  expects a premise of the form $(x@1,\dots,x@n) \in R$.
   34.49  
   34.50  Now we turn to the inductive proof of transitivity:
   34.51 -*}
   34.52 +\<close>
   34.53  
   34.54  lemma rtc_trans: "\<lbrakk> (x,y) \<in> r*; (y,z) \<in> r* \<rbrakk> \<Longrightarrow> (x,z) \<in> r*"
   34.55  apply(erule rtc.induct)
   34.56  
   34.57 -txt{*\noindent
   34.58 +txt\<open>\noindent
   34.59  Unfortunately, even the base case is a problem:
   34.60  @{subgoals[display,indent=0,goals_limit=1]}
   34.61  We have to abandon this proof attempt.
   34.62 @@ -85,12 +85,12 @@
   34.63  goal, of the pair @{term"(x,y)"} only @{term x} appears also in the
   34.64  conclusion, but not @{term y}. Thus our induction statement is too
   34.65  general. Fortunately, it can easily be specialized:
   34.66 -transfer the additional premise @{prop"(y,z):r*"} into the conclusion:*}
   34.67 +transfer the additional premise @{prop"(y,z):r*"} into the conclusion:\<close>
   34.68  (*<*)oops(*>*)
   34.69  lemma rtc_trans[rule_format]:
   34.70    "(x,y) \<in> r* \<Longrightarrow> (y,z) \<in> r* \<longrightarrow> (x,z) \<in> r*"
   34.71  
   34.72 -txt{*\noindent
   34.73 +txt\<open>\noindent
   34.74  This is not an obscure trick but a generally applicable heuristic:
   34.75  \begin{quote}\em
   34.76  When proving a statement by rule induction on $(x@1,\dots,x@n) \in R$,
   34.77 @@ -101,24 +101,24 @@
   34.78  \S\ref{sec:ind-var-in-prems}. The @{text rule_format} directive turns
   34.79  @{text"\<longrightarrow>"} back into @{text"\<Longrightarrow>"}: in the end we obtain the original
   34.80  statement of our lemma.
   34.81 -*}
   34.82 +\<close>
   34.83  
   34.84  apply(erule rtc.induct)
   34.85  
   34.86 -txt{*\noindent
   34.87 +txt\<open>\noindent
   34.88  Now induction produces two subgoals which are both proved automatically:
   34.89  @{subgoals[display,indent=0]}
   34.90 -*}
   34.91 +\<close>
   34.92  
   34.93   apply(blast)
   34.94  apply(blast intro: rtc_step)
   34.95  done
   34.96  
   34.97 -text{*
   34.98 +text\<open>
   34.99  Let us now prove that @{term"r*"} is really the reflexive transitive closure
  34.100  of @{term r}, i.e.\ the least reflexive and transitive
  34.101  relation containing @{term r}. The latter is easily formalized
  34.102 -*}
  34.103 +\<close>
  34.104  
  34.105  inductive_set
  34.106    rtc2 :: "('a \<times> 'a)set \<Rightarrow> ('a \<times> 'a)set"
  34.107 @@ -128,10 +128,10 @@
  34.108  | "(x,x) \<in> rtc2 r"
  34.109  | "\<lbrakk> (x,y) \<in> rtc2 r; (y,z) \<in> rtc2 r \<rbrakk> \<Longrightarrow> (x,z) \<in> rtc2 r"
  34.110  
  34.111 -text{*\noindent
  34.112 +text\<open>\noindent
  34.113  and the equivalence of the two definitions is easily shown by the obvious rule
  34.114  inductions:
  34.115 -*}
  34.116 +\<close>
  34.117  
  34.118  lemma "(x,y) \<in> rtc2 r \<Longrightarrow> (x,y) \<in> r*"
  34.119  apply(erule rtc2.induct)
  34.120 @@ -146,7 +146,7 @@
  34.121  apply(blast intro: rtc2.intros)
  34.122  done
  34.123  
  34.124 -text{*
  34.125 +text\<open>
  34.126  So why did we start with the first definition? Because it is simpler. It
  34.127  contains only two rules, and the single step rule is simpler than
  34.128  transitivity.  As a consequence, @{thm[source]rtc.induct} is simpler than
  34.129 @@ -164,7 +164,7 @@
  34.130  @{term rtc} where @{thm[source]rtc_step} is replaced by its converse as shown
  34.131  in exercise~\ref{ex:converse-rtc-step}.
  34.132  \end{exercise}
  34.133 -*}
  34.134 +\<close>
  34.135  (*<*)
  34.136  lemma rtc_step2[rule_format]: "(x,y) : r* \<Longrightarrow> (y,z) : r --> (x,z) : r*"
  34.137  apply(erule rtc.induct)
    35.1 --- a/src/Doc/Tutorial/Misc/AdvancedInd.thy	Thu Jan 11 13:48:17 2018 +0100
    35.2 +++ b/src/Doc/Tutorial/Misc/AdvancedInd.thy	Fri Jan 12 14:08:53 2018 +0100
    35.3 @@ -2,29 +2,29 @@
    35.4  theory AdvancedInd imports Main begin
    35.5  (*>*)
    35.6  
    35.7 -text{*\noindent
    35.8 +text\<open>\noindent
    35.9  Now that we have learned about rules and logic, we take another look at the
   35.10  finer points of induction.  We consider two questions: what to do if the
   35.11  proposition to be proved is not directly amenable to induction
   35.12  (\S\ref{sec:ind-var-in-prems}), and how to utilize (\S\ref{sec:complete-ind})
   35.13  and even derive (\S\ref{sec:derive-ind}) new induction schemas. We conclude
   35.14  with an extended example of induction (\S\ref{sec:CTL-revisited}).
   35.15 -*}
   35.16 +\<close>
   35.17  
   35.18 -subsection{*Massaging the Proposition*}
   35.19 +subsection\<open>Massaging the Proposition\<close>
   35.20  
   35.21 -text{*\label{sec:ind-var-in-prems}
   35.22 +text\<open>\label{sec:ind-var-in-prems}
   35.23  Often we have assumed that the theorem to be proved is already in a form
   35.24  that is amenable to induction, but sometimes it isn't.
   35.25  Here is an example.
   35.26  Since @{term"hd"} and @{term"last"} return the first and last element of a
   35.27  non-empty list, this lemma looks easy to prove:
   35.28 -*}
   35.29 +\<close>
   35.30  
   35.31  lemma "xs \<noteq> [] \<Longrightarrow> hd(rev xs) = last xs"
   35.32  apply(induct_tac xs)
   35.33  
   35.34 -txt{*\noindent
   35.35 +txt\<open>\noindent
   35.36  But induction produces the warning
   35.37  \begin{quote}\tt
   35.38  Induction variable occurs also among premises!
   35.39 @@ -51,14 +51,14 @@
   35.40  implication~(@{text"\<longrightarrow>"}), letting
   35.41  \attrdx{rule_format} (\S\ref{sec:forward}) convert the
   35.42  result to the usual @{text"\<Longrightarrow>"} form:
   35.43 -*}
   35.44 +\<close>
   35.45  (*<*)oops(*>*)
   35.46  lemma hd_rev [rule_format]: "xs \<noteq> [] \<longrightarrow> hd(rev xs) = last xs"
   35.47  (*<*)
   35.48  apply(induct_tac xs)
   35.49  (*>*)
   35.50  
   35.51 -txt{*\noindent
   35.52 +txt\<open>\noindent
   35.53  This time, induction leaves us with a trivial base case:
   35.54  @{subgoals[display,indent=0,goals_limit=1]}
   35.55  And @{text"auto"} completes the proof.
   35.56 @@ -109,12 +109,12 @@
   35.57  Unfortunately, this induction schema cannot be expressed as a
   35.58  single theorem because it depends on the number of free variables in $t$ ---
   35.59  the notation $\overline{y}$ is merely an informal device.
   35.60 -*}
   35.61 +\<close>
   35.62  (*<*)by auto(*>*)
   35.63  
   35.64 -subsection{*Beyond Structural and Recursion Induction*}
   35.65 +subsection\<open>Beyond Structural and Recursion Induction\<close>
   35.66  
   35.67 -text{*\label{sec:complete-ind}
   35.68 +text\<open>\label{sec:complete-ind}
   35.69  So far, inductive proofs were by structural induction for
   35.70  primitive recursive functions and recursion induction for total recursive
   35.71  functions. But sometimes structural induction is awkward and there is no
   35.72 @@ -130,12 +130,12 @@
   35.73  @{thm[display]"nat_less_induct"[no_vars]}
   35.74  As an application, we prove a property of the following
   35.75  function:
   35.76 -*}
   35.77 +\<close>
   35.78  
   35.79  axiomatization f :: "nat \<Rightarrow> nat"
   35.80    where f_ax: "f(f(n)) < f(Suc(n))" for n :: nat
   35.81  
   35.82 -text{*
   35.83 +text\<open>
   35.84  \begin{warn}
   35.85  We discourage the use of axioms because of the danger of
   35.86  inconsistencies.  Axiom @{text f_ax} does
   35.87 @@ -148,35 +148,35 @@
   35.88  The axiom for @{term"f"} implies @{prop"n <= f n"}, which can
   35.89  be proved by induction on \mbox{@{term"f n"}}. Following the recipe outlined
   35.90  above, we have to phrase the proposition as follows to allow induction:
   35.91 -*}
   35.92 +\<close>
   35.93  
   35.94  lemma f_incr_lem: "\<forall>i. k = f i \<longrightarrow> i \<le> f i"
   35.95  
   35.96 -txt{*\noindent
   35.97 +txt\<open>\noindent
   35.98  To perform induction on @{term k} using @{thm[source]nat_less_induct}, we use
   35.99  the same general induction method as for recursion induction (see
  35.100  \S\ref{sec:fun-induction}):
  35.101 -*}
  35.102 +\<close>
  35.103  
  35.104  apply(induct_tac k rule: nat_less_induct)
  35.105  
  35.106 -txt{*\noindent
  35.107 +txt\<open>\noindent
  35.108  We get the following proof state:
  35.109  @{subgoals[display,indent=0,margin=65]}
  35.110  After stripping the @{text"\<forall>i"}, the proof continues with a case
  35.111  distinction on @{term"i"}. The case @{prop"i = (0::nat)"} is trivial and we focus on
  35.112  the other case:
  35.113 -*}
  35.114 +\<close>
  35.115  
  35.116  apply(rule allI)
  35.117  apply(case_tac i)
  35.118   apply(simp)
  35.119 -txt{*
  35.120 +txt\<open>
  35.121  @{subgoals[display,indent=0]}
  35.122 -*}
  35.123 +\<close>
  35.124  by(blast intro!: f_ax Suc_leI intro: le_less_trans)
  35.125  
  35.126 -text{*\noindent
  35.127 +text\<open>\noindent
  35.128  If you find the last step puzzling, here are the two lemmas it employs:
  35.129  \begin{isabelle}
  35.130  @{thm Suc_leI[no_vars]}
  35.131 @@ -203,19 +203,19 @@
  35.132  proofs are easy to write but hard to read and understand.
  35.133  
  35.134  The desired result, @{prop"i <= f i"}, follows from @{thm[source]f_incr_lem}:
  35.135 -*}
  35.136 +\<close>
  35.137  
  35.138  lemmas f_incr = f_incr_lem[rule_format, OF refl]
  35.139  
  35.140 -text{*\noindent
  35.141 +text\<open>\noindent
  35.142  The final @{thm[source]refl} gets rid of the premise @{text"?k = f ?i"}. 
  35.143  We could have included this derivation in the original statement of the lemma:
  35.144 -*}
  35.145 +\<close>
  35.146  
  35.147  lemma f_incr[rule_format, OF refl]: "\<forall>i. k = f i \<longrightarrow> i \<le> f i"
  35.148  (*<*)oops(*>*)
  35.149  
  35.150 -text{*
  35.151 +text\<open>
  35.152  \begin{exercise}
  35.153  From the axiom and lemma for @{term"f"}, show that @{term"f"} is the
  35.154  identity function.
  35.155 @@ -235,32 +235,32 @@
  35.156  which is a special case of @{thm[source]measure_induct}
  35.157  @{thm[display]measure_induct[no_vars]}
  35.158  where @{term f} may be any function into type @{typ nat}.
  35.159 -*}
  35.160 +\<close>
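[Editorial sketch, not part of the changeset: nat_less_induct really is the
f = identity instance of measure_induct. The script below is an assumption of
this edit, not a proof taken from the sources.]

(* instantiate the measure with the identity on nat *)
lemma "(\<And>n::nat. \<forall>m<n. P m \<Longrightarrow> P n) \<Longrightarrow> P n"
apply(induct_tac n rule: measure_induct [where f = "\<lambda>n::nat. n"])
apply(blast)
done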
  35.161  
  35.162 -subsection{*Derivation of New Induction Schemas*}
  35.163 +subsection\<open>Derivation of New Induction Schemas\<close>
  35.164  
  35.165 -text{*\label{sec:derive-ind}
  35.166 +text\<open>\label{sec:derive-ind}
  35.167  \index{induction!deriving new schemas}%
  35.168  Induction schemas are ordinary theorems and you can derive new ones
  35.169  whenever you wish.  This section shows you how, using the example
  35.170  of @{thm[source]nat_less_induct}. Assume we only have structural induction
  35.171  available for @{typ"nat"} and want to derive complete induction.  We
  35.172  must generalize the statement as shown:
  35.173 -*}
  35.174 +\<close>
  35.175  
  35.176  lemma induct_lem: "(\<And>n::nat. \<forall>m<n. P m \<Longrightarrow> P n) \<Longrightarrow> \<forall>m<n. P m"
  35.177  apply(induct_tac n)
  35.178  
  35.179 -txt{*\noindent
  35.180 +txt\<open>\noindent
  35.181  The base case is vacuously true. For the induction step (@{prop"m <
  35.182  Suc n"}) we distinguish two cases: case @{prop"m < n"} is true by induction
  35.183  hypothesis and case @{prop"m = n"} follows from the assumption, again using
  35.184  the induction hypothesis:
  35.185 -*}
  35.186 +\<close>
  35.187   apply(blast)
  35.188  by(blast elim: less_SucE)
  35.189  
  35.190 -text{*\noindent
  35.191 +text\<open>\noindent
  35.192  The elimination rule @{thm[source]less_SucE} expresses the case distinction:
  35.193  @{thm[display]"less_SucE"[no_vars]}
  35.194  
  35.195 @@ -270,16 +270,16 @@
  35.196  and remove the trivial condition @{prop"n < Suc n"}. Fortunately, this
  35.197  happens automatically when we add the lemma as a new premise to the
  35.198  desired goal:
  35.199 -*}
  35.200 +\<close>
  35.201  
  35.202  theorem nat_less_induct: "(\<And>n::nat. \<forall>m<n. P m \<Longrightarrow> P n) \<Longrightarrow> P n"
  35.203  by(insert induct_lem, blast)
  35.204  
  35.205 -text{*
  35.206 +text\<open>
  35.207  HOL already provides the mother of
  35.208  all inductions, well-founded induction (see \S\ref{sec:Well-founded}).  For
  35.209  example theorem @{thm[source]nat_less_induct} is
  35.210  a special case of @{thm[source]wf_induct} where @{term r} is @{text"<"} on
  35.211  @{typ nat}. The details can be found in theory \isa{Wellfounded_Recursion}.
  35.212 -*}
  35.213 +\<close>
  35.214  (*<*)end(*>*)
    36.1 --- a/src/Doc/Tutorial/Misc/Itrev.thy	Thu Jan 11 13:48:17 2018 +0100
    36.2 +++ b/src/Doc/Tutorial/Misc/Itrev.thy	Fri Jan 12 14:08:53 2018 +0100
    36.3 @@ -5,9 +5,9 @@
    36.4  declare [[names_unique = false]]
    36.5  (*>*)
    36.6  
    36.7 -section{*Induction Heuristics*}
    36.8 +section\<open>Induction Heuristics\<close>
    36.9  
   36.10 -text{*\label{sec:InductionHeuristics}
   36.11 +text\<open>\label{sec:InductionHeuristics}
   36.12  \index{induction heuristics|(}%
   36.13  The purpose of this section is to illustrate some simple heuristics for
   36.14  inductive proofs. The first one we have already mentioned in our initial
   36.15 @@ -43,13 +43,13 @@
   36.16  @{text"@"} is linear in its first argument.  A linear time version of
   36.17  @{term"rev"} requires an extra argument where the result is accumulated
   36.18  gradually, using only~@{text"#"}:
   36.19 -*}
   36.20 +\<close>
   36.21  
   36.22  primrec itrev :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
   36.23  "itrev []     ys = ys" |
   36.24  "itrev (x#xs) ys = itrev xs (x#ys)"
   36.25  
   36.26 -text{*\noindent
   36.27 +text\<open>\noindent
   36.28  The behaviour of \cdx{itrev} is simple: it reverses
   36.29  its first argument by stacking its elements onto the second argument,
   36.30  and returning that second argument when the first one becomes
   36.31  empty.  Note that @{term"itrev"} is tail-recursive: it can be compiled into a loop.
   36.32  
   36.33  Naturally, we would like to show that @{term"itrev"} does indeed reverse
   36.34  its first argument provided the second one is empty:
   36.35 -*}
   36.36 +\<close>
   36.37  
   36.38  lemma "itrev xs [] = rev xs"
   36.39  
   36.40 -txt{*\noindent
   36.41 +txt\<open>\noindent
   36.42  There is no choice as to the induction variable, and we immediately simplify:
   36.43 -*}
   36.44 +\<close>
   36.45  
   36.46  apply(induct_tac xs, simp_all)
   36.47  
   36.48 -txt{*\noindent
   36.49 +txt\<open>\noindent
   36.50  Unfortunately, this attempt does not prove
   36.51  the induction step:
   36.52  @{subgoals[display,indent=0,margin=70]}
   36.53 @@ -80,11 +80,11 @@
   36.54  \end{quote}
   36.55  Of course one cannot do this na\"{\i}vely: @{term"itrev xs ys = rev xs"} is
   36.56  just not true.  The correct generalization is
   36.57 -*}
   36.58 +\<close>
   36.59  (*<*)oops(*>*)
   36.60  lemma "itrev xs ys = rev xs @ ys"
   36.61  (*<*)apply(induct_tac xs, simp_all)(*>*)
   36.62 -txt{*\noindent
   36.63 +txt\<open>\noindent
   36.64  If @{term"ys"} is replaced by @{term"[]"}, the right-hand side simplifies to
   36.65  @{term"rev xs"}, as required.
   36.66  
   36.67 @@ -100,14 +100,14 @@
   36.68  the subgoal, but the induction hypothesis needs to be applied with
   36.69  @{term"a # ys"} instead of @{term"ys"}. Hence we prove the theorem
   36.70  for all @{term"ys"} instead of a fixed one:
   36.71 -*}
   36.72 +\<close>
   36.73  (*<*)oops(*>*)
   36.74  lemma "\<forall>ys. itrev xs ys = rev xs @ ys"
   36.75  (*<*)
   36.76  by(induct_tac xs, simp_all)
   36.77  (*>*)
   36.78  
   36.79 -text{*\noindent
   36.80 +text\<open>\noindent
   36.81  This time induction on @{term"xs"} followed by simplification succeeds. This
   36.82  leads to another heuristic for generalization:
   36.83  \begin{quote}
   36.84 @@ -139,7 +139,7 @@
   36.85  Additionally, you can read \S\ref{sec:advanced-ind}
   36.86  to learn about some advanced techniques for inductive proofs.%
   36.87  \index{induction heuristics|)}
   36.88 -*}
   36.89 +\<close>
   36.90  (*<*)
   36.91  declare [[names_unique = true]]
   36.92  end
    37.1 --- a/src/Doc/Tutorial/Misc/Option2.thy	Thu Jan 11 13:48:17 2018 +0100
    37.2 +++ b/src/Doc/Tutorial/Misc/Option2.thy	Fri Jan 12 14:08:53 2018 +0100
    37.3 @@ -4,14 +4,14 @@
    37.4  hide_type option
    37.5  (*>*)
    37.6  
    37.7 -text{*\indexbold{*option (type)}\indexbold{*None (constant)}%
    37.8 +text\<open>\indexbold{*option (type)}\indexbold{*None (constant)}%
    37.9  \indexbold{*Some (constant)}
   37.10  Our final datatype is very simple but still eminently useful:
   37.11 -*}
   37.12 +\<close>
   37.13  
   37.14  datatype 'a option = None | Some 'a
   37.15  
   37.16 -text{*\noindent
   37.17 +text\<open>\noindent
   37.18  Frequently one needs to add a distinguished element to some existing type.
   37.19  For example, type @{text"t option"} can model the result of a computation that
   37.20  may either terminate with an error (represented by @{const None}) or return
   37.21 @@ -21,7 +21,7 @@
   37.22  customized constructors like @{term Error} and @{term Infinity},
   37.23  but it is often simpler to use @{text option}. For an application see
   37.24  \S\ref{sec:Trie}.
   37.25 -*}
   37.26 +\<close>
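[Editorial sketch, not part of the changeset: a hypothetical function that
uses None to signal the error case described above.]

(* a head function made total by the option type *)
primrec safe_hd :: "'a list \<Rightarrow> 'a option" where
"safe_hd []       = None" |
"safe_hd (x # xs) = Some x"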
   37.27  (*<*)
   37.28  (*
   37.29  definition infplus :: "nat option \<Rightarrow> nat option \<Rightarrow> nat option" where
    38.1 --- a/src/Doc/Tutorial/Misc/Plus.thy	Thu Jan 11 13:48:17 2018 +0100
    38.2 +++ b/src/Doc/Tutorial/Misc/Plus.thy	Fri Jan 12 14:08:53 2018 +0100
    38.3 @@ -2,13 +2,13 @@
    38.4  theory Plus imports Main begin
    38.5  (*>*)
    38.6  
    38.7 -text{*\noindent Define the following addition function *}
    38.8 +text\<open>\noindent Define the following addition function\<close>
    38.9  
   38.10  primrec add :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
   38.11  "add m 0 = m" |
   38.12  "add m (Suc n) = add (Suc m) n"
   38.13  
   38.14 -text{*\noindent and prove*}
   38.15 +text\<open>\noindent and prove\<close>
   38.16  (*<*)
   38.17  lemma [simp]: "!m. add m n = m+n"
   38.18  apply(induct_tac n)
    39.1 --- a/src/Doc/Tutorial/Misc/Tree.thy	Thu Jan 11 13:48:17 2018 +0100
    39.2 +++ b/src/Doc/Tutorial/Misc/Tree.thy	Fri Jan 12 14:08:53 2018 +0100
    39.3 @@ -2,9 +2,9 @@
    39.4  theory Tree imports Main begin
    39.5  (*>*)
    39.6  
    39.7 -text{*\noindent
    39.8 +text\<open>\noindent
    39.9  Define the datatype of \rmindex{binary trees}:
   39.10 -*}
   39.11 +\<close>
   39.12  
   39.13  datatype 'a tree = Tip | Node "'a tree" 'a "'a tree"(*<*)
   39.14  
   39.15 @@ -12,10 +12,10 @@
   39.16  "mirror Tip = Tip" |
   39.17  "mirror (Node l x r) = Node (mirror r) x (mirror l)"(*>*)
   39.18  
   39.19 -text{*\noindent
   39.20 +text\<open>\noindent
   39.21  Define a function @{term"mirror"} that mirrors a binary tree
   39.22  by swapping subtrees recursively. Prove
   39.23 -*}
   39.24 +\<close>
   39.25  
   39.26  lemma mirror_mirror: "mirror(mirror t) = t"
   39.27  (*<*)
   39.28 @@ -27,10 +27,10 @@
   39.29  "flatten (Node l x r) = flatten l @ [x] @ flatten r"
   39.30  (*>*)
   39.31  
   39.32 -text{*\noindent
   39.33 +text\<open>\noindent
   39.34  Define a function @{term"flatten"} that flattens a tree into a list
   39.35  by traversing it in infix order. Prove
   39.36 -*}
   39.37 +\<close>
   39.38  
   39.39  lemma "flatten(mirror t) = rev(flatten t)"
   39.40  (*<*)
    40.1 --- a/src/Doc/Tutorial/Misc/Tree2.thy	Thu Jan 11 13:48:17 2018 +0100
    40.2 +++ b/src/Doc/Tutorial/Misc/Tree2.thy	Fri Jan 12 14:08:53 2018 +0100
    40.3 @@ -2,17 +2,17 @@
    40.4  theory Tree2 imports Tree begin
    40.5  (*>*)
    40.6  
    40.7 -text{*\noindent In Exercise~\ref{ex:Tree} we defined a function
    40.8 +text\<open>\noindent In Exercise~\ref{ex:Tree} we defined a function
    40.9  @{term"flatten"} from trees to lists. The straightforward version of
   40.10  @{term"flatten"} is based on @{text"@"} and is thus, like @{term"rev"},
   40.11  quadratic. A linear time version of @{term"flatten"} again requires an extra
   40.12 -argument, the accumulator. Define *}
   40.13 +argument, the accumulator. Define\<close>
   40.14  (*<*)primrec(*>*)flatten2 :: "'a tree \<Rightarrow> 'a list \<Rightarrow> 'a list"(*<*)where
   40.15  "flatten2 Tip xs = xs" |
   40.16  "flatten2 (Node l x r) xs = flatten2 l (x#(flatten2 r xs))"
   40.17  (*>*)
   40.18  
   40.19 -text{*\noindent and prove*}
   40.20 +text\<open>\noindent and prove\<close>
   40.21  (*<*)
   40.22  lemma [simp]: "!xs. flatten2 t xs = flatten t @ xs"
   40.23  apply(induct_tac t)
    41.1 --- a/src/Doc/Tutorial/Misc/appendix.thy	Thu Jan 11 13:48:17 2018 +0100
    41.2 +++ b/src/Doc/Tutorial/Misc/appendix.thy	Fri Jan 12 14:08:53 2018 +0100
    41.3 @@ -2,7 +2,7 @@
    41.4  imports Main
    41.5  begin(*>*)
    41.6  
    41.7 -text{*
    41.8 +text\<open>
    41.9  \begin{table}[htbp]
   41.10  \begin{center}
   41.11  \begin{tabular}{lll}
   41.12 @@ -28,6 +28,6 @@
   41.13  \label{tab:overloading}
   41.14  \end{center}
   41.15  \end{table}
   41.16 -*}
   41.17 +\<close>
   41.18  
   41.19  (*<*)end(*>*)
    42.1 --- a/src/Doc/Tutorial/Misc/case_exprs.thy	Thu Jan 11 13:48:17 2018 +0100
    42.2 +++ b/src/Doc/Tutorial/Misc/case_exprs.thy	Fri Jan 12 14:08:53 2018 +0100
    42.3 @@ -2,7 +2,7 @@
    42.4  theory case_exprs imports Main begin
    42.5  (*>*)
    42.6  
    42.7 -text{*
    42.8 +text\<open>
    42.9  \subsection{Case Expressions}
   42.10  \label{sec:case-expressions}\index{*case expressions}%
   42.11  HOL also features \isa{case}-expressions for analyzing
   42.12 @@ -50,20 +50,20 @@
   42.13  it works for any datatype.  In some cases, induction is overkill and a case
   42.14  distinction over all constructors of the datatype suffices.  This is performed
   42.15  by \methdx{case_tac}.  Here is a trivial example:
   42.16 -*}
   42.17 +\<close>
   42.18  
   42.19  lemma "(case xs of [] \<Rightarrow> [] | y#ys \<Rightarrow> xs) = xs"
   42.20  apply(case_tac xs)
   42.21  
   42.22 -txt{*\noindent
   42.23 +txt\<open>\noindent
   42.24  results in the proof state
   42.25  @{subgoals[display,indent=0,margin=65]}
   42.26  which is solved automatically:
   42.27 -*}
   42.28 +\<close>
   42.29  
   42.30  apply(auto)
   42.31  (*<*)done(*>*)
   42.32 -text{*
   42.33 +text\<open>
   42.34  Note that we do not need to give a lemma a name if we do not intend to refer
   42.35  to it explicitly in the future.
   42.36  Other basic laws about a datatype are applied automatically during
   42.37 @@ -81,7 +81,7 @@
   42.38    the @{term xs} as a new free variable distinct from the bound
   42.39    @{term xs} in the goal.
   42.40  \end{warn}
   42.41 -*}
   42.42 +\<close>
   42.43  
   42.44  (*<*)
   42.45  end
    43.1 --- a/src/Doc/Tutorial/Misc/fakenat.thy	Thu Jan 11 13:48:17 2018 +0100
    43.2 +++ b/src/Doc/Tutorial/Misc/fakenat.thy	Fri Jan 12 14:08:53 2018 +0100
    43.3 @@ -2,11 +2,11 @@
    43.4  theory fakenat imports Main begin
    43.5  (*>*)
    43.6  
    43.7 -text{*\noindent
    43.8 +text\<open>\noindent
    43.9  The type \tydx{nat} of natural
   43.10  numbers is predefined to have the constructors \cdx{0} and~\cdx{Suc}.
   43.11  It behaves approximately as if it were declared like this:
   43.12 -*}
   43.13 +\<close>
   43.14  
   43.15  datatype nat = zero ("0") | Suc nat
   43.16  (*<*)
    44.1 --- a/src/Doc/Tutorial/Misc/natsum.thy	Thu Jan 11 13:48:17 2018 +0100
    44.2 +++ b/src/Doc/Tutorial/Misc/natsum.thy	Fri Jan 12 14:08:53 2018 +0100
    44.3 @@ -1,26 +1,26 @@
    44.4  (*<*)
    44.5  theory natsum imports Main begin
    44.6  (*>*)
    44.7 -text{*\noindent
    44.8 +text\<open>\noindent
    44.9  In particular, there are @{text"case"}-expressions, for example
   44.10  @{term[display]"case n of 0 => 0 | Suc m => m"}
   44.11  primitive recursion, for example
   44.12 -*}
   44.13 +\<close>
   44.14  
   44.15  primrec sum :: "nat \<Rightarrow> nat" where
   44.16  "sum 0 = 0" |
   44.17  "sum (Suc n) = Suc n + sum n"
   44.18  
   44.19 -text{*\noindent
   44.20 +text\<open>\noindent
   44.21  and induction, for example
   44.22 -*}
   44.23 +\<close>
   44.24  
   44.25  lemma "sum n + sum n = n*(Suc n)"
   44.26  apply(induct_tac n)
   44.27  apply(auto)
   44.28  done
   44.29  
   44.30 -text{*\newcommand{\mystar}{*%
   44.31 +text\<open>\newcommand{\mystar}{*%
   44.32  }
   44.33  \index{arithmetic operations!for \protect\isa{nat}}%
   44.34  The arithmetic operations \isadxboldpos{+}{$HOL2arithfun},
   44.35 @@ -73,40 +73,40 @@
   44.36  Both @{text auto} and @{text simp}
   44.37  (a method introduced below, \S\ref{sec:Simplification}) prove 
   44.38  simple arithmetic goals automatically:
   44.39 -*}
   44.40 +\<close>
   44.41  
   44.42  lemma "\<lbrakk> \<not> m < n; m < n + (1::nat) \<rbrakk> \<Longrightarrow> m = n"
   44.43  (*<*)by(auto)(*>*)
   44.44  
   44.45 -text{*\noindent
   44.46 +text\<open>\noindent
   44.47  For efficiency's sake, this built-in prover ignores quantified formulae,
   44.48  many logical connectives, and all arithmetic operations apart from addition.
   44.49  In consequence, @{text auto} and @{text simp} cannot prove this slightly more complex goal:
   44.50 -*}
   44.51 +\<close>
   44.52  
   44.53  lemma "m \<noteq> (n::nat) \<Longrightarrow> m < n \<or> n < m"
   44.54  (*<*)by(arith)(*>*)
   44.55  
   44.56 -text{*\noindent The method \methdx{arith} is more general.  It attempts to
   44.57 +text\<open>\noindent The method \methdx{arith} is more general.  It attempts to
   44.58  prove the first subgoal provided it is a \textbf{linear arithmetic} formula.
   44.59  Such formulas may involve the usual logical connectives (@{text"\<not>"},
   44.60  @{text"\<and>"}, @{text"\<or>"}, @{text"\<longrightarrow>"}, @{text"="},
   44.61  @{text"\<forall>"}, @{text"\<exists>"}), the relations @{text"="},
   44.62  @{text"\<le>"} and @{text"<"}, and the operations @{text"+"}, @{text"-"},
   44.63 -@{term min} and @{term max}.  For example, *}
   44.64 +@{term min} and @{term max}.  For example,\<close>
   44.65  
   44.66  lemma "min i (max j (k*k)) = max (min (k*k) i) (min i (j::nat))"
   44.67  apply(arith)
   44.68  (*<*)done(*>*)
   44.69  
   44.70 -text{*\noindent
   44.71 +text\<open>\noindent
   44.72  succeeds because @{term"k*k"} can be treated as atomic. In contrast,
   44.73 -*}
   44.74 +\<close>
   44.75  
   44.76  lemma "n*n = n+1 \<Longrightarrow> n=0"
   44.77  (*<*)oops(*>*)
   44.78  
   44.79 -text{*\noindent
   44.80 +text\<open>\noindent
   44.81  is not proved by @{text arith} because the proof relies 
   44.82  on properties of multiplication. Only multiplication by numerals (which is
   44.83  the same as iterated addition) is taken into account.
   44.84 @@ -122,7 +122,7 @@
   44.85  If the formula involves quantifiers, @{text arith} may take
   44.86  super-exponential time and space.
   44.87  \end{warn}
   44.88 -*}
   44.89 +\<close>
   44.90  
   44.91  (*<*)
   44.92  end
    45.1 --- a/src/Doc/Tutorial/Misc/pairs2.thy	Thu Jan 11 13:48:17 2018 +0100
    45.2 +++ b/src/Doc/Tutorial/Misc/pairs2.thy	Fri Jan 12 14:08:53 2018 +0100
    45.3 @@ -1,7 +1,7 @@
    45.4  (*<*)
    45.5  theory pairs2 imports Main begin
    45.6  (*>*)
    45.7 -text{*\label{sec:pairs}\index{pairs and tuples}
    45.8 +text\<open>\label{sec:pairs}\index{pairs and tuples}
    45.9  HOL also has ordered pairs: \isa{($a@1$,$a@2$)} is of type $\tau@1$
   45.10  \indexboldpos{\isasymtimes}{$Isatype} $\tau@2$ provided each $a@i$ is of type
   45.11  $\tau@i$. The functions \cdx{fst} and
   45.12 @@ -29,7 +29,7 @@
   45.13  records are preferable.
   45.14  \end{itemize}
   45.15  For more information on pairs and records see Chapter~\ref{ch:more-types}.
   45.16 -*}
   45.17 +\<close>
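[Editorial example, not part of the changeset: the projection behaviour of
fst and snd, discharged by simp.]

lemma "fst (a, b) = a \<and> snd (a, b) = b"
by simp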
   45.18  (*<*)
   45.19  end
   45.20  (*>*)
    46.1 --- a/src/Doc/Tutorial/Misc/prime_def.thy	Thu Jan 11 13:48:17 2018 +0100
    46.2 +++ b/src/Doc/Tutorial/Misc/prime_def.thy	Fri Jan 12 14:08:53 2018 +0100
    46.3 @@ -2,7 +2,7 @@
    46.4  theory prime_def imports Main begin
    46.5  consts prime :: "nat \<Rightarrow> bool"
    46.6  (*>*)
    46.7 -text{*
    46.8 +text\<open>
    46.9  \begin{warn}
   46.10  A common mistake when writing definitions is to introduce extra free
   46.11  variables on the right-hand side.  Consider the following, flawed definition
   46.12 @@ -14,7 +14,7 @@
   46.13  The correct version is
   46.14  @{term[display,quotes]"prime(p) == 1 < p & (!m. m dvd p --> (m=1 | m=p))"}
   46.15  \end{warn}
   46.16 -*}
   46.17 +\<close>
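[Editorial sketch, not part of the changeset: the displayed term turned into
an actual definition. It must live in a fresh theory, since the hidden
preamble above already declares prime as a bare constant.]

definition prime :: "nat \<Rightarrow> bool" where
"prime p \<equiv> 1 < p \<and> (\<forall>m. m dvd p \<longrightarrow> m = 1 \<or> m = p)"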
   46.18  (*<*)
   46.19  end
   46.20  (*>*)
    47.1 --- a/src/Doc/Tutorial/Misc/simp.thy	Thu Jan 11 13:48:17 2018 +0100
    47.2 +++ b/src/Doc/Tutorial/Misc/simp.thy	Fri Jan 12 14:08:53 2018 +0100
    47.3 @@ -2,9 +2,9 @@
    47.4  theory simp imports Main begin
    47.5  (*>*)
    47.6  
    47.7 -subsection{*Simplification Rules*}
    47.8 +subsection\<open>Simplification Rules\<close>
    47.9  
   47.10 -text{*\index{simplification rules}
   47.11 +text\<open>\index{simplification rules}
   47.12  To facilitate simplification,  
   47.13  the attribute @{text"[simp]"}\index{*simp (attribute)}
   47.14  declares theorems to be simplification rules, which the simplifier
   47.15 @@ -49,11 +49,11 @@
   47.16    different path) on $A$, it is not defined what the simplification attribute
   47.17    of that theorem will be in $C$: it could be either.
   47.18  \end{warn}
   47.19 -*} 
   47.20 +\<close> 
   47.21  
   47.22 -subsection{*The {\tt\slshape simp}  Method*}
   47.23 +subsection\<open>The {\tt\slshape simp}  Method\<close>
   47.24  
   47.25 -text{*\index{*simp (method)|bold}
   47.26 +text\<open>\index{*simp (method)|bold}
   47.27  The general format of the simplification method is
   47.28  \begin{quote}
   47.29  @{text simp} \textit{list of modifiers}
   47.30 @@ -65,11 +65,11 @@
   47.31  only the first subgoal and may thus need to be repeated --- use
   47.32  \methdx{simp_all} to simplify all subgoals.
   47.33  If nothing changes, @{text simp} fails.
   47.34 -*}
   47.35 +\<close>
   47.36  
   47.37 -subsection{*Adding and Deleting Simplification Rules*}
   47.38 +subsection\<open>Adding and Deleting Simplification Rules\<close>
   47.39  
   47.40 -text{*
   47.41 +text\<open>
   47.42  \index{simplification rules!adding and deleting}%
   47.43  If a certain theorem is merely needed in a few proofs by simplification,
   47.44  we do not need to make it a global simplification rule. Instead we can modify
   47.45 @@ -88,41 +88,41 @@
   47.46  \begin{quote}
   47.47  \isacommand{apply}@{text"(simp add: mod_mult_distrib add_mult_distrib)"}
   47.48  \end{quote}
   47.49 -*}
   47.50 +\<close>
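[Editorial example, not part of the changeset: adding a rule for a single
simplification step. mult.commute is permutative, so simp applies it with
ordered rewriting and both sides normalise to the same term.]

lemma "m * n = n * (m::nat)"
by (simp add: mult.commute)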
   47.51  
   47.52 -subsection{*Assumptions*}
   47.53 +subsection\<open>Assumptions\<close>
   47.54  
   47.55 -text{*\index{simplification!with/of assumptions}
   47.56 +text\<open>\index{simplification!with/of assumptions}
   47.57  By default, assumptions are part of the simplification process: they are used
   47.58  as simplification rules and are simplified themselves. For example:
   47.59 -*}
   47.60 +\<close>
   47.61  
   47.62  lemma "\<lbrakk> xs @ zs = ys @ xs; [] @ xs = [] @ [] \<rbrakk> \<Longrightarrow> ys = zs"
   47.63  apply simp
   47.64  done
   47.65  
   47.66 -text{*\noindent
   47.67 +text\<open>\noindent
   47.68  The second assumption simplifies to @{term"xs = []"}, which in turn
   47.69  simplifies the first assumption to @{term"zs = ys"}, thus reducing the
   47.70  conclusion to @{term"ys = ys"} and hence to @{term"True"}.
   47.71  
   47.72  In some cases, using the assumptions can lead to nontermination:
   47.73 -*}
   47.74 +\<close>
   47.75  
   47.76  lemma "\<forall>x. f x = g (f (g x)) \<Longrightarrow> f [] = f [] @ []"
   47.77  
   47.78 -txt{*\noindent
   47.79 +txt\<open>\noindent
   47.80  An unmodified application of @{text"simp"} loops.  The culprit is the
   47.81  simplification rule @{term"f x = g (f (g x))"}, which is extracted from
   47.82  the assumption.  (Isabelle notices certain simple forms of
   47.83  nontermination but not this one.)  The problem can be circumvented by
   47.84  telling the simplifier to ignore the assumptions:
   47.85 -*}
   47.86 +\<close>
   47.87  
   47.88  apply(simp (no_asm))
   47.89  done
   47.90  
   47.91 -text{*\noindent
   47.92 +text\<open>\noindent
   47.93  Three modifiers influence the treatment of assumptions:
   47.94  \begin{description}
   47.95  \item[@{text"(no_asm)"}]\index{*no_asm (modifier)}
   47.96 @@ -145,11 +145,11 @@
   47.97  %positive, and from left to right, if $n$ is negative.
   47.98  %Beware that such rotations make proofs quite brittle.
   47.99  %\end{warn}
  47.100 -*}
  47.101 +\<close>
  47.102  
  47.103 -subsection{*Rewriting with Definitions*}
  47.104 +subsection\<open>Rewriting with Definitions\<close>
  47.105  
  47.106 -text{*\label{sec:Simp-with-Defs}\index{simplification!with definitions}
  47.107 +text\<open>\label{sec:Simp-with-Defs}\index{simplification!with definitions}
  47.108  Constant definitions (\S\ref{sec:ConstDefinitions}) can be used as
  47.109  simplification rules, but by default they are not: the simplifier does not
  47.110  expand them automatically.  Definitions are intended for introducing abstract
  47.111 @@ -159,32 +159,32 @@
  47.112  proofs more robust: if the definition has to be changed,
  47.113  only the proofs of the abstract properties will be affected.
  47.114  
  47.115 -For example, given *}
  47.116 +For example, given\<close>
  47.117  
  47.118  definition xor :: "bool \<Rightarrow> bool \<Rightarrow> bool" where
  47.119  "xor A B \<equiv> (A \<and> \<not>B) \<or> (\<not>A \<and> B)"
  47.120  
  47.121 -text{*\noindent
  47.122 +text\<open>\noindent
  47.123  we may want to prove
  47.124 -*}
  47.125 +\<close>
  47.126  
  47.127  lemma "xor A (\<not>A)"
  47.128  
  47.129 -txt{*\noindent
  47.130 +txt\<open>\noindent
  47.131  Typically, we begin by unfolding some definitions:
  47.132  \indexbold{definitions!unfolding}
  47.133 -*}
  47.134 +\<close>
  47.135  
  47.136  apply(simp only: xor_def)
  47.137  
  47.138 -txt{*\noindent
  47.139 +txt\<open>\noindent
  47.140  In this particular case, the resulting goal
  47.141  @{subgoals[display,indent=0]}
  47.142  can be proved by simplification. Thus we could have proved the lemma outright by
  47.143 -*}(*<*)oops lemma "xor A (\<not>A)"(*>*)
  47.144 +\<close>(*<*)oops lemma "xor A (\<not>A)"(*>*)
  47.145  apply(simp add: xor_def)
  47.146  (*<*)done(*>*)
  47.147 -text{*\noindent
  47.148 +text\<open>\noindent
  47.149  Of course we can also unfold definitions in the middle of a proof.
  47.150  
  47.151  \begin{warn}
  47.152 @@ -199,78 +199,78 @@
  47.153  one or several definitions, as in \isacommand{apply}\isa{(unfold xor_def)}.
  47.154  This can be useful in situations where \isa{simp} does too much.
  47.155  Warning: \isa{unfold} acts on all subgoals!
  47.156 -*}
  47.157 +\<close>
  47.158  
  47.159 -subsection{*Simplifying {\tt\slshape let}-Expressions*}
  47.160 +subsection\<open>Simplifying {\tt\slshape let}-Expressions\<close>
  47.161  
  47.162 -text{*\index{simplification!of \isa{let}-expressions}\index{*let expressions}%
  47.163 +text\<open>\index{simplification!of \isa{let}-expressions}\index{*let expressions}%
  47.164  Proving a goal containing \isa{let}-expressions almost invariably requires the
  47.165  @{text"let"}-con\-structs to be expanded at some point. Since
  47.166  @{text"let"}\ldots\isa{=}\ldots@{text"in"}{\ldots} is just syntactic sugar for
  47.167  the predefined constant @{term"Let"}, expanding @{text"let"}-constructs
  47.168 -means rewriting with \tdx{Let_def}: *}
  47.169 +means rewriting with \tdx{Let_def}:\<close>
  47.170  
  47.171  lemma "(let xs = [] in xs@ys@xs) = ys"
  47.172  apply(simp add: Let_def)
  47.173  done
  47.174  
  47.175 -text{*
  47.176 +text\<open>
  47.177  If, in a particular context, there is no danger of a combinatorial explosion
  47.178  of nested @{text"let"}s, you could even simplify with @{thm[source]Let_def} by
  47.179  default:
  47.180 -*}
  47.181 +\<close>
  47.182  declare Let_def [simp]
  47.183  
  47.184 -subsection{*Conditional Simplification Rules*}
  47.185 +subsection\<open>Conditional Simplification Rules\<close>
  47.186  
  47.187 -text{*
  47.188 +text\<open>
  47.189  \index{conditional simplification rules}%
  47.190  So far all examples of rewrite rules were equations. The simplifier also
  47.191  accepts \emph{conditional} equations, for example
  47.192 -*}
  47.193 +\<close>
  47.194  
  47.195  lemma hd_Cons_tl[simp]: "xs \<noteq> []  \<Longrightarrow>  hd xs # tl xs = xs"
  47.196  apply(case_tac xs, simp, simp)
  47.197  done
  47.198  
  47.199 -text{*\noindent
  47.200 +text\<open>\noindent
  47.201  Note the use of ``\ttindexboldpos{,}{$Isar}'' to string together a
  47.202  sequence of methods. Assuming that the simplification rule
  47.203  @{term"(rev xs = []) = (xs = [])"}
  47.204  is present as well,
  47.205  the lemma below is proved by plain simplification:
  47.206 -*}
  47.207 +\<close>
  47.208  
  47.209  lemma "xs \<noteq> [] \<Longrightarrow> hd(rev xs) # tl(rev xs) = rev xs"
  47.210  (*<*)
  47.211  by(simp)
  47.212  (*>*)
  47.213 -text{*\noindent
  47.214 +text\<open>\noindent
  47.215  The conditional equation @{thm[source]hd_Cons_tl} above
  47.216  can simplify @{term"hd(rev xs) # tl(rev xs)"} to @{term"rev xs"}
  47.217  because the corresponding precondition @{term"rev xs ~= []"}
  47.218  simplifies to @{term"xs ~= []"}, which is exactly the local
  47.219  assumption of the subgoal.
  47.220 -*}
  47.221 +\<close>
  47.222  
  47.223  
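[Aside, not part of this changeset: a sketch assuming hd_Cons_tl from above; the conditional rule also fires when its precondition only follows from the assumptions after some simplification.]

lemma "xs \<noteq> [] \<Longrightarrow> hd (xs @ ys) # tl (xs @ ys) = xs @ ys"
by simp   (* precondition xs @ ys \<noteq> [] reduces to the assumption xs \<noteq> [] *)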
  47.224 -subsection{*Automatic Case Splits*}
  47.225 +subsection\<open>Automatic Case Splits\<close>
  47.226  
  47.227 -text{*\label{sec:AutoCaseSplits}\indexbold{case splits}%
  47.228 +text\<open>\label{sec:AutoCaseSplits}\indexbold{case splits}%
  47.229  Goals containing @{text"if"}-expressions\index{*if expressions!splitting of}
  47.230  are usually proved by case
  47.231  distinction on the boolean condition.  Here is an example:
  47.232 -*}
  47.233 +\<close>
  47.234  
  47.235  lemma "\<forall>xs. if xs = [] then rev xs = [] else rev xs \<noteq> []"
  47.236  
  47.237 -txt{*\noindent
  47.238 +txt\<open>\noindent
  47.239  The goal can be split by a special method, \methdx{split}:
  47.240 -*}
  47.241 +\<close>
  47.242  
  47.243  apply(split if_split)
  47.244  
  47.245 -txt{*\noindent
  47.246 +txt\<open>\noindent
  47.247  @{subgoals[display,indent=0]}
  47.248  where \tdx{if_split} is a theorem that expresses splitting of
  47.249  @{text"if"}s. Because
  47.250 @@ -280,11 +280,11 @@
  47.251  
  47.252  This splitting idea generalizes from @{text"if"} to \sdx{case}.
  47.253  Let us simplify a case analysis over lists:\index{*list.split (theorem)}
  47.254 -*}(*<*)by simp(*>*)
  47.255 +\<close>(*<*)by simp(*>*)
  47.256  lemma "(case xs of [] \<Rightarrow> zs | y#ys \<Rightarrow> y#(ys@zs)) = xs@zs"
  47.257  apply(split list.split)
  47.258   
  47.259 -txt{*
  47.260 +txt\<open>
  47.261  @{subgoals[display,indent=0]}
  47.262  The simplifier does not split
  47.263  @{text"case"}-expressions, as it does @{text"if"}-expressions, 
  47.264 @@ -293,26 +293,26 @@
  47.265  @{text split}\index{*split (modifier)} 
  47.266  for adding splitting rules explicitly.  The
  47.267  lemma above can be proved in one step by
  47.268 -*}
  47.269 +\<close>
  47.270  (*<*)oops
  47.271  lemma "(case xs of [] \<Rightarrow> zs | y#ys \<Rightarrow> y#(ys@zs)) = xs@zs"
  47.272  (*>*)
  47.273  apply(simp split: list.split)
  47.274  (*<*)done(*>*)
  47.275 -text{*\noindent
  47.276 +text\<open>\noindent
  47.277  whereas \isacommand{apply}@{text"(simp)"} alone will not succeed.
  47.278  
  47.279  Every datatype $t$ comes with a theorem
  47.280  $t$@{text".split"} which can be declared to be a \bfindex{split rule} either
  47.281  locally as above, or by giving it the \attrdx{split} attribute globally:
  47.282 -*}
  47.283 +\<close>
  47.284  
  47.285  declare list.split [split]
  47.286  
  47.287 -text{*\noindent
  47.288 +text\<open>\noindent
  47.289  The @{text"split"} attribute can be removed with the @{text"del"} modifier,
  47.290  either locally
  47.291 -*}
  47.292 +\<close>
  47.293  (*<*)
  47.294  lemma "dummy=dummy"
  47.295  (*>*)
  47.296 @@ -320,12 +320,12 @@
  47.297  (*<*)
  47.298  oops
  47.299  (*>*)
  47.300 -text{*\noindent
  47.301 +text\<open>\noindent
  47.302  or globally:
  47.303 -*}
  47.304 +\<close>
  47.305  declare list.split [split del]
  47.306  
  47.307 -text{*
  47.308 +text\<open>
  47.309  Polished proofs typically perform splitting within @{text simp} rather than 
  47.310  invoking the @{text split} method.  However, if a goal contains
  47.311  several @{text "if"} and @{text case} expressions, 
  47.312 @@ -335,12 +335,12 @@
  47.313  The split rules shown above are intended to affect only the subgoal's
  47.314  conclusion.  If you want to split an @{text"if"} or @{text"case"}-expression
  47.315  in the assumptions, you have to apply \tdx{if_split_asm} or
  47.316 -$t$@{text".split_asm"}: *}
  47.317 +$t$@{text".split_asm"}:\<close>
  47.318  
  47.319  lemma "if xs = [] then ys \<noteq> [] else ys = [] \<Longrightarrow> xs @ ys \<noteq> []"
  47.320  apply(split if_split_asm)
  47.321  
  47.322 -txt{*\noindent
  47.323 +txt\<open>\noindent
  47.324  Unlike splitting the conclusion, this step creates two
  47.325  separate subgoals, which here can be solved by @{text"simp_all"}:
  47.326  @{subgoals[display,indent=0]}
  47.327 @@ -357,22 +357,22 @@
  47.328    simplified at first, until either the expression reduces to one of the
  47.329    cases or it is split.
  47.330  \end{warn}
  47.331 -*}
  47.332 +\<close>
  47.333  (*<*)
  47.334  by(simp_all)
  47.335  (*>*)
  47.336  
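[Aside, not part of this changeset: as the text notes, polished proofs fold splitting into simp; the assumption-splitting example above can then be closed in one step.]

lemma "if xs = [] then ys \<noteq> [] else ys = [] \<Longrightarrow> xs @ ys \<noteq> []"
by (simp split: if_split_asm)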
  47.337 -subsection{*Tracing*}
  47.338 -text{*\indexbold{tracing the simplifier}
  47.339 +subsection\<open>Tracing\<close>
  47.340 +text\<open>\indexbold{tracing the simplifier}
  47.341  Using the simplifier effectively may take a bit of experimentation.  Set the
  47.342  Proof General flag \pgmenu{Isabelle} $>$ \pgmenu{Settings} $>$ \pgmenu{Trace Simplifier} to get a better idea of what is going on:
  47.343 -*}
  47.344 +\<close>
  47.345  
  47.346  lemma "rev [a] = []"
  47.347  apply(simp)
  47.348  (*<*)oops(*>*)
  47.349  
  47.350 -text{*\noindent
  47.351 +text\<open>\noindent
  47.352  produces the following trace in Proof General's \pgmenu{Trace} buffer:
  47.353  
  47.354  \begin{ttbox}\makeatother
  47.355 @@ -418,7 +418,7 @@
  47.356  obtained the desired trace.
  47.357  Since this is easily forgotten (and may have the unpleasant effect of
  47.358  swamping the interface with trace information), here is how you can switch
  47.359 -the trace on locally in a proof: *}
  47.360 +the trace on locally in a proof:\<close>
  47.361  
  47.362  (*<*)lemma "x=x"
  47.363  (*>*)
  47.364 @@ -426,14 +426,14 @@
  47.365  apply simp
  47.366  (*<*)oops(*>*)
  47.367  
  47.368 -text{* \noindent
  47.369 +text\<open>\noindent
  47.370  Within the current proof, all simplifications in subsequent proof steps
  47.371  will be traced, but the text reminds you to remove the \isa{using} clause
  47.372 -after it has done its job. *}
  47.373 +after it has done its job.\<close>
  47.374  
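[Aside, not part of this changeset: the locally scoped trace mentioned above looks roughly like this; simp_trace is the standard Isabelle attribute.]

lemma "x = x"
using [[simp_trace = true]]
apply simp
oops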
  47.375 -subsection{*Finding Theorems\label{sec:find}*}
  47.376 +subsection\<open>Finding Theorems\label{sec:find}\<close>
  47.377  
  47.378 -text{*\indexbold{finding theorems}\indexbold{searching theorems}
  47.379 +text\<open>\indexbold{finding theorems}\indexbold{searching theorems}
  47.380  Isabelle's large database of proved theorems 
  47.381  offers a powerful search engine. Its chief limitation is
  47.382  its restriction to the theories currently loaded.
  47.383 @@ -512,7 +512,7 @@
  47.384  through previous searches and just modify them. This saves you having
  47.385  to type in lengthy expressions again and again.
  47.386  \end{pgnote}
  47.387 -*}
  47.388 +\<close>
  47.389  (*<*)
  47.390  end
  47.391  (*>*)
    48.1 --- a/src/Doc/Tutorial/Misc/types.thy	Thu Jan 11 13:48:17 2018 +0100
    48.2 +++ b/src/Doc/Tutorial/Misc/types.thy	Fri Jan 12 14:08:53 2018 +0100
    48.3 @@ -3,30 +3,30 @@
    48.4  type_synonym gate = "bool \<Rightarrow> bool \<Rightarrow> bool"
    48.5  type_synonym ('a, 'b) alist = "('a \<times> 'b) list"
    48.6  
    48.7 -text{*\noindent
    48.8 +text\<open>\noindent
    48.9  Internally all synonyms are fully expanded.  As a consequence Isabelle's
   48.10  output never contains synonyms.  Their main purpose is to improve the
   48.11  readability of theories.  Synonyms can be used just like any other
   48.12  type.
   48.13 -*}
   48.14 +\<close>
   48.15  
   48.16 -subsection{*Constant Definitions*}
   48.17 +subsection\<open>Constant Definitions\<close>
   48.18  
   48.19 -text{*\label{sec:ConstDefinitions}\indexbold{definitions}%
   48.20 +text\<open>\label{sec:ConstDefinitions}\indexbold{definitions}%
   48.21  Nonrecursive definitions can be made with the \commdx{definition}
   48.22  command, for example @{text nand} and @{text xor} gates
   48.23  (based on type @{typ gate} above):
   48.24 -*}
   48.25 +\<close>
   48.26  
   48.27  definition nand :: gate where "nand A B \<equiv> \<not>(A \<and> B)"
   48.28  definition xor  :: gate where "xor  A B \<equiv> A \<and> \<not>B \<or> \<not>A \<and> B"
   48.29  
   48.30 -text{*\noindent%
   48.31 +text\<open>\noindent%
   48.32  The symbol \indexboldpos{\isasymequiv}{$IsaEq} is a special form of equality
   48.33  that must be used in constant definitions.
   48.34  Pattern-matching is not allowed: each definition must be of
   48.35  the form $f\,x@1\,\dots\,x@n~\isasymequiv~t$.
   48.36  Section~\ref{sec:Simp-with-Defs} explains how definitions are used
   48.37  in proofs. The default name of each definition is $f$@{text"_def"}, where
   48.38 -$f$ is the name of the defined constant.*}
   48.39 +$f$ is the name of the defined constant.\<close>
   48.40  (*<*)end(*>*)
    49.1 --- a/src/Doc/Tutorial/Protocol/Event.thy	Thu Jan 11 13:48:17 2018 +0100
    49.2 +++ b/src/Doc/Tutorial/Protocol/Event.thy	Fri Jan 12 14:08:53 2018 +0100
    49.3 @@ -7,7 +7,7 @@
    49.4      stores are visible to him
    49.5  *)(*<*)
    49.6  
    49.7 -section{*Theory of Events for Security Protocols*}
    49.8 +section\<open>Theory of Events for Security Protocols\<close>
    49.9  
   49.10  theory Event imports Message begin
   49.11  
   49.12 @@ -20,10 +20,10 @@
   49.13          | Notes agent       msg
   49.14         
   49.15  consts 
   49.16 -  bad    :: "agent set"                         -- {* compromised agents *}
   49.17 +  bad    :: "agent set"                         \<comment> \<open>compromised agents\<close>
   49.18  
   49.19  
   49.20 -text{*The constant "spies" is retained for compatibility's sake*}
   49.21 +text\<open>The constant "spies" is retained for compatibility's sake\<close>
   49.22  
   49.23  primrec
   49.24    knows :: "agent => event list => msg set"
   49.25 @@ -50,7 +50,7 @@
   49.26    spies  :: "event list => msg set" where
   49.27    "spies == knows Spy"
   49.28  
   49.29 -text{*Spy has access to his own key for spoof messages, but Server is secure*}
   49.30 +text\<open>Spy has access to his own key for spoof messages, but Server is secure\<close>
   49.31  specification (bad)
   49.32    Spy_in_bad     [iff]: "Spy \<in> bad"
   49.33    Server_not_bad [iff]: "Server \<notin> bad"
   49.34 @@ -73,9 +73,9 @@
   49.35                          Says A B X => parts {X} \<union> used evs
   49.36                        | Gets A X   => used evs
   49.37                        | Notes A X  => parts {X} \<union> used evs)"
   49.38 -    --{*The case for @{term Gets} seems anomalous, but @{term Gets} always
   49.39 +    \<comment>\<open>The case for @{term Gets} seems anomalous, but @{term Gets} always
   49.40          follows @{term Says} in real protocols.  Seems difficult to change.
   49.41 -        See @{text Gets_correct} in theory @{text "Guard/Extensions.thy"}. *}
   49.42 +        See @{text Gets_correct} in theory @{text "Guard/Extensions.thy"}.\<close>
   49.43  
   49.44  lemma Notes_imp_used [rule_format]: "Notes A X \<in> set evs --> X \<in> used evs"
   49.45  apply (induct_tac evs)
   49.46 @@ -88,7 +88,7 @@
   49.47  done
   49.48  
   49.49  
   49.50 -subsection{*Function @{term knows}*}
   49.51 +subsection\<open>Function @{term knows}\<close>
   49.52  
   49.53  (*Simplifying   
   49.54   parts(insert X (knows Spy evs)) = parts{X} \<union> parts(knows Spy evs).
   49.55 @@ -99,8 +99,8 @@
   49.56       "knows Spy (Says A B X # evs) = insert X (knows Spy evs)"
   49.57  by simp
   49.58  
   49.59 -text{*Letting the Spy see "bad" agents' notes avoids redundant case-splits
   49.60 -      on whether @{term "A=Spy"} and whether @{term "A\<in>bad"}*}
   49.61 +text\<open>Letting the Spy see "bad" agents' notes avoids redundant case-splits
   49.62 +      on whether @{term "A=Spy"} and whether @{term "A\<in>bad"}\<close>
   49.63  lemma knows_Spy_Notes [simp]:
   49.64       "knows Spy (Notes A X # evs) =  
   49.65            (if A:bad then insert X (knows Spy evs) else knows Spy evs)"
   49.66 @@ -121,7 +121,7 @@
   49.67       "knows Spy evs \<subseteq> knows Spy (Gets A X # evs)"
   49.68  by (simp add: subset_insertI)
   49.69  
   49.70 -text{*Spy sees what is sent on the traffic*}
   49.71 +text\<open>Spy sees what is sent on the traffic\<close>
   49.72  lemma Says_imp_knows_Spy [rule_format]:
   49.73       "Says A B X \<in> set evs --> X \<in> knows Spy evs"
   49.74  apply (induct_tac "evs")
   49.75 @@ -135,21 +135,21 @@
   49.76  done
   49.77  
   49.78  
   49.79 -text{*Elimination rules: derive contradictions from old Says events containing
   49.80 -  items known to be fresh*}
   49.81 +text\<open>Elimination rules: derive contradictions from old Says events containing
   49.82 +  items known to be fresh\<close>
   49.83  lemmas knows_Spy_partsEs =
   49.84       Says_imp_knows_Spy [THEN parts.Inj, elim_format] 
   49.85       parts.Body [elim_format]
   49.86  
   49.87  lemmas Says_imp_analz_Spy = Says_imp_knows_Spy [THEN analz.Inj]
   49.88  
   49.89 -text{*Compatibility for the old "spies" function*}
   49.90 +text\<open>Compatibility for the old "spies" function\<close>
   49.91  lemmas spies_partsEs = knows_Spy_partsEs
   49.92  lemmas Says_imp_spies = Says_imp_knows_Spy
   49.93  lemmas parts_insert_spies = parts_insert_knows_A [of _ Spy]
   49.94  
   49.95  
   49.96 -subsection{*Knowledge of Agents*}
   49.97 +subsection\<open>Knowledge of Agents\<close>
   49.98  
   49.99  lemma knows_Says: "knows A (Says A B X # evs) = insert X (knows A evs)"
  49.100  by simp
  49.101 @@ -171,21 +171,21 @@
  49.102  lemma knows_subset_knows_Gets: "knows A evs \<subseteq> knows A (Gets A' X # evs)"
  49.103  by (simp add: subset_insertI)
  49.104  
  49.105 -text{*Agents know what they say*}
  49.106 +text\<open>Agents know what they say\<close>
  49.107  lemma Says_imp_knows [rule_format]: "Says A B X \<in> set evs --> X \<in> knows A evs"
  49.108  apply (induct_tac "evs")
  49.109  apply (simp_all (no_asm_simp) split: event.split)
  49.110  apply blast
  49.111  done
  49.112  
  49.113 -text{*Agents know what they note*}
  49.114 +text\<open>Agents know what they note\<close>
  49.115  lemma Notes_imp_knows [rule_format]: "Notes A X \<in> set evs --> X \<in> knows A evs"
  49.116  apply (induct_tac "evs")
  49.117  apply (simp_all (no_asm_simp) split: event.split)
  49.118  apply blast
  49.119  done
  49.120  
  49.121 -text{*Agents know what they receive*}
  49.122 +text\<open>Agents know what they receive\<close>
  49.123  lemma Gets_imp_knows_agents [rule_format]:
  49.124       "A \<noteq> Spy --> Gets A X \<in> set evs --> X \<in> knows A evs"
  49.125  apply (induct_tac "evs")
  49.126 @@ -193,8 +193,8 @@
  49.127  done
  49.128  
  49.129  
  49.130 -text{*What agents DIFFERENT FROM Spy know 
  49.131 -  was either said, or noted, or got, or known initially*}
  49.132 +text\<open>What agents DIFFERENT FROM Spy know 
  49.133 +  was either said, or noted, or got, or known initially\<close>
  49.134  lemma knows_imp_Says_Gets_Notes_initState [rule_format]:
  49.135       "[| X \<in> knows A evs; A \<noteq> Spy |] ==> EX B.  
  49.136    Says A B X \<in> set evs | Gets A X \<in> set evs | Notes A X \<in> set evs | X \<in> initState A"
  49.137 @@ -204,8 +204,8 @@
  49.138  apply blast
  49.139  done
  49.140  
  49.141 -text{*What the Spy knows -- for the time being --
  49.142 -  was either said or noted, or known initially*}
  49.143 +text\<open>What the Spy knows -- for the time being --
  49.144 +  was either said or noted, or known initially\<close>
  49.145  lemma knows_Spy_imp_Says_Notes_initState [rule_format]:
  49.146       "[| X \<in> knows Spy evs |] ==> EX A B.  
  49.147    Says A B X \<in> set evs | Notes A X \<in> set evs | X \<in> initState Spy"
  49.148 @@ -241,15 +241,15 @@
  49.149  apply (blast intro: initState_into_used)
  49.150  done
  49.151  
  49.152 -text{*NOTE REMOVAL--laws above are cleaner, as they don't involve "case"*}
  49.153 +text\<open>NOTE REMOVAL--laws above are cleaner, as they don't involve "case"\<close>
  49.154  declare knows_Cons [simp del]
  49.155          used_Nil [simp del] used_Cons [simp del]
  49.156  
  49.157  
  49.158 -text{*For proving theorems of the form @{term "X \<notin> analz (knows Spy evs) --> P"}
  49.159 +text\<open>For proving theorems of the form @{term "X \<notin> analz (knows Spy evs) --> P"}
  49.160    New events added by induction to "evs" are discarded.  Provided 
  49.161    this information isn't needed, the proof will be much shorter, since
  49.162 -  it will omit complicated reasoning about @{term analz}.*}
  49.163 +  it will omit complicated reasoning about @{term analz}.\<close>
  49.164  
  49.165  lemmas analz_mono_contra =
  49.166         knows_Spy_subset_knows_Spy_Says [THEN analz_mono, THEN contra_subsetD]
  49.167 @@ -259,12 +259,12 @@
  49.168  lemmas analz_impI = impI [where P = "Y \<notin> analz (knows Spy evs)"] for Y evs
  49.169  
  49.170  ML
  49.171 -{*
  49.172 +\<open>
  49.173  fun analz_mono_contra_tac ctxt =
  49.174    resolve_tac ctxt @{thms analz_impI} THEN' 
  49.175    REPEAT1 o (dresolve_tac ctxt @{thms analz_mono_contra})
  49.176    THEN' mp_tac ctxt
  49.177 -*}
  49.178 +\<close>
  49.179  
  49.180  lemma knows_subset_knows_Cons: "knows A evs \<subseteq> knows A (e # evs)"
  49.181  by (induct e, auto simp: knows_Cons)
  49.182 @@ -275,7 +275,7 @@
  49.183  done
  49.184  
  49.185  
  49.186 -text{*For proving @{text new_keys_not_used}*}
  49.187 +text\<open>For proving @{text new_keys_not_used}\<close>
  49.188  lemma keysFor_parts_insert:
  49.189       "[| K \<in> keysFor (parts (insert X G));  X \<in> synth (analz H) |] 
  49.190        ==> K \<in> keysFor (parts (G \<union> H)) | Key (invKey K) \<in> parts H" 
  49.191 @@ -284,16 +284,16 @@
  49.192             analz_subset_parts [THEN keysFor_mono, THEN [2] rev_subsetD]
  49.193      intro: analz_subset_parts [THEN subsetD] parts_mono [THEN [2] rev_subsetD])
  49.194  
  49.195 -method_setup analz_mono_contra = {*
  49.196 -    Scan.succeed (fn ctxt => SIMPLE_METHOD (REPEAT_FIRST (analz_mono_contra_tac ctxt))) *}
  49.197 +method_setup analz_mono_contra = \<open>
  49.198 +    Scan.succeed (fn ctxt => SIMPLE_METHOD (REPEAT_FIRST (analz_mono_contra_tac ctxt)))\<close>
  49.199      "for proving theorems of the form X \<notin> analz (knows Spy evs) --> P"
  49.200  
  49.201 -subsubsection{*Useful for case analysis on whether a hash is a spoof or not*}
  49.202 +subsubsection\<open>Useful for case analysis on whether a hash is a spoof or not\<close>
  49.203  
  49.204  lemmas syan_impI = impI [where P = "Y \<notin> synth (analz (knows Spy evs))"] for Y evs
  49.205  
  49.206  ML
  49.207 -{*
  49.208 +\<open>
  49.209  val knows_Cons = @{thm knows_Cons};
  49.210  val used_Nil = @{thm used_Nil};
  49.211  val used_Cons = @{thm used_Cons};
  49.212 @@ -339,16 +339,16 @@
  49.213        @{thm knows_Spy_subset_knows_Spy_Gets} RS @{thm synth_analz_mono} RS @{thm contra_subsetD}])
  49.214    THEN'
  49.215    mp_tac ctxt
  49.216 -*}
  49.217 +\<close>
  49.218  
  49.219 -method_setup synth_analz_mono_contra = {*
  49.220 -    Scan.succeed (fn ctxt => SIMPLE_METHOD (REPEAT_FIRST (synth_analz_mono_contra_tac ctxt))) *}
  49.221 +method_setup synth_analz_mono_contra = \<open>
  49.222 +    Scan.succeed (fn ctxt => SIMPLE_METHOD (REPEAT_FIRST (synth_analz_mono_contra_tac ctxt)))\<close>
  49.223      "for proving theorems of the form X \<notin> synth (analz (knows Spy evs)) --> P"
  49.224  (*>*)
  49.225  
  49.226 -section{* Event Traces \label{sec:events} *}
  49.227 +section\<open>Event Traces \label{sec:events}\<close>
  49.228  
  49.229 -text {*
  49.230 +text \<open>
  49.231  The system's behaviour is formalized as a set of traces of
  49.232  \emph{events}.  The most important event, @{text "Says A B X"}, expresses
  49.233  $A\to B : X$, which is the attempt by~$A$ to send~$B$ the message~$X$.
  49.234 @@ -379,7 +379,7 @@
  49.235  \item @{term "synth (analz (knows Spy evs))"} is everything that the spy
  49.236  could generate
  49.237  \end{itemize}
  49.238 -*}
  49.239 +\<close>
  49.240  
  49.241  (*<*)
  49.242  end
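[Aside, not part of this changeset: a sketch using the simp rules for knows proved above; whatever is said on a trace is known to the spy.]

lemma "X \<in> knows Spy (Says A B X # evs)"
by simp   (* via knows_Spy_Says *)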
    50.1 --- a/src/Doc/Tutorial/Protocol/Message.thy	Thu Jan 11 13:48:17 2018 +0100
    50.2 +++ b/src/Doc/Tutorial/Protocol/Message.thy	Fri Jan 12 14:08:53 2018 +0100
    50.3 @@ -5,7 +5,7 @@
    50.4  Inductive relations "parts", "analz" and "synth"
    50.5  *)(*<*)
    50.6  
    50.7 -section{*Theory of Agents and Messages for Security Protocols*}
    50.8 +section\<open>Theory of Agents and Messages for Security Protocols\<close>
    50.9  
   50.10  theory Message imports Main begin
   50.11  ML_file "../../antiquote_setup.ML"
   50.12 @@ -15,27 +15,27 @@
   50.13  by blast
   50.14  (*>*)
   50.15  
   50.16 -section{* Agents and Messages *}
   50.17 +section\<open>Agents and Messages\<close>
   50.18  
   50.19 -text {*
   50.20 +text \<open>
   50.21  All protocol specifications refer to a syntactic theory of messages. 
   50.22  Datatype
   50.23  @{text agent} introduces the constant @{text Server} (a trusted central
   50.24  machine, needed for some protocols), an infinite population of
   50.25  friendly agents, and the~@{text Spy}:
   50.26 -*}
   50.27 +\<close>
   50.28  
   50.29  datatype agent = Server | Friend nat | Spy
   50.30  
   50.31 -text {*
   50.32 +text \<open>
   50.33  Keys are just natural numbers.  Function @{text invKey} maps a public key to
   50.34  the matching private key, and vice versa:
   50.35 -*}
   50.36 +\<close>
   50.37  
   50.38  type_synonym key = nat
   50.39  consts invKey :: "key \<Rightarrow> key"
   50.40  (*<*)
   50.41 -consts all_symmetric :: bool        --{*true if all keys are symmetric*}
   50.42 +consts all_symmetric :: bool        \<comment>\<open>true if all keys are symmetric\<close>
   50.43  
   50.44  specification (invKey)
   50.45    invKey [simp]: "invKey (invKey K) = K"
   50.46 @@ -43,18 +43,18 @@
   50.47      by (rule exI [of _ id], auto)
   50.48  
   50.49  
   50.50 -text{*The inverse of a symmetric key is itself; that of a public key
   50.51 -      is the private key and vice versa*}
   50.52 +text\<open>The inverse of a symmetric key is itself; that of a public key
   50.53 +      is the private key and vice versa\<close>
   50.54  
   50.55  definition symKeys :: "key set" where
   50.56    "symKeys == {K. invKey K = K}"
   50.57  (*>*)
   50.58  
   50.59 -text {*
   50.60 +text \<open>
   50.61  Datatype
   50.62  @{text msg} introduces the message forms, which include agent names, nonces,
   50.63  keys, compound messages, and encryptions.  
   50.64 -*}
   50.65 +\<close>
   50.66  
   50.67  datatype
   50.68       msg = Agent  agent
   50.69 @@ -63,7 +63,7 @@
   50.70           | MPair  msg msg
   50.71           | Crypt  key msg
   50.72  
   50.73 -text {*
   50.74 +text \<open>
   50.75  \noindent
   50.76  The notation $\comp{X\sb 1,\ldots X\sb{n-1},X\sb n}$
   50.77  abbreviates
   50.78 @@ -76,10 +76,10 @@
   50.79  wrong key succeeds but yields garbage.  Our model of encryption is
   50.80  realistic if encryption adds some redundancy to the plaintext, such as a
   50.81  checksum, so that garbage can be detected.
   50.82 -*}
   50.83 +\<close>
   50.84  
   50.85  (*<*)
   50.86 -text{*Concrete syntax: messages appear as \<open>\<lbrace>A,B,NA\<rbrace>\<close>, etc...*}
   50.87 +text\<open>Concrete syntax: messages appear as \<open>\<lbrace>A,B,NA\<rbrace>\<close>, etc...\<close>
   50.88  syntax
   50.89    "_MTuple"      :: "['a, args] => 'a * 'b"       ("(2\<lbrace>_,/ _\<rbrace>)")
   50.90  translations
   50.91 @@ -88,11 +88,11 @@
   50.92  
   50.93  
   50.94  definition keysFor :: "msg set => key set" where
   50.95 -    --{*Keys useful to decrypt elements of a message set*}
   50.96 +    \<comment>\<open>Keys useful to decrypt elements of a message set\<close>
   50.97    "keysFor H == invKey ` {K. \<exists>X. Crypt K X \<in> H}"
   50.98  
   50.99  
  50.100 -subsubsection{*Inductive Definition of All Parts" of a Message*}
  50.101 +subsubsection\<open>Inductive Definition of "All Parts" of a Message\<close>
  50.102  
  50.103  inductive_set
  50.104    parts :: "msg set => msg set"
  50.105 @@ -104,7 +104,7 @@
  50.106    | Body:        "Crypt K X \<in> parts H ==> X \<in> parts H"
  50.107  
  50.108  
  50.109 -text{*Monotonicity*}
  50.110 +text\<open>Monotonicity\<close>
  50.111  lemma parts_mono: "G \<subseteq> H ==> parts(G) \<subseteq> parts(H)"
  50.112  apply auto
  50.113  apply (erule parts.induct) 
  50.114 @@ -112,7 +112,7 @@
  50.115  done
  50.116  
  50.117  
  50.118 -text{*Equations hold because constructors are injective.*}
  50.119 +text\<open>Equations hold because constructors are injective.\<close>
  50.120  lemma Friend_image_eq [simp]: "(Friend x \<in> Friend`A) = (x:A)"
  50.121  by auto
  50.122  
  50.123 @@ -123,7 +123,7 @@
  50.124  by auto
  50.125  
  50.126  
  50.127 -subsubsection{*Inverse of keys *}
  50.128 +subsubsection\<open>Inverse of keys\<close>
  50.129  
  50.130  lemma invKey_eq [simp]: "(invKey K = invKey K') = (K=K')"
  50.131  apply safe
  50.132 @@ -131,7 +131,7 @@
  50.133  done
  50.134  
  50.135  
  50.136 -subsection{*keysFor operator*}
  50.137 +subsection\<open>keysFor operator\<close>
  50.138  
  50.139  lemma keysFor_empty [simp]: "keysFor {} = {}"
  50.140  by (unfold keysFor_def, blast)
  50.141 @@ -142,7 +142,7 @@
  50.142  lemma keysFor_UN [simp]: "keysFor (\<Union>i\<in>A. H i) = (\<Union>i\<in>A. keysFor (H i))"
  50.143  by (unfold keysFor_def, blast)
  50.144  
  50.145 -text{*Monotonicity*}
  50.146 +text\<open>Monotonicity\<close>
  50.147  lemma keysFor_mono: "G \<subseteq> H ==> keysFor(G) \<subseteq> keysFor(H)"
  50.148  by (unfold keysFor_def, blast)
  50.149  
  50.150 @@ -169,7 +169,7 @@
  50.151  by (unfold keysFor_def, blast)
  50.152  
  50.153  
  50.154 -subsection{*Inductive relation "parts"*}
  50.155 +subsection\<open>Inductive relation "parts"\<close>
  50.156  
  50.157  lemma MPair_parts:
  50.158       "[| \<lbrace>X,Y\<rbrace> \<in> parts H;        
  50.159 @@ -177,10 +177,10 @@
  50.160  by (blast dest: parts.Fst parts.Snd) 
  50.161  
  50.162  declare MPair_parts [elim!]  parts.Body [dest!]
  50.163 -text{*NB These two rules are UNSAFE in the formal sense, as they discard the
  50.164 +text\<open>NB These two rules are UNSAFE in the formal sense, as they discard the
  50.165       compound message.  They work well on THIS FILE.  
  50.166    @{text MPair_parts} is left as SAFE because it speeds up proofs.
  50.167 -  The Crypt rule is normally kept UNSAFE to avoid breaking up certificates.*}
  50.168 +  The Crypt rule is normally kept UNSAFE to avoid breaking up certificates.\<close>
  50.169  
  50.170  lemma parts_increasing: "H \<subseteq> parts(H)"
  50.171  by blast
  50.172 @@ -195,12 +195,12 @@
  50.173  lemma parts_emptyE [elim!]: "X\<in> parts{} ==> P"
  50.174  by simp
  50.175  
  50.176 -text{*WARNING: loops if H = {Y}, therefore must not be repeated!*}
  50.177 +text\<open>WARNING: loops if H = {Y}, therefore must not be repeated!\<close>
  50.178  lemma parts_singleton: "X\<in> parts H ==> \<exists>Y\<in>H. X\<in> parts {Y}"
  50.179  by (erule parts.induct, fast+)
  50.180  
  50.181  
  50.182 -subsubsection{*Unions *}
  50.183 +subsubsection\<open>Unions\<close>
  50.184  
  50.185  lemma parts_Un_subset1: "parts(G) \<union> parts(H) \<subseteq> parts(G \<union> H)"
  50.186  by (intro Un_least parts_mono Un_upper1 Un_upper2)
  50.187 @@ -218,8 +218,8 @@
  50.188  apply (simp only: parts_Un)
  50.189  done
  50.190  
  50.191 -text{*TWO inserts to avoid looping.  This rewrite is better than nothing.
  50.192 -  Not suitable for Addsimps: its behaviour can be strange.*}
  50.193 +text\<open>TWO inserts to avoid looping.  This rewrite is better than nothing.
  50.194 +  Not suitable for Addsimps: its behaviour can be strange.\<close>
  50.195  lemma parts_insert2:
  50.196       "parts (insert X (insert Y H)) = parts {X} \<union> parts {Y} \<union> parts H"
  50.197  apply (simp add: Un_assoc)
  50.198 @@ -237,12 +237,12 @@
  50.199  lemma parts_UN [simp]: "parts(\<Union>x\<in>A. H x) = (\<Union>x\<in>A. parts(H x))"
  50.200  by (intro equalityI parts_UN_subset1 parts_UN_subset2)
  50.201  
  50.202 -text{*Added to simplify arguments to parts, analz and synth.
  50.203 -  NOTE: the UN versions are no longer used!*}
  50.204 +text\<open>Added to simplify arguments to parts, analz and synth.
  50.205 +  NOTE: the UN versions are no longer used!\<close>
  50.206  
  50.207  
  50.208 -text{*This allows @{text blast} to simplify occurrences of 
  50.209 -  @{term "parts(G\<union>H)"} in the assumption.*}
  50.210 +text\<open>This allows @{text blast} to simplify occurrences of 
  50.211 +  @{term "parts(G\<union>H)"} in the assumption.\<close>
  50.212  lemmas in_parts_UnE = parts_Un [THEN equalityD1, THEN subsetD, THEN UnE] 
  50.213  declare in_parts_UnE [elim!]
  50.214  
  50.215 @@ -250,7 +250,7 @@
  50.216  lemma parts_insert_subset: "insert X (parts H) \<subseteq> parts(insert X H)"
  50.217  by (blast intro: parts_mono [THEN [2] rev_subsetD])
  50.218  
  50.219 -subsubsection{*Idempotence and transitivity *}
  50.220 +subsubsection\<open>Idempotence and transitivity\<close>
  50.221  
  50.222  lemma parts_partsD [dest!]: "X\<in> parts (parts H) ==> X\<in> parts H"
  50.223  by (erule parts.induct, blast+)
  50.224 @@ -267,7 +267,7 @@
  50.225  lemma parts_trans: "[| X\<in> parts G;  G \<subseteq> parts H |] ==> X\<in> parts H"
  50.226  by (drule parts_mono, blast)
  50.227  
  50.228 -text{*Cut*}
  50.229 +text\<open>Cut\<close>
  50.230  lemma parts_cut:
  50.231       "[| Y\<in> parts (insert X G);  X\<in> parts H |] ==> Y\<in> parts (G \<union> H)" 
  50.232  by (blast intro: parts_trans) 
  50.233 @@ -277,7 +277,7 @@
  50.234  by (force dest!: parts_cut intro: parts_insertI)
  50.235  
  50.236  
  50.237 -subsubsection{*Rewrite rules for pulling out atomic messages *}
  50.238 +subsubsection\<open>Rewrite rules for pulling out atomic messages\<close>
  50.239  
  50.240  lemmas parts_insert_eq_I = equalityI [OF subsetI parts_insert_subset]
  50.241  
  50.242 @@ -323,21 +323,21 @@
  50.243  done
  50.244  
  50.245  
  50.246 -text{*In any message, there is an upper bound N on its greatest nonce.*}
  50.247 +text\<open>In any message, there is an upper bound N on its greatest nonce.\<close>
  50.248  lemma msg_Nonce_supply: "\<exists>N. \<forall>n. N\<le>n --> Nonce n \<notin> parts {msg}"
  50.249  apply (induct_tac "msg")
  50.250  apply (simp_all (no_asm_simp) add: exI parts_insert2)
  50.251 - txt{*MPair case: blast works out the necessary sum itself!*}
  50.252 + txt\<open>MPair case: blast works out the necessary sum itself!\<close>
  50.253   prefer 2 apply auto apply (blast elim!: add_leE)
  50.254 -txt{*Nonce case*}
  50.255 +txt\<open>Nonce case\<close>
  50.256  apply (rename_tac nat)
  50.257  apply (rule_tac x = "N + Suc nat" in exI, auto) 
  50.258  done
  50.259  (*>*)
  50.260  
  50.261 -section{* Modelling the Adversary *}
  50.262 +section\<open>Modelling the Adversary\<close>
  50.263  
  50.264 -text {*
  50.265 +text \<open>
  50.266  The spy is part of the system and must be built into the model.  He is
  50.267  a malicious user who does not have to follow the protocol.  He
  50.268  watches the network and uses any keys he knows to decrypt messages.
  50.269 @@ -349,7 +349,7 @@
  50.270  messages. The set @{text "analz H"} formalizes what the adversary can learn
  50.271  from the set of messages~$H$.  The closure properties of this set are
  50.272  defined inductively.
  50.273 -*}
  50.274 +\<close>
  50.275  
  50.276  inductive_set
  50.277    analz :: "msg set \<Rightarrow> msg set"
  50.278 @@ -362,14 +362,14 @@
  50.279               "\<lbrakk>Crypt K X \<in> analz H; Key(invKey K) \<in> analz H\<rbrakk>
  50.280                \<Longrightarrow> X \<in> analz H"
  50.281  (*<*)
  50.282 -text{*Monotonicity; Lemma 1 of Lowe's paper*}
  50.283 +text\<open>Monotonicity; Lemma 1 of Lowe's paper\<close>
  50.284  lemma analz_mono: "G\<subseteq>H ==> analz(G) \<subseteq> analz(H)"
  50.285  apply auto
  50.286  apply (erule analz.induct) 
  50.287  apply (auto dest: analz.Fst analz.Snd) 
  50.288  done
  50.289  
  50.290 -text{*Making it safe speeds up proofs*}
  50.291 +text\<open>Making it safe speeds up proofs\<close>
  50.292  lemma MPair_analz [elim!]:
  50.293       "[| \<lbrace>X,Y\<rbrace> \<in> analz H;        
  50.294               [| X \<in> analz H; Y \<in> analz H |] ==> P   
  50.295 @@ -402,22 +402,22 @@
  50.296  
  50.297  lemmas analz_insertI = subset_insertI [THEN analz_mono, THEN [2] rev_subsetD]
  50.298  
  50.299 -subsubsection{*General equational properties *}
  50.300 +subsubsection\<open>General equational properties\<close>
  50.301  
  50.302  lemma analz_empty [simp]: "analz{} = {}"
  50.303  apply safe
  50.304  apply (erule analz.induct, blast+)
  50.305  done
  50.306  
  50.307 -text{*Converse fails: we can analz more from the union than from the 
  50.308 -  separate parts, as a key in one might decrypt a message in the other*}
  50.309 +text\<open>Converse fails: we can analz more from the union than from the 
  50.310 +  separate parts, as a key in one might decrypt a message in the other\<close>
  50.311  lemma analz_Un: "analz(G) \<union> analz(H) \<subseteq> analz(G \<union> H)"
  50.312  by (intro Un_least analz_mono Un_upper1 Un_upper2)
  50.313  
  50.314  lemma analz_insert: "insert X (analz H) \<subseteq> analz(insert X H)"
  50.315  by (blast intro: analz_mono [THEN [2] rev_subsetD])
  50.316  
  50.317 -subsubsection{*Rewrite rules for pulling out atomic messages *}
  50.318 +subsubsection\<open>Rewrite rules for pulling out atomic messages\<close>
  50.319  
  50.320  lemmas analz_insert_eq_I = equalityI [OF subsetI analz_insert]
  50.321  
  50.322 @@ -433,7 +433,7 @@
  50.323  apply (erule analz.induct, auto) 
  50.324  done
  50.325  
  50.326 -text{*Can only pull out Keys if they are not needed to decrypt the rest*}
  50.327 +text\<open>Can only pull out Keys if they are not needed to decrypt the rest\<close>
  50.328  lemma analz_insert_Key [simp]: 
  50.329      "K \<notin> keysFor (analz H) ==>   
  50.330            analz (insert (Key K) H) = insert (Key K) (analz H)"
  50.331 @@ -452,7 +452,7 @@
  50.332  apply (blast intro: analz.Fst analz.Snd)+
  50.333  done
  50.334  
  50.335 -text{*Can pull out enCrypted message if the Key is not known*}
  50.336 +text\<open>Can pull out enCrypted message if the Key is not known\<close>
  50.337  lemma analz_insert_Crypt:
  50.338       "Key (invKey K) \<notin> analz H 
  50.339        ==> analz (insert (Crypt K X) H) = insert (Crypt K X) (analz H)"
  50.340 @@ -482,10 +482,10 @@
  50.341                 insert (Crypt K X) (analz (insert X H))"
  50.342  by (intro equalityI lemma1 lemma2)
  50.343  
  50.344 -text{*Case analysis: either the message is secure, or it is not! Effective,
  50.345 +text\<open>Case analysis: either the message is secure, or it is not! Effective,
  50.346  but can cause subgoals to blow up! Use with @{text "if_split"}; apparently
  50.347  @{text "split_tac"} does not cope with patterns such as @{term"analz (insert
  50.348 -(Crypt K X) H)"} *} 
  50.349 +(Crypt K X) H)"}\<close> 
  50.350  lemma analz_Crypt_if [simp]:
  50.351       "analz (insert (Crypt K X) H) =                 
  50.352            (if (Key (invKey K) \<in> analz H)                 
  50.353 @@ -494,7 +494,7 @@
  50.354  by (simp add: analz_insert_Crypt analz_insert_Decrypt)
  50.355  
  50.356  
  50.357 -text{*This rule supposes "for the sake of argument" that we have the key.*}
  50.358 +text\<open>This rule supposes "for the sake of argument" that we have the key.\<close>
  50.359  lemma analz_insert_Crypt_subset:
  50.360       "analz (insert (Crypt K X) H) \<subseteq>   
  50.361             insert (Crypt K X) (analz (insert X H))"
  50.362 @@ -509,7 +509,7 @@
  50.363  done
  50.364  
  50.365  
  50.366 -subsubsection{*Idempotence and transitivity *}
  50.367 +subsubsection\<open>Idempotence and transitivity\<close>
  50.368  
  50.369  lemma analz_analzD [dest!]: "X\<in> analz (analz H) ==> X\<in> analz H"
  50.370  by (erule analz.induct, blast+)
  50.371 @@ -526,7 +526,7 @@
  50.372  lemma analz_trans: "[| X\<in> analz G;  G \<subseteq> analz H |] ==> X\<in> analz H"
  50.373  by (drule analz_mono, blast)
  50.374  
  50.375 -text{*Cut; Lemma 2 of Lowe*}
  50.376 +text\<open>Cut; Lemma 2 of Lowe\<close>
  50.377  lemma analz_cut: "[| Y\<in> analz (insert X H);  X\<in> analz H |] ==> Y\<in> analz H"
  50.378  by (erule analz_trans, blast)
  50.379  
  50.380 @@ -534,14 +534,14 @@
  50.381     "Y: analz (insert X H) ==> X: analz H --> Y: analz H"
  50.382  *)
  50.383  
  50.384 -text{*This rewrite rule helps in the simplification of messages that involve
  50.385 +text\<open>This rewrite rule helps in the simplification of messages that involve
  50.386    the forwarding of unknown components (X).  Without it, removing occurrences
  50.387 -  of X can be very complicated. *}
  50.388 +  of X can be very complicated.\<close>
  50.389  lemma analz_insert_eq: "X\<in> analz H ==> analz (insert X H) = analz H"
  50.390  by (blast intro: analz_cut analz_insertI)
  50.391  
  50.392  
  50.393 -text{*A congruence rule for "analz" *}
  50.394 +text\<open>A congruence rule for "analz"\<close>
  50.395  
  50.396  lemma analz_subset_cong:
  50.397       "[| analz G \<subseteq> analz G'; analz H \<subseteq> analz H' |] 
  50.398 @@ -559,14 +559,14 @@
  50.399       "analz H = analz H' ==> analz(insert X H) = analz(insert X H')"
  50.400  by (force simp only: insert_def intro!: analz_cong)
  50.401  
  50.402 -text{*If there are no pairs or encryptions then analz does nothing*}
  50.403 +text\<open>If there are no pairs or encryptions then analz does nothing\<close>
  50.404  lemma analz_trivial:
  50.405       "[| \<forall>X Y. \<lbrace>X,Y\<rbrace> \<notin> H;  \<forall>X K. Crypt K X \<notin> H |] ==> analz H = H"
  50.406  apply safe
  50.407  apply (erule analz.induct, blast+)
  50.408  done
  50.409  
  50.410 -text{*These two are obsolete (with a single Spy) but cost little to prove...*}
  50.411 +text\<open>These two are obsolete (with a single Spy) but cost little to prove...\<close>
  50.412  lemma analz_UN_analz_lemma:
  50.413       "X\<in> analz (\<Union>i\<in>A. analz (H i)) ==> X\<in> analz (\<Union>i\<in>A. H i)"
  50.414  apply (erule analz.induct)
  50.415 @@ -576,7 +576,7 @@
  50.416  lemma analz_UN_analz [simp]: "analz (\<Union>i\<in>A. analz (H i)) = analz (\<Union>i\<in>A. H i)"
  50.417  by (blast intro: analz_UN_analz_lemma analz_mono [THEN [2] rev_subsetD])
  50.418  (*>*)
  50.419 -text {*
  50.420 +text \<open>
  50.421  Note the @{text Decrypt} rule: the spy can decrypt a
  50.422  message encrypted with key~$K$ if he has the matching key,~$K^{-1}$. 
  50.423  Properties proved by rule induction include the following:
  50.424 @@ -585,7 +585,7 @@
  50.425  The set of fake messages that an intruder could invent
  50.426  starting from~@{text H} is @{text "synth(analz H)"}, where @{text "synth H"}
  50.427  formalizes what the adversary can build from the set of messages~$H$.  
  50.428 -*}
  50.429 +\<close>
  50.430  
  50.431  inductive_set
  50.432    synth :: "msg set \<Rightarrow> msg set"
  50.433 @@ -618,7 +618,7 @@
  50.434  apply (simp (no_asm_use))
  50.435  done
  50.436  (*>*)
  50.437 -text {*
  50.438 +text \<open>
  50.439  The set includes all agent names.  Nonces and keys are assumed to be
  50.440  unguessable, so none are included beyond those already in~$H$.   Two
  50.441  elements of @{term "synth H"} can be combined, and an element can be encrypted
  50.442 @@ -629,11 +629,11 @@
  50.443  @{named_thms [display,indent=0] analz_synth [no_vars] (analz_synth)}
  50.444  Rule inversion plays a major role in reasoning about @{text synth}, through
  50.445  declarations such as this one:
  50.446 -*}
  50.447 +\<close>
  50.448  
  50.449  inductive_cases Nonce_synth [elim!]: "Nonce n \<in> synth H"
  50.450  
  50.451 -text {*
  50.452 +text \<open>
  50.453  \noindent
  50.454  The resulting elimination rule replaces every assumption of the form
  50.455  @{term "Nonce n \<in> synth H"} by @{term "Nonce n \<in> H"},
  50.456 @@ -651,22 +651,22 @@
  50.457  use @{text parts} to express general well-formedness properties of a protocol,
  50.458  for example, that an uncompromised agent's private key will never be
  50.459  included as a component of any message.
  50.460 -*}
  50.461 +\<close>
  50.462  (*<*)
  50.463  lemma synth_increasing: "H \<subseteq> synth(H)"
  50.464  by blast
  50.465  
  50.466 -subsubsection{*Unions *}
  50.467 +subsubsection\<open>Unions\<close>
  50.468  
  50.469 -text{*Converse fails: we can synth more from the union than from the 
  50.470 -  separate parts, building a compound message using elements of each.*}
  50.471 +text\<open>Converse fails: we can synth more from the union than from the 
  50.472 +  separate parts, building a compound message using elements of each.\<close>
  50.473  lemma synth_Un: "synth(G) \<union> synth(H) \<subseteq> synth(G \<union> H)"
  50.474  by (intro Un_least synth_mono Un_upper1 Un_upper2)
  50.475  
  50.476  lemma synth_insert: "insert X (synth H) \<subseteq> synth(insert X H)"
  50.477  by (blast intro: synth_mono [THEN [2] rev_subsetD])
  50.478  
  50.479 -subsubsection{*Idempotence and transitivity *}
  50.480 +subsubsection\<open>Idempotence and transitivity\<close>
  50.481  
  50.482  lemma synth_synthD [dest!]: "X\<in> synth (synth H) ==> X\<in> synth H"
  50.483  by (erule synth.induct, blast+)
  50.484 @@ -683,7 +683,7 @@
  50.485  lemma synth_trans: "[| X\<in> synth G;  G \<subseteq> synth H |] ==> X\<in> synth H"
  50.486  by (drule synth_mono, blast)
  50.487  
  50.488 -text{*Cut; Lemma 2 of Lowe*}
  50.489 +text\<open>Cut; Lemma 2 of Lowe\<close>
  50.490  lemma synth_cut: "[| Y\<in> synth (insert X H);  X\<in> synth H |] ==> Y\<in> synth H"
  50.491  by (erule synth_trans, blast)
  50.492  
  50.493 @@ -706,7 +706,7 @@
  50.494  by (unfold keysFor_def, blast)
  50.495  
  50.496  
  50.497 -subsubsection{*Combinations of parts, analz and synth *}
  50.498 +subsubsection\<open>Combinations of parts, analz and synth\<close>
  50.499  
  50.500  lemma parts_synth [simp]: "parts (synth H) = parts H \<union> synth H"
  50.501  apply (rule equalityI)
  50.502 @@ -722,13 +722,13 @@
  50.503  done
  50.504  
  50.505  
  50.506 -subsubsection{*For reasoning about the Fake rule in traces *}
  50.507 +subsubsection\<open>For reasoning about the Fake rule in traces\<close>
  50.508  
  50.509  lemma parts_insert_subset_Un: "X\<in> G ==> parts(insert X H) \<subseteq> parts G \<union> parts H"
  50.510  by (rule subset_trans [OF parts_mono parts_Un_subset2], blast)
  50.511  
  50.512 -text{*More specifically for Fake.  Very occasionally we could do with a version
  50.513 -  of the form  @{term"parts{X} \<subseteq> synth (analz H) \<union> parts H"} *}
  50.514 +text\<open>More specifically for Fake.  Very occasionally we could do with a version
  50.515 +  of the form  @{term"parts{X} \<subseteq> synth (analz H) \<union> parts H"}\<close>
  50.516  lemma Fake_parts_insert:
  50.517       "X \<in> synth (analz H) ==>  
  50.518        parts (insert X H) \<subseteq> synth (analz H) \<union> parts H"
  50.519 @@ -742,8 +742,8 @@
  50.520        ==> Z \<in>  synth (analz H) \<union> parts H"
  50.521  by (blast dest: Fake_parts_insert  [THEN subsetD, dest])
  50.522  
  50.523 -text{*@{term H} is sometimes @{term"Key ` KK \<union> spies evs"}, so can't put 
  50.524 -  @{term "G=H"}.*}
  50.525 +text\<open>@{term H} is sometimes @{term"Key ` KK \<union> spies evs"}, so can't put 
  50.526 +  @{term "G=H"}.\<close>
  50.527  lemma Fake_analz_insert:
  50.528       "X\<in> synth (analz G) ==>  
  50.529        analz (insert X H) \<subseteq> synth (analz G) \<union> analz (G \<union> H)"
  50.530 @@ -762,8 +762,8 @@
  50.531       "(X \<in> analz H | X \<in> parts H) = (X \<in> parts H)"
  50.532  by (blast intro: analz_subset_parts [THEN subsetD])
  50.533  
  50.534 -text{*Without this equation, other rules for synth and analz would yield
  50.535 -  redundant cases*}
  50.536 +text\<open>Without this equation, other rules for synth and analz would yield
  50.537 +  redundant cases\<close>
  50.538  lemma MPair_synth_analz [iff]:
  50.539       "(\<lbrace>X,Y\<rbrace> \<in> synth (analz H)) =  
  50.540        (X \<in> synth (analz H) & Y \<in> synth (analz H))"
  50.541 @@ -775,12 +775,12 @@
  50.542  by blast
  50.543  
  50.544  
  50.545 -text{*We do NOT want Crypt... messages broken up in protocols!!*}
  50.546 +text\<open>We do NOT want Crypt... messages broken up in protocols!!\<close>
  50.547  declare parts.Body [rule del]
  50.548  
  50.549  
  50.550 -text{*Rewrites to push in Key and Crypt messages, so that other messages can
  50.551 -    be pulled out using the @{text analz_insert} rules*}
  50.552 +text\<open>Rewrites to push in Key and Crypt messages, so that other messages can
  50.553 +    be pulled out using the @{text analz_insert} rules\<close>
  50.554  
  50.555  lemmas pushKeys =
  50.556    insert_commute [of "Key K" "Agent C"]
  50.557 @@ -800,14 +800,14 @@
  50.558    insert_commute [of "Crypt X K" "MPair X' Y"]
  50.559    for X K C N X' Y
  50.560  
  50.561 -text{*Cannot be added with @{text "[simp]"} -- messages should not always be
  50.562 -  re-ordered. *}
  50.563 +text\<open>Cannot be added with @{text "[simp]"} -- messages should not always be
  50.564 +  re-ordered.\<close>
  50.565  lemmas pushes = pushKeys pushCrypts
  50.566  
  50.567  
  50.568 -subsection{*Tactics useful for many protocol proofs*}
  50.569 +subsection\<open>Tactics useful for many protocol proofs\<close>
  50.570  ML
  50.571 -{*
  50.572 +\<open>
  50.573  val invKey = @{thm invKey};
  50.574  val keysFor_def = @{thm keysFor_def};
  50.575  val symKeys_def = @{thm symKeys_def};
  50.576 @@ -858,11 +858,11 @@
  50.577         simp_tac ctxt 1,
  50.578         REPEAT (FIRSTGOAL (resolve_tac ctxt [allI,impI,notI,conjI,iffI])),
  50.579         DEPTH_SOLVE (atomic_spy_analz_tac ctxt 1)]) i);
  50.580 -*}
  50.581 +\<close>
  50.582  
  50.583 -text{*By default only @{text o_apply} is built-in.  But in the presence of
  50.584 +text\<open>By default only @{text o_apply} is built-in.  But in the presence of
  50.585  eta-expansion this means that some terms displayed as @{term "f o g"} will be
  50.586 -rewritten, and others will not!*}
  50.587 +rewritten, and others will not!\<close>
  50.588  declare o_def [simp]
  50.589  
  50.590  
  50.591 @@ -883,7 +883,7 @@
  50.592  apply (rule synth_analz_mono, blast)   
  50.593  done
  50.594  
  50.595 -text{*Two generalizations of @{text analz_insert_eq}*}
  50.596 +text\<open>Two generalizations of @{text analz_insert_eq}\<close>
  50.597  lemma gen_analz_insert_eq [rule_format]:
  50.598       "X \<in> analz H ==> ALL G. H \<subseteq> G --> analz (insert X G) = analz G"
  50.599  by (blast intro: analz_cut analz_insertI analz_mono [THEN [2] rev_subsetD])
  50.600 @@ -904,16 +904,16 @@
  50.601  
  50.602  lemmas Fake_parts_sing_imp_Un = Fake_parts_sing [THEN [2] rev_subsetD]
  50.603  
  50.604 -method_setup spy_analz = {*
  50.605 -    Scan.succeed (SIMPLE_METHOD' o spy_analz_tac) *}
  50.606 +method_setup spy_analz = \<open>
  50.607 +    Scan.succeed (SIMPLE_METHOD' o spy_analz_tac)\<close>
  50.608      "for proving the Fake case when analz is involved"
  50.609  
  50.610 -method_setup atomic_spy_analz = {*
  50.611 -    Scan.succeed (SIMPLE_METHOD' o atomic_spy_analz_tac) *}
  50.612 +method_setup atomic_spy_analz = \<open>
  50.613 +    Scan.succeed (SIMPLE_METHOD' o atomic_spy_analz_tac)\<close>
  50.614      "for debugging spy_analz"
  50.615  
  50.616 -method_setup Fake_insert_simp = {*
  50.617 -    Scan.succeed (SIMPLE_METHOD' o Fake_insert_simp_tac) *}
  50.618 +method_setup Fake_insert_simp = \<open>
  50.619 +    Scan.succeed (SIMPLE_METHOD' o Fake_insert_simp_tac)\<close>
  50.620      "for debugging spy_analz"
  50.621  
  50.622  
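[Aside, not part of this changeset: a sketch combining the analz rules above; possession of the matching key lets the spy decrypt.]

lemma "\<lbrakk>Crypt K X \<in> H; Key (invKey K) \<in> H\<rbrakk> \<Longrightarrow> X \<in> analz H"
by (blast intro: analz.Decrypt analz.Inj)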
    51.1 --- a/src/Doc/Tutorial/Protocol/NS_Public.thy	Thu Jan 11 13:48:17 2018 +0100
    51.2 +++ b/src/Doc/Tutorial/Protocol/NS_Public.thy	Fri Jan 12 14:08:53 2018 +0100
    51.3 @@ -6,12 +6,12 @@
    51.4  *)(*<*)
    51.5  theory NS_Public imports Public begin(*>*)
    51.6  
    51.7 -section{* Modelling the Protocol \label{sec:modelling} *}
    51.8 +section\<open>Modelling the Protocol \label{sec:modelling}\<close>
    51.9  
   51.10 -text_raw {*
   51.11 +text_raw \<open>
   51.12  \begin{figure}
   51.13  \begin{isabelle}
   51.14 -*}
   51.15 +\<close>
   51.16  
   51.17  inductive_set ns_public :: "event list set"
   51.18    where
   51.19 @@ -40,13 +40,13 @@
   51.20                \<in> set evs3\<rbrakk>
   51.21            \<Longrightarrow> Says A B (Crypt (pubK B) (Nonce NB)) # evs3 \<in> ns_public"
   51.22  
   51.23 -text_raw {*
   51.24 +text_raw \<open>
   51.25  \end{isabelle}
   51.26  \caption{An Inductive Protocol Definition}\label{fig:ns_public}
   51.27  \end{figure}
   51.28 -*}
   51.29 +\<close>
   51.30  
   51.31 -text {*
   51.32 +text \<open>
   51.33  Let us formalize the Needham-Schroeder public-key protocol, as corrected by
   51.34  Lowe:
   51.35  \begin{alignat*%
   51.36 @@ -84,9 +84,9 @@
   51.37  Benefits of this approach are simplicity and clarity.  The semantic model
   51.38  is set theory, proofs are by induction and the translation from the informal
   51.39  notation to the inductive rules is straightforward. 
   51.40 -*}
   51.41 +\<close>
   51.42  
   51.43 -section{* Proving Elementary Properties \label{sec:regularity} *}
   51.44 +section\<open>Proving Elementary Properties \label{sec:regularity}\<close>
   51.45  
   51.46  (*<*)
   51.47  declare knows_Spy_partsEs [elim]
   51.48 @@ -109,7 +109,7 @@
   51.49  (*Spy never sees another agent's private key! (unless it's bad at start)*)
   51.50  (*>*)
   51.51  
   51.52 -text {*
   51.53 +text \<open>
   51.54  Secrecy properties can be hard to prove.  The conclusion of a typical
   51.55  secrecy theorem is 
   51.56  @{term "X \<notin> analz (knows Spy evs)"}.  The difficulty arises from
   51.57 @@ -124,13 +124,13 @@
   51.58  @{text A}'s private key in a message, whether protected by encryption or
   51.59  not, is enough to confirm that @{text A} is compromised.  The proof, like
   51.60  nearly all protocol proofs, is by induction over traces.
   51.61 -*}
   51.62 +\<close>
   51.63  
   51.64  lemma Spy_see_priK [simp]:
   51.65        "evs \<in> ns_public
   51.66         \<Longrightarrow> (Key (priK A) \<in> parts (knows Spy evs)) = (A \<in> bad)"
   51.67  apply (erule ns_public.induct, simp_all)
   51.68 -txt {*
   51.69 +txt \<open>
   51.70  The induction yields five subgoals, one for each rule in the definition of
   51.71  @{text ns_public}.  The idea is to prove that the protocol property holds initially
   51.72  (rule @{text Nil}), is preserved by each of the legitimate protocol steps (rules
   51.73 @@ -141,7 +141,7 @@
   51.74  at all, so only @{text Fake} is relevant. Indeed, simplification leaves
   51.75  only the @{text Fake} case, as indicated by the variable name @{text evsf}:
   51.76  @{subgoals[display,indent=0,margin=65]}
   51.77 -*}
   51.78 +\<close>
   51.79  by blast
   51.80  (*<*)
   51.81  lemma Spy_analz_priK [simp]:
   51.82 @@ -149,7 +149,7 @@
   51.83  by auto
   51.84  (*>*)
   51.85  
   51.86 -text {*
   51.87 +text \<open>
   51.88  The @{text Fake} case is proved automatically.  If
   51.89  @{term "priK A"} is in the extended trace then either (1) it was already in the
   51.90  original trace or (2) it was
   51.91 @@ -165,7 +165,7 @@
   51.92  induction, simplification, @{text blast}.  The first line uses the rule
   51.93  @{text rev_mp} to prepare the induction by moving two assumptions into the 
   51.94  induction formula.
   51.95 -*}
   51.96 +\<close>
   51.97  
   51.98  lemma no_nonce_NS1_NS2:
   51.99      "\<lbrakk>Crypt (pubK C) \<lbrace>NA', Nonce NA, Agent D\<rbrace> \<in> parts (knows Spy evs);
  51.100 @@ -177,11 +177,11 @@
  51.101  apply (blast intro: analz_insertI)+
  51.102  done
  51.103  
  51.104 -text {*
  51.105 +text \<open>
  51.106  The following unicity lemma states that, if \isa{NA} is secret, then its
  51.107  appearance in any instance of message~1 determines the other components. 
  51.108  The proof is similar to the previous one.
  51.109 -*}
  51.110 +\<close>
  51.111  
  51.112  lemma unique_NA:
  51.113       "\<lbrakk>Crypt(pubK B)  \<lbrace>Nonce NA, Agent A \<rbrace> \<in> parts(knows Spy evs);
  51.114 @@ -196,7 +196,7 @@
  51.115  done
  51.116  (*>*)
  51.117  
  51.118 -section{* Proving Secrecy Theorems \label{sec:secrecy} *}
  51.119 +section\<open>Proving Secrecy Theorems \label{sec:secrecy}\<close>
  51.120  
  51.121  (*<*)
  51.122  (*Secrecy: Spy does not see the nonce sent in msg NS1 if A and B are secure
  51.123 @@ -264,21 +264,21 @@
  51.124  done
  51.125  (*>*)
  51.126  
  51.127 -text {*
  51.128 +text \<open>
  51.129  The secrecy theorems for Bob (the second participant) are especially
  51.130  important because they fail for the original protocol.  The following
  51.131  theorem states that if Bob sends message~2 to Alice, and both agents are
  51.132  uncompromised, then Bob's nonce will never reach the spy.
  51.133 -*}
  51.134 +\<close>
  51.135  
  51.136  theorem Spy_not_see_NB [dest]:
  51.137   "\<lbrakk>Says B A (Crypt (pubK A) \<lbrace>Nonce NA, Nonce NB, Agent B\<rbrace>) \<in> set evs;
  51.138     A \<notin> bad;  B \<notin> bad;  evs \<in> ns_public\<rbrakk>
  51.139    \<Longrightarrow> Nonce NB \<notin> analz (knows Spy evs)"
  51.140 -txt {*
  51.141 +txt \<open>
  51.142  To prove it, we must formulate the induction properly (one of the
  51.143  assumptions mentions~@{text evs}), apply induction, and simplify:
  51.144 -*}
  51.145 +\<close>
  51.146  
  51.147  apply (erule rev_mp, erule ns_public.induct, simp_all)
  51.148  (*<*)
  51.149 @@ -288,7 +288,7 @@
  51.150  apply (blast intro: no_nonce_NS1_NS2)
  51.151  (*>*)
  51.152  
  51.153 -txt {*
  51.154 +txt \<open>
  51.155  The proof states are too complicated to present in full.  
  51.156  Let's examine the simplest subgoal, that for message~1.  The following
  51.157  event has just occurred:
  51.158 @@ -335,7 +335,7 @@
  51.159  @{text B} has sent an instance of message~2 to~@{text A} and has received the
  51.160  expected reply, then that reply really originated with~@{text A}.  The
  51.161  proof is a simple induction.
  51.162 -*}
  51.163 +\<close>
  51.164  
  51.165  (*<*)
  51.166  by (blast intro: no_nonce_NS1_NS2)
  51.167 @@ -368,7 +368,7 @@
  51.168  by (erule ns_public.induct, auto)
  51.169  (*>*)
  51.170  
  51.171 -text {*
  51.172 +text \<open>
  51.173  From similar assumptions, we can prove that @{text A} started the protocol
  51.174  run by sending an instance of message~1 involving the nonce~@{text NA}\@. 
  51.175  For this theorem, the conclusion is 
  51.176 @@ -395,6 +395,6 @@
  51.177  the strategy illustrated above, but the subgoals can
  51.178  be much bigger and there are more of them.
  51.179  \index{protocols!security|)}
  51.180 -*}
  51.181 +\<close>
  51.182  
  51.183  (*<*)end(*>*)
    52.1 --- a/src/Doc/Tutorial/Protocol/Public.thy	Thu Jan 11 13:48:17 2018 +0100
    52.2 +++ b/src/Doc/Tutorial/Protocol/Public.thy	Fri Jan 12 14:08:53 2018 +0100
    52.3 @@ -10,13 +10,13 @@
    52.4  begin
    52.5  (*>*)
    52.6  
    52.7 -text {*
    52.8 +text \<open>
    52.9  The function
   52.10  @{text pubK} maps agents to their public keys.  The function
   52.11  @{text priK} maps agents to their private keys.  It is merely
   52.12  an abbreviation (cf.\ \S\ref{sec:abbreviations}) defined in terms of
   52.13  @{text invKey} and @{text pubK}.
   52.14 -*}
   52.15 +\<close>
   52.16  
   52.17  consts pubK :: "agent \<Rightarrow> key"
   52.18  abbreviation priK :: "agent \<Rightarrow> key"
   52.19 @@ -37,7 +37,7 @@
   52.20  end
   52.21  (*>*)
   52.22  
   52.23 -text {*
   52.24 +text \<open>
   52.25  \noindent
   52.26  The set @{text bad} consists of those agents whose private keys are known to
   52.27  the spy.
   52.28 @@ -45,7 +45,7 @@
   52.29  Two axioms are asserted about the public-key cryptosystem. 
   52.30  No two agents have the same public key, and no private key equals
   52.31  any public key.
   52.32 -*}
   52.33 +\<close>
   52.34  
   52.35  axiomatization where
   52.36    inj_pubK:        "inj pubK" and
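The second axiom is cut off by the hunk boundary. From the prose ("no private key equals any public key") it is presumably:

    priK_neq_pubK:  "priK A \<noteq> pubK B"   (* name and statement assumed *)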
   52.37 @@ -156,16 +156,16 @@
   52.38  (*Specialized methods*)
   52.39  
   52.40  (*Tactic for possibility theorems*)
   52.41 -ML {*
   52.42 +ML \<open>
   52.43  fun possibility_tac ctxt =
   52.44      REPEAT (*omit used_Says so that Nonces start from different traces!*)
   52.45      (ALLGOALS (simp_tac (ctxt delsimps [used_Says]))
   52.46       THEN
   52.47       REPEAT_FIRST (eq_assume_tac ORELSE' 
   52.48                     resolve_tac ctxt [refl, conjI, @{thm Nonce_supply}]));
   52.49 -*}
   52.50 +\<close>
   52.51  
   52.52 -method_setup possibility = {* Scan.succeed (SIMPLE_METHOD o possibility_tac) *}
   52.53 +method_setup possibility = \<open>Scan.succeed (SIMPLE_METHOD o possibility_tac)\<close>
   52.54      "for proving possibility theorems"
   52.55  
   52.56  end
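A usage sketch for the new possibility method (hypothetical goal, assuming the ns_public protocol rules defined in NS_Public.thy): first build a concrete trace by chaining the protocol's introduction rules, then let possibility discharge the remaining side conditions:

    lemma "\<exists>NB. \<exists>evs \<in> ns_public. Says A B (Crypt (pubK B) (Nonce NB)) \<in> set evs"
    apply (intro exI bexI)
    apply (rule_tac [2] ns_public.Nil
           [THEN ns_public.NS1, THEN ns_public.NS2, THEN ns_public.NS3])
    by possibility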
    53.1 --- a/src/Doc/Tutorial/Recdef/Induction.thy	Thu Jan 11 13:48:17 2018 +0100
    53.2 +++ b/src/Doc/Tutorial/Recdef/Induction.thy	Fri Jan 12 14:08:53 2018 +0100
    53.3 @@ -2,7 +2,7 @@
    53.4  theory Induction imports examples simplification begin
    53.5  (*>*)
    53.6  
    53.7 -text{*
    53.8 +text\<open>
    53.9  Assuming we have defined our function such that Isabelle could prove
   53.10  termination and that the recursion equations (or some suitable derived
   53.11  equations) are simplification rules, we might like to prove something about
   53.12 @@ -17,29 +17,29 @@
   53.13  you are trying to establish holds for the left-hand side provided it holds
   53.14  for all recursive calls on the right-hand side. Here is a simple example
   53.15  involving the predefined @{term"map"} functional on lists:
   53.16 -*}
   53.17 +\<close>
   53.18  
   53.19  lemma "map f (sep(x,xs)) = sep(f x, map f xs)"
   53.20  
   53.21 -txt{*\noindent
   53.22 +txt\<open>\noindent
   53.23  Note that @{term"map f xs"}
   53.24  is the result of applying @{term"f"} to all elements of @{term"xs"}. We prove
   53.25  this lemma by recursion induction over @{term"sep"}:
   53.26 -*}
   53.27 +\<close>
   53.28  
   53.29  apply(induct_tac x xs rule: sep.induct)
   53.30  
   53.31 -txt{*\noindent
   53.32 +txt\<open>\noindent
   53.33  The resulting proof state has three subgoals corresponding to the three
   53.34  clauses for @{term"sep"}:
   53.35  @{subgoals[display,indent=0]}
   53.36  The rest is pure simplification:
   53.37 -*}
   53.38 +\<close>
   53.39  
   53.40  apply simp_all
   53.41  done
   53.42  
   53.43 -text{*
   53.44 +text\<open>
   53.45  Try proving the above lemma by structural induction, and you find that you
   53.46  need an additional case distinction. What is worse, the names of variables
   53.47  are invented by Isabelle and have nothing to do with the names in the
   53.48 @@ -64,7 +64,7 @@
   53.49  empty list, the singleton list, and the list with at least two elements.
   53.50  The final case has an induction hypothesis:  you may assume that @{term"P"}
   53.51  holds for the tail of that list.
   53.52 -*}
   53.53 +\<close>
   53.54  
   53.55  (*<*)
   53.56  end
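The display of @{thm sep.induct} itself is elided by the hunk boundary above. Its shape can be inferred from the three clauses of sep (a sketch; the actual variable names are invented by Isabelle):

    \<lbrakk>\<And>a. P a [];
     \<And>a x. P a [x];
     \<And>a x y zs. P a (y # zs) \<Longrightarrow> P a (x # y # zs)\<rbrakk>
    \<Longrightarrow> P u v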
    54.1 --- a/src/Doc/Tutorial/Recdef/Nested0.thy	Thu Jan 11 13:48:17 2018 +0100
    54.2 +++ b/src/Doc/Tutorial/Recdef/Nested0.thy	Fri Jan 12 14:08:53 2018 +0100
    54.3 @@ -2,14 +2,14 @@
    54.4  theory Nested0 imports Main begin
    54.5  (*>*)
    54.6  
    54.7 -text{*
    54.8 +text\<open>
    54.9  \index{datatypes!nested}%
   54.10  In \S\ref{sec:nested-datatype} we defined the datatype of terms
   54.11 -*}
   54.12 +\<close>
   54.13  
   54.14  datatype ('a,'b)"term" = Var 'a | App 'b "('a,'b)term list"
   54.15  
   54.16 -text{*\noindent
   54.17 +text\<open>\noindent
   54.18  and closed with the observation that the associated schema for the definition
   54.19  of primitive recursive functions leads to overly verbose definitions. Moreover,
   54.20  if you have worked exercise~\ref{ex:trev-trev} you will have noticed that
   54.21 @@ -18,7 +18,7 @@
   54.22  We will now show you how \isacommand{recdef} can simplify
   54.23  definitions and proofs about nested recursive datatypes. As an example we
   54.24  choose exercise~\ref{ex:trev-trev}:
   54.25 -*}
   54.26 +\<close>
   54.27  
   54.28  consts trev  :: "('a,'b)term \<Rightarrow> ('a,'b)term"
   54.29  (*<*)end(*>*)
    55.1 --- a/src/Doc/Tutorial/Recdef/Nested1.thy	Thu Jan 11 13:48:17 2018 +0100
    55.2 +++ b/src/Doc/Tutorial/Recdef/Nested1.thy	Fri Jan 12 14:08:53 2018 +0100
    55.3 @@ -2,7 +2,7 @@
    55.4  theory Nested1 imports Nested0 begin
    55.5  (*>*)
    55.6  
    55.7 -text{*\noindent
    55.8 +text\<open>\noindent
    55.9  Although the definition of @{term trev} below is quite natural, we will have
   55.10  to overcome a minor difficulty in convincing Isabelle of its termination.
   55.11  It is precisely this difficulty that is the \textit{raison d'\^etre} of
   55.12 @@ -11,13 +11,13 @@
   55.13  Defining @{term trev} by \isacommand{recdef} rather than \isacommand{primrec}
   55.14  simplifies matters because we are now free to use the recursion equation
   55.15  suggested at the end of \S\ref{sec:nested-datatype}:
   55.16 -*}
   55.17 +\<close>
   55.18  
   55.19  recdef (*<*)(permissive)(*>*)trev "measure size"
   55.20   "trev (Var x)    = Var x"
   55.21   "trev (App f ts) = App f (rev(map trev ts))"
   55.22  
   55.23 -text{*\noindent
   55.24 +text\<open>\noindent
   55.25  Remember that function @{term size} is defined for each \isacommand{datatype}.
   55.26  However, the definition does not succeed. Isabelle complains about an
   55.27  unproved termination condition
   55.28 @@ -36,7 +36,7 @@
   55.29  \isacommand{recdef} knows about @{term map}.
   55.30  
   55.31  The termination condition is easily proved by induction:
   55.32 -*}
   55.33 +\<close>
   55.34  
   55.35  (*<*)
   55.36  end
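The induction proof of the termination condition is elided here; it opens Nested2.thy below. Under the conventions of the current tutorial sources it presumably runs along these lines (statement assumed, not shown in this diff):

    lemma [simp]: "t \<in> set ts \<longrightarrow> size t < Suc (size_list size ts)"
    by (induct_tac ts, auto)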
    56.1 --- a/src/Doc/Tutorial/Recdef/Nested2.thy	Thu Jan 11 13:48:17 2018 +0100
    56.2 +++ b/src/Doc/Tutorial/Recdef/Nested2.thy	Fri Jan 12 14:08:53 2018 +0100
    56.3 @@ -9,25 +9,25 @@
    56.4   "trev (Var x) = Var x"
    56.5   "trev (App f ts) = App f (rev(map trev ts))"
    56.6  (*>*)
    56.7 -text{*\noindent
    56.8 +text\<open>\noindent
  56.9  Once this theorem is a simplification rule, \isacommand{recdef}
  56.10 applies it automatically and the definition of @{term"trev"}
  56.11 now succeeds. As a reward for our effort, we can now prove the desired
   56.12  lemma directly.  We no longer need the verbose
   56.13  induction schema for type @{text"term"} and can use the simpler one arising from
   56.14  @{term"trev"}:
   56.15 -*}
   56.16 +\<close>
   56.17  
   56.18  lemma "trev(trev t) = t"
   56.19  apply(induct_tac t rule: trev.induct)
   56.20 -txt{*
   56.21 +txt\<open>
   56.22  @{subgoals[display,indent=0]}
   56.23  Both the base case and the induction step fall to simplification:
   56.24 -*}
   56.25 +\<close>
   56.26  
   56.27  by(simp_all add: rev_map sym[OF map_compose] cong: map_cong)
   56.28  
   56.29 -text{*\noindent
   56.30 +text\<open>\noindent
   56.31  If the proof of the induction step mystifies you, we recommend that you go through
   56.32  the chain of simplification steps in detail; you will probably need the help of
   56.33  @{text"simp_trace"}. Theorem @{thm[source]map_cong} is discussed below.
   56.34 @@ -65,7 +65,7 @@
   56.35  into a situation where you need to supply \isacommand{recdef} with new
   56.36  congruence rules, you can append a hint after the end of
   56.37  the recursion equations:\cmmdx{hints}
   56.38 -*}
   56.39 +\<close>
   56.40  (*<*)
   56.41  consts dummy :: "nat => nat"
   56.42  recdef dummy "{}"
   56.43 @@ -73,19 +73,19 @@
   56.44  (*>*)
   56.45  (hints recdef_cong: map_cong)
   56.46  
   56.47 -text{*\noindent
   56.48 +text\<open>\noindent
   56.49  Or you can declare them globally
   56.50  by giving them the \attrdx{recdef_cong} attribute:
   56.51 -*}
   56.52 +\<close>
   56.53  
   56.54  declare map_cong[recdef_cong]
   56.55  
   56.56 -text{*
   56.57 +text\<open>
   56.58  The @{text cong} and @{text recdef_cong} attributes are
   56.59  intentionally kept apart because they control different activities, namely
   56.60  simplification and making recursive definitions.
   56.61  %The simplifier's congruence rules cannot be used by recdef.
   56.62  %For example the weak congruence rules for if and case would prevent
   56.63  %recdef from generating sensible termination conditions.
   56.64 -*}
   56.65 +\<close>
   56.66  (*<*)end(*>*)
    57.1 --- a/src/Doc/Tutorial/Recdef/examples.thy	Thu Jan 11 13:48:17 2018 +0100
    57.2 +++ b/src/Doc/Tutorial/Recdef/examples.thy	Fri Jan 12 14:08:53 2018 +0100
    57.3 @@ -2,9 +2,9 @@
    57.4  theory examples imports Main begin
    57.5  (*>*)
    57.6  
    57.7 -text{*
    57.8 +text\<open>
    57.9  Here is a simple example, the \rmindex{Fibonacci function}:
   57.10 -*}
   57.11 +\<close>
   57.12  
   57.13  consts fib :: "nat \<Rightarrow> nat"
   57.14  recdef fib "measure(\<lambda>n. n)"
   57.15 @@ -12,7 +12,7 @@
   57.16    "fib (Suc 0) = 1"
   57.17    "fib (Suc(Suc x)) = fib x + fib (Suc x)"
   57.18  
   57.19 -text{*\noindent
   57.20 +text\<open>\noindent
   57.21  \index{measure functions}%
   57.22  The definition of @{term"fib"} is accompanied by a \textbf{measure function}
   57.23  @{term"%n. n"} which maps the argument of @{term"fib"} to a
   57.24 @@ -25,7 +25,7 @@
   57.25  
   57.26  Slightly more interesting is the insertion of a fixed element
   57.27  between any two elements of a list:
   57.28 -*}
   57.29 +\<close>
   57.30  
   57.31  consts sep :: "'a \<times> 'a list \<Rightarrow> 'a list"
   57.32  recdef sep "measure (\<lambda>(a,xs). length xs)"
   57.33 @@ -33,7 +33,7 @@
   57.34    "sep(a, [x])    = [x]"
   57.35    "sep(a, x#y#zs) = x # a # sep(a,y#zs)"
   57.36  
   57.37 -text{*\noindent
   57.38 +text\<open>\noindent
   57.39  This time the measure is the length of the list, which decreases with the
   57.40  recursive call; the first component of the argument tuple is irrelevant.
   57.41  The details of tupled $\lambda$-abstractions @{text"\<lambda>(x\<^sub>1,\<dots>,x\<^sub>n)"} are
   57.42 @@ -41,24 +41,24 @@
   57.43  
   57.44  Pattern matching\index{pattern matching!and \isacommand{recdef}}
   57.45  need not be exhaustive:
   57.46 -*}
   57.47 +\<close>
   57.48  
   57.49  consts last :: "'a list \<Rightarrow> 'a"
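The equations for last are cut off here; the point is that there is no clause for the empty list. A sketch of the presumable definition (measure and equations assumed, not shown in this diff):

    recdef last "measure (\<lambda>xs. length xs)"
      "last [x] = x"
      "last (x#y#zs) = last (y#zs)"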