src/Doc/Tutorial/CTL/CTL.thy
changeset 58860 fee7cfa69c50
parent 58620 7435b6a3f72e
child 67406 23307fd33906
(*<*)theory CTL imports Base begin(*>*)

subsection{*Computation Tree Logic --- CTL*}

text{*\label{sec:CTL}
\index{CTL|(}%
The semantics of PDL only needs reflexive transitive closure.
Let us be adventurous and introduce a more expressive temporal operator.
We extend the datatype
@{text formula} by a new constructor
*}
(*<*)
datatype formula = Atom "atom"
                  | Neg formula
                  | And formula formula
                  | AX formula
                  | EF formula(*>*)
                  | AF formula

text{*\noindent
which stands for ``\emph{A}lways in the \emph{F}uture'':
on all infinite paths, at some point the formula holds.
Formalizing the notion of an infinite path is easy
in HOL: it is simply a function from @{typ nat} to @{typ state}.
*}

definition Paths :: "state \<Rightarrow> (nat \<Rightarrow> state)set" where
"Paths s \<equiv> {p. s = p 0 \<and> (\<forall>i. (p i, p(i+1)) \<in> M)}"

text{*\noindent
This definition allows a succinct statement of the semantics of @{const AF}:
\footnote{Do not be misled: neither datatypes nor recursive functions can be
extended by new constructors or equations. This is just a trick of the
presentation (see \S\ref{sec:doc-prep-suppress}). In reality one has to define
a new datatype and a new function.}
*}
(*<*)
primrec valid :: "state \<Rightarrow> formula \<Rightarrow> bool" ("(_ \<Turnstile> _)" [80,80] 80) where
"s \<Turnstile> Atom a  =  (a \<in> L s)" |
"s \<Turnstile> Neg f   = (~(s \<Turnstile> f))" |
"s \<Turnstile> And f g = (s \<Turnstile> f \<and> s \<Turnstile> g)" |
    45 "s \<Turnstile> AF f    = (\<forall>p \<in> Paths s. \<exists>i. p i \<Turnstile> f)"
    45 "s \<Turnstile> AF f    = (\<forall>p \<in> Paths s. \<exists>i. p i \<Turnstile> f)"

text{*\noindent
Model checking @{const AF} involves a function which
is just complicated enough to warrant a separate definition:
*}

definition af :: "state set \<Rightarrow> state set \<Rightarrow> state set" where
"af A T \<equiv> A \<union> {s. \<forall>t. (s, t) \<in> M \<longrightarrow> t \<in> T}"

text{*\noindent
Now we define @{term "mc(AF f)"} as the least set @{term T} that includes
@{term"mc f"} and all states all of whose direct successors are in @{term T}:
*}
(*<*)
primrec mc :: "formula \<Rightarrow> state set" where
"mc(Atom a)  = {s. a \<in> L s}" |
"mc(Neg f)   = -mc f" |
"mc(And f g) = mc f \<inter> mc g" |
"mc(AX f)    = {s. \<forall>t. (s,t) \<in> M  \<longrightarrow> t \<in> mc f}" |
"mc(EF f)    = lfp(\<lambda>T. mc f \<union> M\<inverse> `` T)"|(*>*)
"mc(AF f)    = lfp(af(mc f))"

text{*\noindent
Because @{const af} is monotone in its second argument (and also its first, but
that is irrelevant), @{term"af A"} has a least fixed point:
*}

lemma mono_af: "mono(af A)"
apply(simp add: mono_def af_def)
apply blast
done
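
text{*\noindent
Monotonicity pays off immediately: by @{thm[source]lfp_unfold} the least fixed
point satisfies the expected unfolding equation. This one-line check is an
illustration added here, not part of the original development:
*}

lemma "lfp(af A) = af A (lfp(af A))"
by(rule lfp_unfold[OF mono_af])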
(*<*)
lemma mono_ef: "mono(\<lambda>T. A \<union> M\<inverse> `` T)"
apply(rule monoI)
by(blast)

lemma EF_lemma:
  "lfp(\<lambda>T. A \<union> M\<inverse> `` T) = {s. \<exists>t. (s,t) \<in> M\<^sup>* \<and> t \<in> A}"
apply(rule equalityI)
 apply(rule subsetI)
 apply(simp)
 apply(erule lfp_induct_set)
  apply(rule mono_ef)
 apply(simp)
 apply(blast intro: rtrancl_trans)
apply(rule subsetI)
apply(simp, clarify)
apply(erule converse_rtrancl_induct)
 apply(subst lfp_unfold[OF mono_ef])
 apply(blast)
apply(subst lfp_unfold[OF mono_ef])
by(blast)
(*>*)
text{*
All we need to prove now is  @{prop"mc(AF f) = {s. s \<Turnstile> AF f}"}, which states
that @{term mc} and @{text"\<Turnstile>"} agree for @{const AF}\@.
This time we prove the two inclusions separately, starting
with the easy one:
*}

theorem AF_lemma1: "lfp(af A) \<subseteq> {s. \<forall>p \<in> Paths s. \<exists>i. p i \<in> A}"

txt{*\noindent
In contrast to the analogous proof for @{const EF}, and just
for a change, we do not use fixed point induction here but the weaker rule of
Park induction, @{thm[source]lfp_lowerbound}:
\begin{center}
@{thm lfp_lowerbound[of _ "S",no_vars]} \hfill (@{thm[source]lfp_lowerbound})
\end{center}
The instance of the premise @{prop"f S \<subseteq> S"} is proved pointwise,
a decision that \isa{auto} takes for us:
*}
apply(rule lfp_lowerbound)
apply(auto simp add: af_def Paths_def)

txt{*
@{subgoals[display,indent=0,margin=70,goals_limit=1]}
In this remaining case, we set @{term t} to @{term"p(1::nat)"}.
The rest is automatic, which is surprising because it involves
finding the instantiation @{term"\<lambda>i::nat. p(i+1)"}
for @{text"\<forall>p"}.
*}

apply(erule_tac x = "p 1" in allE)
apply(auto)
done
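
text{*\noindent
The instantiation @{term"\<lambda>i::nat. p(i+1)"} mentioned above is just the tail of
the path. The following small lemma, added here as an illustration, records
that dropping the first element of a path again yields a path:
*}

lemma "p \<in> Paths s \<Longrightarrow> (\<lambda>i. p(i+1)) \<in> Paths (p 1)"
by(simp add: Paths_def)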


text{*
The opposite inclusion is proved by contradiction: if some state
@{term s} is not in @{term"lfp(af A)"}, then we can construct an
infinite @{term A}-avoiding path starting in @{term s}: unfolding
@{const lfp} once shows that such an @{term s} is not in @{term A} and
has a direct successor that is again not in \mbox{@{term"lfp(af
A)"}}. Iterating this argument yields the promised infinite
@{term A}-avoiding path. Let us formalize this sketch.

The one-step argument in the sketch above
is proved by a variant of contraposition:
*}

lemma not_in_lfp_afD:
 "s \<notin> lfp(af A) \<Longrightarrow> s \<notin> A \<and> (\<exists> t. (s,t) \<in> M \<and> t \<notin> lfp(af A))"
apply(erule contrapos_np)
apply(subst lfp_unfold[OF mono_af])
apply(simp add: af_def)
done

text{*\noindent
We assume the negation of the conclusion and prove @{term"s : lfp(af A)"}.
Unfolding @{const lfp} once and
simplifying with the definition of @{const af} finishes the proof.

Now we iterate this process. The following construction of the desired
path is parameterized by a predicate @{term Q} that should hold along the path:
*}

primrec path :: "state \<Rightarrow> (state \<Rightarrow> bool) \<Rightarrow> (nat \<Rightarrow> state)" where
"path s Q 0 = s" |
"path s Q (Suc n) = (SOME t. (path s Q n,t) \<in> M \<and> Q t)"

text{*\noindent
Element @{term"path s Q (n+1)"} on the path is some arbitrary successor
@{term t} of element @{term"path s Q n"} such that @{term"Q t"} holds.  Of
course, such a @{term t} need not exist, but that is of no
concern to us since we will only use @{const path} when a
suitable @{term t} does exist.

Let us show that if each state @{term s} that satisfies @{term Q}
has a successor that again satisfies @{term Q}, then there exists an infinite @{term Q}-path:
*}
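
text{*\noindent
For illustration, and making no claim beyond the defining equations, the first
step of the construction unfolds as expected (we write @{term"Suc 0"} rather
than @{text 1} so that the recursion equations apply directly); this check is
an addition to the original text:
*}

lemma "path s Q (Suc 0) = (SOME t. (s,t) \<in> M \<and> Q t)"
by simp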

lemma infinity_lemma:
  "\<lbrakk> Q s; \<forall>s. Q s \<longrightarrow> (\<exists> t. (s,t) \<in> M \<and> Q t) \<rbrakk> \<Longrightarrow>
   \<exists>p\<in>Paths s. \<forall>i. Q(p i)"

txt{*\noindent
First we rephrase the conclusion slightly because we need to prove simultaneously
both the path property and the fact that @{term Q} holds:
*}

apply(subgoal_tac
  "\<exists>p. s = p 0 \<and> (\<forall>i::nat. (p i, p(i+1)) \<in> M \<and> Q(p i))")

txt{*\noindent
From this proposition the original goal follows easily:
*}

 apply(simp add: Paths_def, blast)

txt{*\noindent
The new subgoal is proved by providing the witness @{term "path s Q"} for @{term p}:
*}

apply(rule_tac x = "path s Q" in exI)
apply(clarsimp)

txt{*\noindent
After simplification and clarification, the subgoal has the following form:
@{subgoals[display,indent=0,margin=70,goals_limit=1]}
It invites a proof by induction on @{term i}:
*}

apply(induct_tac i)
 apply(simp)

txt{*\noindent
After simplification, the base case boils down to
@{subgoals[display,indent=0,margin=70,goals_limit=1]}
The conclusion looks exceedingly trivial: after all, @{term t} is chosen such that @{prop"(s,t):M"}
and @{prop"Q t"} holds. However, we first have to show that such a @{term t}
actually exists! This reasoning is embodied in the theorem @{thm[source]someI2_ex}:
@{thm[display,eta_contract=false]someI2_ex}
When we apply this theorem as an introduction rule, @{text"?P x"} becomes
@{prop"(s, x) : M & Q x"} and @{text"?Q x"} becomes @{prop"(s,x) : M"} and we have to prove
two subgoals: @{prop"EX a. (s, a) : M & Q a"}, which follows from the assumptions, and
@{prop"(s, x) : M & Q x ==> (s,x) : M"}, which is trivial. Thus it is not surprising that
@{text fast} can prove the base case quickly:
*}

 apply(fast intro: someI2_ex)

txt{*\noindent
What is worth noting here is that we have used \methdx{fast} rather than
@{text blast}.  The reason is that @{text blast} would fail because it cannot
cope with @{thm[source]someI2_ex}: unifying its conclusion with the current
subgoal requires non-trivial higher-order unification of the schematic
variables, which @{text blast} does not attempt, whereas @{text fast} does.

The induction step is similar, but more involved, because now we face nested
occurrences of @{text SOME}. As a result, @{text fast} is no longer able to
solve the subgoal and we apply @{thm[source]someI2_ex} by hand.  We merely
show the proof commands but do not describe the details:
*}

apply(simp)
apply(rule someI2_ex)
 apply(blast)
apply(rule someI2_ex)
 apply(blast)
apply(blast)
done

text{*
Function @{const path} has fulfilled its purpose now and can be forgotten.
It was merely defined to provide the witness in the proof of the
@{thm[source]infinity_lemma}. Aficionados of minimal proofs might like to know
that we could have given the witness without having to define a new function:
the term
@{term[display]"rec_nat s (\<lambda>n t. SOME u. (t,u)\<in>M \<and> Q u)"}
is extensionally equal to @{term"path s Q"},
where @{term rec_nat} is the predefined primitive recursor on @{typ nat}.
*}
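
text{*\noindent
This claim can be checked directly: assuming, as is the case by default, that
the recursion equations for @{term rec_nat} are available to the simplifier,
induction on the argument proves the pointwise equality. The following sketch
is an addition for illustration only:
*}

(* illustrative check, not part of the original development *)
lemma "path s Q n = rec_nat s (\<lambda>n t. SOME u. (t,u)\<in>M \<and> Q u) n"
apply(induct_tac n)
apply(simp_all)
done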
(*<*)
lemma
"\<lbrakk> Q s; \<forall> s. Q s \<longrightarrow> (\<exists> t. (s,t)\<in>M \<and> Q t) \<rbrakk> \<Longrightarrow>
 \<exists> p\<in>Paths s. \<forall> i. Q(p i)"
apply(subgoal_tac
 "\<exists> p. s = p 0 \<and> (\<forall> i. (p i,p(Suc i))\<in>M \<and> Q(p i))")
 apply(simp add: Paths_def)
 apply(blast)
apply(rule_tac x = "rec_nat s (\<lambda>n t. SOME u. (t,u)\<in>M \<and> Q u)" in exI)
apply(simp)
apply(intro strip)
apply(induct_tac i)
 apply(simp)
 apply(fast intro: someI2_ex)
apply(simp)
apply(rule someI2_ex)
 apply(blast)
apply(rule someI2_ex)
 apply(blast)
by(blast)
(*>*)

text{*
At last we can prove the opposite direction of @{thm[source]AF_lemma1}:
*}

theorem AF_lemma2: "{s. \<forall>p \<in> Paths s. \<exists>i. p i \<in> A} \<subseteq> lfp(af A)"

txt{*\noindent
The proof is again pointwise and then by contraposition:
*}

apply(rule subsetI)
apply(erule contrapos_pp)
apply simp

txt{*
@{subgoals[display,indent=0,goals_limit=1]}
Applying the @{thm[source]infinity_lemma} as a destruction rule leaves two subgoals, the second
premise of @{thm[source]infinity_lemma} and the original subgoal:
*}

apply(drule infinity_lemma)

txt{*
@{subgoals[display,indent=0,margin=65]}
Both are solved automatically:
*}

 apply(auto dest: not_in_lfp_afD)
done

text{*
If you find these proofs too complicated, we recommend that you read
\S\ref{sec:CTL-revisited}, where we show how inductive definitions lead to
simpler arguments.

The main theorem is proved as for PDL, except that we also derive the
necessary equality @{text"lfp(af A) = ..."} by combining
@{thm[source]AF_lemma1} and @{thm[source]AF_lemma2} on the spot:
*}

theorem "mc f = {s. s \<Turnstile> f}"
apply(induct_tac f)
apply(auto simp add: EF_lemma equalityI[OF AF_lemma1 AF_lemma2])
done
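
text{*\noindent
For the record, the equality that the @{text auto} step above assembles on the
fly can also be stated on its own. This variant is added here merely as an
illustration of combining the two inclusions:
*}

lemma "lfp(af A) = {s. \<forall>p \<in> Paths s. \<exists>i. p i \<in> A}"
by(rule equalityI[OF AF_lemma1 AF_lemma2])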

text{*

The language defined above is not quite CTL\@. The latter also includes an
 apply simp
apply(rule_tac x = "xa#xb" in exI)
apply simp
done

lemma mono_eufix: "mono(eufix A B)"
apply(simp add: mono_def eufix_def)
apply blast
done

lemma "eusem A B \<subseteq> lfp(eufix A B)"
apply(clarsimp simp add: eusem_def)
apply(erule rev_mp)
apply(rule_tac x = x in spec)
apply(induct_tac p)
 apply(subst lfp_unfold[OF mono_eufix])
 apply(simp add: eufix_def)
apply(clarsimp)
apply(subst lfp_unfold[OF mono_eufix])
apply(simp add: eufix_def)
apply blast
done

(*
definition eusem :: "state set \<Rightarrow> state set \<Rightarrow> state set" where
"eusem A B \<equiv> {s. \<exists>p\<in>Paths s. \<exists>j. p j \<in> B \<and> (\<forall>i < j. p i \<in> A)}"
   400 "eusem A B \<equiv> {s. \<exists>p\<in>Paths s. \<exists>j. p j \<in> B \<and> (\<forall>i < j. p i \<in> A)}"