(*<*)theory Logic = Main:(*>*)

text{* We begin by looking at a simplified grammar for Isar proofs:

\begin{center}
\begin{tabular}{lrl}
\emph{proof} & ::= & \isakeyword{proof} \emph{method}$^?$
                     \emph{statement}*
                     \isakeyword{qed} \\
                 &$\mid$& \isakeyword{by} \emph{method}\\[1ex]
\emph{statement} &::= & \isakeyword{fix} \emph{variables} \\
             &$\mid$& \isakeyword{assume} \emph{proposition}$^*$ \\
             &$\mid$& (\isakeyword{from} \emph{label}$^*$ $\mid$
                       \isakeyword{then})$^?$ \\
                    && (\isakeyword{show} $\mid$ \isakeyword{have})
                       \emph{proposition} \emph{proof} \\[1ex]
\emph{proposition} &::=& (\emph{label}{\bf:})$^?$ \emph{string}
\end{tabular}
\end{center}
where parentheses are used for grouping and $^?$ indicates an optional item.

This is a typical proof skeleton:

\begin{center}
\begin{tabular}{@ {}ll}
\isakeyword{proof}\\
\hspace*{3ex}\isakeyword{assume} @{text"\""}\emph{the-assm}@{text"\""}\\
\hspace*{3ex}\isakeyword{have} @{text"\""}\dots@{text"\""} & -- "intermediate result"\\
\hspace*{3ex}\vdots\\
\hspace*{3ex}\isakeyword{have} @{text"\""}\dots@{text"\""} & -- "intermediate result"\\
\hspace*{3ex}\isakeyword{show} @{text"\""}\emph{the-concl}@{text"\""}\\
\isakeyword{qed}
\end{tabular}
\end{center}
It proves \emph{the-assm}~@{text"\<Longrightarrow>"}~\emph{the-concl}.*}


section{*Logic*}

subsection{*Propositional logic*}

subsubsection{*Introduction rules*}

lemma "A \<longrightarrow> A"
proof (rule impI)
  assume a: "A"
  show "A" by(rule a)
qed

text{*\noindent
The operational reading: the assume-show block proves @{prop"A \<Longrightarrow> A"},
which rule @{thm[source]impI} turns into the desired @{prop"A \<longrightarrow> A"}.
However, this text is much too detailed for comfort. Therefore Isar
implements the following principle:
\begin{quote}\em
Command \isakeyword{proof} automatically tries to select an introduction rule
based on the goal and a predefined list of rules.
\end{quote}
Here @{thm[source]impI} is applied automatically:
*}

lemma "A \<longrightarrow> A"
proof
  assume a: "A"
  show "A" by(rule a)
qed

text{* Trivial proofs, in particular those by assumption, should be trivial
to perform. Method ``.'' does just that (and a bit more --- see later). Thus
naming of assumptions is often superfluous: *}

lemma "A \<longrightarrow> A"
proof
  assume "A"
  show "A" .
qed

text{* To hide proofs by assumption further, \isakeyword{by}@{text"(method)"}
first applies @{text method} and then tries to solve all remaining subgoals
by assumption: *}

lemma "A \<longrightarrow> A \<and> A"
proof
  assume A
  show "A \<and> A" by(rule conjI)
qed

text{*\noindent A drawback of these implicit proofs by assumption is that it
is no longer obvious where an assumption is used.

Proofs of the form \isakeyword{by}@{text"(rule <name>)"} can be abbreviated
to ``..'' if @{text"<name>"} is one of the predefined introduction rules. Thus
*}

lemma "A \<longrightarrow> A \<and> A"
proof
  assume A
  show "A \<and> A" ..
qed

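
subsubsection{*Elimination rules*}

text{* Elimination rules such as @{thm[source]conjE} are selected just as
automatically as introduction rules. Here is a proof of the commutativity
of conjunction that relies only on predefined rules: *}

lemma "A \<and> B \<longrightarrow> B \<and> A"
proof
  assume ab: "A \<and> B"
  from ab have a: "A" ..
  from ab have b: "B" ..
  from b a show "B \<and> A" ..
qed
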
text{*\noindent
It is worth examining this text in detail because it exhibits a number of new features.

For a start, this is the first time we have proved intermediate propositions
(\isakeyword{have}) on the way to the final \isakeyword{show}. This is the
norm in any nontrivial proof where one cannot bridge the gap between the
assumptions and the conclusion in one step. Both \isakeyword{have}s above are
proved automatically via @{thm[source]conjE} triggered by
\isakeyword{from}~@{text ab}.

The \isakeyword{show} command illustrates two things:
\begin{itemize}
\item \isakeyword{from} can be followed by any number of facts.
Given \isakeyword{from}~@{text f}$_1$~\dots~@{text f}$_n$, \isakeyword{show}
tries to find an elimination rule whose first $n$ premises can be proved
by the given facts in the given order.
\item If there is no matching elimination rule, introduction rules are tried,
again using the facts to prove the premises.
\end{itemize}
In this case, the proof succeeds with @{thm[source]conjI}. Note that the
proof would fail if we had written \isakeyword{from}~@{text"a b"}
instead of \isakeyword{from}~@{text"b a"}.

This treatment of facts fed into a proof is not restricted to implicit
application of introduction and elimination rules but applies to explicit
application of rules as well. Thus you could replace the final ``..'' above
with \isakeyword{by}@{text"(rule conjI)"}.

Note that Isar's predefined introduction and elimination rules include all the
usual natural deduction rules for propositional logic. We conclude this
section with an extended example --- which rules are used implicitly where?
Rule @{thm[source]ccontr} is @{thm ccontr[no_vars]}. *}

text{*\noindent You may need to resort to this technique if an automatic step
fails to prove the desired proposition. *}


section{*Case distinction and induction*}


subsection{*Case distinction*}

text{* We have already met the @{text cases} method for performing
binary case splits. Here is another example: *}

lemma "P \<or> \<not> P"
proof cases
  assume "P" thus ?thesis ..
next
  assume "\<not> P" thus ?thesis ..
qed

text{*\noindent As always, the cases can be tackled in any order.

This form is appropriate if @{term P} is textually small. However, if
@{term P} is large, we don't want to repeat it. This can be avoided by
the following idiom: *}

lemma "P \<or> \<not> P"
proof (cases "P")
  case True thus ?thesis ..
next
  case False thus ?thesis ..
qed

text{*\noindent which is simply a shorthand for the previous
proof. More precisely, the phrases \isakeyword{case}~@{prop
True}/@{prop False} abbreviate the corresponding assumptions @{prop P}
and @{prop"\<not>P"}.

The same game can be played with other datatypes, for example lists:
*}
(*<*)declare length_tl[simp del](*>*)
lemma "length(tl xs) = length xs - 1"
proof (cases xs)
  case Nil thus ?thesis by simp
next
  case Cons thus ?thesis by simp
qed

text{*\noindent Here \isakeyword{case}~@{text Nil} abbreviates
\isakeyword{assume}~@{prop"xs = []"} and \isakeyword{case}~@{text Cons}
abbreviates \isakeyword{assume}~@{text"xs = a # list"}. However, we
cannot refer to @{term a} and @{term list} because this would lead to
brittle proofs: these names have been chosen by the system and have
not been introduced via \isakeyword{fix}. Luckily, the proof is simple
enough that we do not need to refer to @{term a} and @{term list}. However,
in general one may have to. Hence Isar offers a simple scheme for
naming those variables: replace the anonymous @{text Cons} by, for
example, @{text"(Cons y ys)"}, which abbreviates
\isakeyword{fix}~@{text"y ys"} \isakeyword{assume}~@{text"xs = Cons y
ys"}, i.e.\ @{prop"xs = y # ys"}. Here is a (somewhat contrived)
example: *}

lemma "length(tl xs) = length xs - 1"
proof (cases xs)
  case Nil thus ?thesis by simp
next
  case (Cons y ys)
  hence "length xs = length ys + 1 \<and> length(tl xs) = length ys" by simp
  thus ?thesis by simp
qed

text{* New case distinction rules can be declared at any time, even with
named cases. *}


subsection{*Induction*}


subsubsection{*Structural induction*}

text{* We start with a simple inductive proof where both cases are proved
automatically: *}

lemma "2 * (\<Sum>i<n+1. i) = n*(n+1)"
by (induct n, simp_all)

text{*\noindent If we want to expose more of the structure of the
proof, we can use pattern matching to avoid having to repeat the goal
statement: *}

lemma "2 * (\<Sum>i<n+1. i) = n*(n+1)" (is "?P n")
proof (induct n)
  show "?P 0" by simp
next
  fix n assume "?P n"
  thus "?P(Suc n)" by simp
qed

text{* \noindent We could refine this further to show more of the equational
proof. Instead we explore the same avenue as for case distinctions:
introducing context via the \isakeyword{case} command: *}

lemma "2 * (\<Sum>i<n+1. i) = n*(n+1)"
proof (induct n)
  case 0 show ?case by simp
next
  case Suc thus ?case by simp
qed

text{* \noindent The implicitly defined @{text ?case} refers to the
corresponding case to be proved, i.e.\ @{text"?P 0"} in the first case
and @{text"?P(Suc n)"} in the second case. Context
\isakeyword{case}~@{text 0} is empty whereas \isakeyword{case}~@{text
Suc} assumes @{text"?P n"}. Again we have the same problem as with
case distinctions: we cannot refer to @{term n} in the induction step
because it has not been introduced via \isakeyword{fix} (in contrast
to the previous proof). The solution is the same as above: @{text"(Suc
i)"} instead of just @{term Suc}: *}

lemma fixes n::nat shows "n < n*n + 1"
proof (induct n)
  case 0 show ?case by simp
next
  case (Suc i) thus "Suc i < Suc i * Suc i + 1" by simp
qed

text{* \noindent Of course we could again have written
\isakeyword{thus}~@{text ?case} instead of giving the term explicitly
but we wanted to use @{term i} somewhere.

Let us now tackle a more ambitious lemma: proving complete induction
@{prop[display,indent=5]"(\<And>n. (\<And>m. m < n \<Longrightarrow> P m) \<Longrightarrow> P n) \<Longrightarrow> P n"}
via structural induction. It is well known that one needs to prove
something more general first: *}
|
lemma cind_lemma:
  assumes A: "(\<And>n. (\<And>m. m < n \<Longrightarrow> P m) \<Longrightarrow> P n)"
  shows "\<And>m. m < n \<Longrightarrow> P(m::nat)"
proof (induct n)
  case 0 thus ?case by simp
next
  case (Suc n m)
  show ?case
  proof (cases)
    assume eq: "m = n"
    from Suc A have "P n" by blast
    with eq show "P m" by simp
  next
    assume neq: "m \<noteq> n"
    with Suc have "m < n" by simp
    with Suc show "P m" by blast
  qed
qed

text{* \noindent In contrast to the style advertised in the
Tutorial~\cite{LNCS2283}, structured Isar proofs do not need to
introduce @{text"\<forall>"} or @{text"\<longrightarrow>"} into a theorem to strengthen it
for induction --- we use @{text"\<And>"} and @{text"\<Longrightarrow>"} instead. This
simplifies the proof and means we don't have to convert between the
two kinds of connectives. As usual, at the end of the proof
@{text"\<And>"} disappears and the bound variable is turned into a
@{text"?"}-variable. Thus @{thm[source]cind_lemma} becomes
@{thm[display,indent=5] cind_lemma}
Complete induction is an easy corollary: instantiation followed by
simplification @{thm[source] cind_lemma[of _ n "Suc n", simplified]}
yields
@{thm[display,indent=5] cind_lemma[of _ n "Suc n", simplified]}
This derivation is spelled out as a lemma below.

So what is this funny case @{text"(Suc n m)"} in the proof of
@{thm[source] cind_lemma}? It looks confusing at first and reveals
that the very suggestive @{text"(Suc n)"} used above is not the whole
truth. The variable names after the case name (here: @{term Suc}) are
the names of the parameters of that subgoal. So far the only
parameters were the arguments to the constructor, but now we have an
additional parameter coming from the @{text"\<And>m"} in the
\isakeyword{shows} clause. Thus @{text"(Suc n m)"} does not mean that
constructor @{term Suc} is applied to two arguments but that the two
parameters in the @{term Suc} case are to be named @{text n} and
@{text m}. *}
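
text{*\noindent One way of spelling out this corollary as its own lemma is
the following sketch (the name @{text complete_induct} is arbitrary): *}

lemma complete_induct:
  assumes A: "(\<And>n. (\<And>m. m < n \<Longrightarrow> P m) \<Longrightarrow> P n)"
  shows "P(n::nat)"
by(rule cind_lemma[of _ n "Suc n", simplified, OF A])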

subsubsection{*Rule induction*}
|
text{* We define our own version of reflexive transitive closure of a
relation *}

consts rtc :: "('a \<times> 'a)set \<Rightarrow> ('a \<times> 'a)set"   ("_*" [1000] 999)
inductive "r*"
intros
refl:  "(x,x) \<in> r*"
step:  "\<lbrakk> (x,y) \<in> r; (y,z) \<in> r* \<rbrakk> \<Longrightarrow> (x,z) \<in> r*"

text{* \noindent and prove that @{term"r*"} is indeed transitive: *}

lemma assumes A: "(x,y) \<in> r*"
  shows "(y,z) \<in> r* \<Longrightarrow> (x,z) \<in> r*" (is "PROP ?P x y")
proof -
  from A show "PROP ?P x y"
  proof induct
    fix x show "PROP ?P x x" .
  next
    fix x' x y
    assume "(x',x) \<in> r" and
           "PROP ?P x y" -- "The induction hypothesis"
    thus "PROP ?P x' y" by(blast intro: rtc.step)
  qed
qed
|
text{*\noindent Rule induction is triggered by a fact $(x_1,\dots,x_n) \in R$
piped into the proof, here \isakeyword{from}~@{text A}. The proof
itself follows the two rules of the inductive definition very closely.
The only surprising thing should be the keyword @{text PROP}: because
of certain syntactic subtleties it is required in front of terms of
type @{typ prop} (the type of meta-level propositions) which are not
obviously of type @{typ prop} because they do not contain a tell-tale
constant such as @{text"\<And>"} or @{text"\<Longrightarrow>"}.

However, the proof is rather terse. Here is a more detailed version:
*}

lemma assumes A: "(x,y) \<in> r*" and B: "(y,z) \<in> r*"
  shows "(x,z) \<in> r*"
proof -
  from A B show ?thesis
  proof induct
    fix x assume "(x,z) \<in> r*"  -- "B[x := z]"
    thus "(x,z) \<in> r*" .
  next
    fix x' x y
    assume step: "(x',x) \<in> r" and
           IH: "(y,z) \<in> r* \<Longrightarrow> (x,z) \<in> r*" and
           B:  "(y,z) \<in> r*"
    from step IH[OF B] show "(x',z) \<in> r*" by(rule rtc.step)
  qed
qed

text{*\noindent We start the proof with \isakeyword{from}~@{text"A
B"}. Only @{text A} is ``consumed'' by the first proof step, here
induction. Since @{text B} is left over we don't just prove @{text
?thesis} but in fact @{text"B \<Longrightarrow> ?thesis"}, just as in the previous
proof, only more elegantly. The base case is trivial. In the
assumptions for the induction step we can see very clearly how things
fit together and permit ourselves the obvious forward step
@{text"IH[OF B]"}.

\begin{exercise}
Show that the converse of @{thm[source]rtc.step} also holds:
@{prop[display]"[| (x,y) : r*; (y,z) : r |] ==> (x,z) : r*"}
\end{exercise}*}

(*<*)end(*>*)