diff -r f47de9e82b0f -r b266e7a86485 src/Doc/Isar-Ref/Synopsis.thy
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Doc/Isar-Ref/Synopsis.thy	Sat Apr 05 11:37:00 2014 +0200
@@ -0,0 +1,1108 @@
+theory Synopsis
+imports Base Main
+begin
+
+chapter {* Synopsis *}
+
+section {* Notepad *}
+
+text {*
+  An Isar proof body serves as a mathematical notepad for composing
+  logical content, consisting of types, terms, and facts.
+*}
+
+
+subsection {* Types and terms *}
+
+notepad
+begin
+  txt {* Locally fixed entities: *}
+  fix x  -- {* local constant, without any type information yet *}
+  fix x :: 'a  -- {* variant with explicit type constraint for subsequent use *}
+
+  fix a b
+  assume "a = b"  -- {* type assignment at first occurrence in concrete term *}
+
+  txt {* Definitions (non-polymorphic): *}
+  def x \<equiv> "t::'a"
+
+  txt {* Abbreviations (polymorphic): *}
+  let ?f = "\<lambda>x. x"
+  term "?f ?f"
+
+  txt {* Notation: *}
+  write x  ("***")
+end
+
+
+subsection {* Facts *}
+
+text {*
+  A fact is a simultaneous list of theorems.
+*}
+
+
+subsubsection {* Producing facts *}
+
+notepad
+begin
+
+  txt {* Via assumption (``lambda''): *}
+  assume a: A
+
+  txt {* Via proof (``let''): *}
+  have b: B sorry
+
+  txt {* Via abbreviation (``let''): *}
+  note c = a b
+
+end
+
+
+subsubsection {* Referencing facts *}
+
+notepad
+begin
+  txt {* Via explicit name: *}
+  assume a: A
+  note a
+
+  txt {* Via implicit name: *}
+  assume A
+  note this
+
+  txt {* Via literal proposition (unification with results from the proof text): *}
+  assume A
+  note `A`
+
+  assume "\<And>x. B x"
+  note `B a`
+  note `B b`
+end
+
+
+subsubsection {* Manipulating facts *}
+
+notepad
+begin
+  txt {* Instantiation: *}
+  assume a: "\<And>x. B x"
+  note a
+  note a [of b]
+  note a [where x = b]
+
+  txt {* Backchaining: *}
+  assume 1: A
+  assume 2: "A \<Longrightarrow> C"
+  note 2 [OF 1]
+  note 1 [THEN 2]
+
+  txt {* Symmetric results: *}
+  assume "x = y"
+  note this [symmetric]
+
+  assume "x \<noteq> y"
+  note this [symmetric]
+
+  txt {* Adhoc-simplification (take care!): *}
+  assume "P ([] @ xs)"
+  note this [simplified]
+end
+
+
+subsubsection {* Projections *}
+
+text {*
+  Isar facts may consist of multiple theorems.  There is notation to
+  project interval ranges.
+*}
+
+notepad
+begin
+  assume stuff: A B C D
+  note stuff(1)
+  note stuff(2-3)
+  note stuff(2-)
+end
+
+
+subsubsection {* Naming conventions *}
+
+text {*
+  \begin{itemize}
+
+  \item Lower-case identifiers are usually preferred.
+
+  \item Facts can be named after the main term within the proposition.
+
+  \item Facts should \emph{not} be named after the command that
+  introduced them (@{command "assume"}, @{command "have"}).  This is
+  misleading and hard to maintain.
+
+  \item Natural numbers can be used as ``meaningless'' names (more
+  appropriate than @{text "a1"}, @{text "a2"} etc.).
+
+  \item Symbolic identifiers are supported (e.g. @{text "*"}, @{text
+  "**"}, @{text "***"}); see the sketch below.
+
+  \end{itemize}
+*}
+
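+text {*
+  A tiny sketch of the last two conventions: natural numbers and
+  symbolic identifiers as fact names.
+*}
+
+notepad
+begin
+  assume 1: A
+  assume 2: "A \<Longrightarrow> B"
+  note 2 [OF 1]
+
+  assume * : A
+  assume ** : "A \<Longrightarrow> B"
+  note ** [OF *]
+end
+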
+
+subsection {* Block structure *}
+
+text {*
+  The formal notepad is block structured.  The fact produced by the last
+  entry of a block is exported into the outer context.
+*}
+
+notepad
+begin
+  {
+    have a: A sorry
+    have b: B sorry
+    note a b
+  }
+  note this
+  note `A`
+  note `B`
+end
+
+text {* Explicit blocks as well as implicit blocks of nested goal
+  statements (e.g.\ @{command have}) automatically introduce one extra
+  pair of parentheses in reserve.  The @{command next} command allows
+  jumping between these sub-blocks.
+*}
+
+notepad
+begin
+
+  {
+    have a: A sorry
+  next
+    have b: B
+    proof -
+      show B sorry
+    next
+      have c: C sorry
+    next
+      have d: D sorry
+    qed
+  }
+
+  txt {* Alternative version with explicit parentheses everywhere: *}
+
+  {
+    {
+      have a: A sorry
+    }
+    {
+      have b: B
+      proof -
+        {
+          show B sorry
+        }
+        {
+          have c: C sorry
+        }
+        {
+          have d: D sorry
+        }
+      qed
+    }
+  }
+
+end
+
+
+section {* Calculational reasoning \label{sec:calculations-synopsis} *}
+
+text {*
+  For example, see @{file "~~/src/HOL/Isar_Examples/Group.thy"}.
+*}
+
+
+subsection {* Special names in Isar proofs *}
+
+text {*
+  \begin{itemize}
+
+  \item term @{text "?thesis"} --- the main conclusion of the
+  innermost pending claim
+
+  \item term @{text "\<dots>"} --- the argument of the last explicitly
+  stated result (for infix application this is the right-hand side)
+
+  \item fact @{text "this"} --- the last result produced in the text
+
+  \end{itemize}
+*}
+
+notepad
+begin
+  have "x = y"
+  proof -
+    term ?thesis
+    show ?thesis sorry
+    term ?thesis  -- {* static! *}
+  qed
+  term "\<dots>"
+  thm this
+end
+
+text {* Calculational reasoning maintains the special fact called
+  ``@{text calculation}'' in the background.  Certain language
+  elements combine primary @{text this} with secondary @{text
+  calculation}. *}
+
+
+subsection {* Transitive chains *}
+
+text {* The idea is to combine @{text this} and @{text calculation}
+  via typical @{text trans} rules (see also @{command
+  print_trans_rules}): *}
+
+thm trans
+thm less_trans
+thm less_le_trans
+
+notepad
+begin
+  txt {* Plain bottom-up calculation: *}
+  have "a = b" sorry
+  also
+  have "b = c" sorry
+  also
+  have "c = d" sorry
+  finally
+  have "a = d" .
+
+  txt {* Variant using the @{text "\<dots>"} abbreviation: *}
+  have "a = b" sorry
+  also
+  have "\<dots> = c" sorry
+  also
+  have "\<dots> = d" sorry
+  finally
+  have "a = d" .
+
+  txt {* Top-down version with explicit claim at the head: *}
+  have "a = d"
+  proof -
+    have "a = b" sorry
+    also
+    have "\<dots> = c" sorry
+    also
+    have "\<dots> = d" sorry
+    finally
+    show ?thesis .
+  qed
+next
+  txt {* Mixed inequalities (require suitable base type): *}
+  fix a b c d :: nat
+
+  have "a < b" sorry
+  also
+  have "b \<le> c" sorry
+  also
+  have "c = d" sorry
+  finally
+  have "a < d" .
+end
+
+
+subsubsection {* Notes *}
+
+text {*
+  \begin{itemize}
+
+  \item The notion of @{text trans} rule is very general due to the
+  flexibility of Isabelle/Pure rule composition.
+
+  \item User applications may declare their own rules, with some care
+  about the operational details of higher-order unification (see the
+  sketch below).
+
+  \end{itemize}
+*}
+
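+text {*
+  A small sketch of such a user-defined transitivity rule: the relation
+  @{text r} and its rule are merely local assumptions here, declared via
+  the @{attribute trans} attribute.
+*}
+
+notepad
+begin
+  fix r :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
+  fix a b c
+  assume my_trans [trans]: "\<And>x y z. r x y \<Longrightarrow> r y z \<Longrightarrow> r x z"
+
+  have "r a b" sorry
+  also
+  have "r b c" sorry
+  finally
+  have "r a c" .
+end
+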
+
+subsection {* Degenerate calculations and bigstep reasoning *}
+
+text {* The idea is to append @{text this} to @{text calculation},
+  without rule composition. *}
+
+notepad
+begin
+  txt {* A vacuous proof: *}
+  have A sorry
+  moreover
+  have B sorry
+  moreover
+  have C sorry
+  ultimately
+  have A and B and C .
+next
+  txt {* Slightly more content (trivial bigstep reasoning): *}
+  have A sorry
+  moreover
+  have B sorry
+  moreover
+  have C sorry
+  ultimately
+  have "A \<and> B \<and> C" by blast
+next
+  txt {* More ambitious bigstep reasoning involving structured results: *}
+  have "A \<or> B \<or> C" sorry
+  moreover
+  { assume A have R sorry }
+  moreover
+  { assume B have R sorry }
+  moreover
+  { assume C have R sorry }
+  ultimately
+  have R by blast  -- {* ``big-bang integration'' of proof blocks (occasionally fragile) *}
+end
+
+
+section {* Induction *}
+
+subsection {* Induction as Natural Deduction *}
+
+text {* In principle, induction is just a special case of Natural
+  Deduction (see also \secref{sec:natural-deduction-synopsis}).  For
+  example: *}
+
+thm nat.induct
+print_statement nat.induct
+
+notepad
+begin
+  fix n :: nat
+  have "P n"
+  proof (rule nat.induct)  -- {* fragile rule application! *}
+    show "P 0" sorry
+  next
+    fix n :: nat
+    assume "P n"
+    show "P (Suc n)" sorry
+  qed
+end
+
+text {*
+  In practice, much more proof infrastructure is required.
+
+  The proof method @{method induct} provides:
+  \begin{itemize}
+
+  \item implicit rule selection and robust instantiation
+
+  \item context elements via symbolic case names
+
+  \item support for rule-structured induction statements, with local
+  parameters, premises, etc.
+
+  \end{itemize}
+*}
+
+notepad
+begin
+  fix n :: nat
+  have "P n"
+  proof (induct n)
+    case 0
+    show ?case sorry
+  next
+    case (Suc n)
+    from Suc.hyps show ?case sorry
+  qed
+end
+
+
+subsubsection {* Example *}
+
+text {*
+  The subsequent example combines the following proof patterns:
+  \begin{itemize}
+
+  \item outermost induction (over the datatype structure of natural
+  numbers), to decompose the proof problem in a top-down manner
+
+  \item calculational reasoning (\secref{sec:calculations-synopsis})
+  to compose the result in each case
+
+  \item solving local claims within the calculation by simplification
+
+  \end{itemize}
+*}
+
+lemma
+  fixes n :: nat
+  shows "(\<Sum>i=0..n. i) = n * (n + 1) div 2"
+proof (induct n)
+  case 0
+  have "(\<Sum>i=0..0. i) = (0::nat)" by simp
+  also have "\<dots> = 0 * (0 + 1) div 2" by simp
+  finally show ?case .
+next
+  case (Suc n)
+  have "(\<Sum>i=0..Suc n. i) = (\<Sum>i=0..n. i) + (n + 1)" by simp
+  also have "\<dots> = n * (n + 1) div 2 + (n + 1)" by (simp add: Suc.hyps)
+  also have "\<dots> = (n * (n + 1) + 2 * (n + 1)) div 2" by simp
+  also have "\<dots> = (Suc n * (Suc n + 1)) div 2" by simp
+  finally show ?case .
+qed
+
+text {* This demonstrates how induction proofs can be done without
+  having to consider the raw Natural Deduction structure. *}
+
+
+subsection {* Induction with local parameters and premises *}
+
+text {* Idea: Pure rule statements are passed through the induction
+  rule.  This achieves convenient proof patterns, thanks to some
+  internal trickery in the @{method induct} method.
+
+  Important: Using compact HOL formulae with @{text "\<forall>/\<longrightarrow>"} is a
+  well-known anti-pattern!  It would produce useless formal noise; a
+  sketch of this discouraged form follows the next example.
+*}
+
+notepad
+begin
+  fix n :: nat
+  fix P :: "nat \<Rightarrow> bool"
+  fix Q :: "'a \<Rightarrow> nat \<Rightarrow> bool"
+
+  have "P n"
+  proof (induct n)
+    case 0
+    show "P 0" sorry
+  next
+    case (Suc n)
+    from `P n` show "P (Suc n)" sorry
+  qed
+
+  have "A n \<Longrightarrow> P n"
+  proof (induct n)
+    case 0
+    from `A 0` show "P 0" sorry
+  next
+    case (Suc n)
+    from `A n \<Longrightarrow> P n`
+      and `A (Suc n)` show "P (Suc n)" sorry
+  qed
+
+  have "\<And>x. Q x n"
+  proof (induct n)
+    case 0
+    show "Q x 0" sorry
+  next
+    case (Suc n)
+    from `\<And>x. Q x n` show "Q x (Suc n)" sorry
+    txt {* Local quantification admits arbitrary instances: *}
+    note `Q a n` and `Q b n`
+  qed
+end
+
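+text {*
+  For contrast, here is a sketch of the discouraged object-level
+  formulation.  The induction still applies, but each case presents a
+  monolithic HOL formula that has to be taken apart by hand:
+*}
+
+notepad
+begin
+  fix n :: nat
+  fix P :: "'a \<Rightarrow> nat \<Rightarrow> bool"
+  fix A :: "'a \<Rightarrow> nat \<Rightarrow> bool"
+
+  have "\<forall>x. A x n \<longrightarrow> P x n"
+  proof (induct n)
+    case 0
+    show ?case sorry
+  next
+    case (Suc n)
+    note `\<forall>x. A x n \<longrightarrow> P x n`  -- {* monolithic induction hypothesis *}
+    show ?case sorry
+  qed
+end
+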
+
+subsection {* Implicit induction context *}
+
+text {* The @{method induct} method can isolate local parameters and
+  premises directly from the given statement.  This is convenient in
+  practical applications, but requires some understanding of what is
+  going on internally (as explained above). *}
+
+notepad
+begin
+  fix n :: nat
+  fix Q :: "'a \<Rightarrow> nat \<Rightarrow> bool"
+
+  fix x :: 'a
+  assume "A x n"
+  then have "Q x n"
+  proof (induct n arbitrary: x)
+    case 0
+    from `A x 0` show "Q x 0" sorry
+  next
+    case (Suc n)
+    from `\<And>x. A x n \<Longrightarrow> Q x n`  -- {* arbitrary instances can be produced here *}
+      and `A x (Suc n)` show "Q x (Suc n)" sorry
+  qed
+end
+
+
+subsection {* Advanced induction with term definitions *}
+
+text {* Induction over subexpressions of a certain shape is delicate
+  to formalize.  The Isar @{method induct} method provides
+  infrastructure for this.
+
+  Idea: sub-expressions of the problem are turned into a defined
+  induction variable, often accompanied by fixing auxiliary parameters
+  in the original expression. *}
+
+notepad
+begin
+  fix a :: "'a \<Rightarrow> nat"
+  fix A :: "nat \<Rightarrow> bool"
+
+  assume "A (a x)"
+  then have "P (a x)"
+  proof (induct "a x" arbitrary: x)
+    case 0
+    note prem = `A (a x)`
+      and defn = `0 = a x`
+    show "P (a x)" sorry
+  next
+    case (Suc n)
+    note hyp = `\<And>x. n = a x \<Longrightarrow> A (a x) \<Longrightarrow> P (a x)`
+      and prem = `A (a x)`
+      and defn = `Suc n = a x`
+    show "P (a x)" sorry
+  qed
+end
+
+
+section {* Natural Deduction \label{sec:natural-deduction-synopsis} *}
+
+subsection {* Rule statements *}
+
+text {*
+  Isabelle/Pure ``theorems'' are always natural deduction rules,
+  which sometimes happen to consist of a conclusion only.
+
+  The framework connectives @{text "\<And>"} and @{text "\<Longrightarrow>"} indicate the
+  rule structure declaratively.  For example: *}
+
+thm conjI
+thm impI
+thm nat.induct
+
+text {*
+  The object-logic is embedded into the Pure framework via an implicit
+  derivability judgment @{term "Trueprop :: bool \<Rightarrow> prop"}.
+
+  Thus any HOL formula appears atomic to the Pure framework, while
+  the rule structure outlines the corresponding proof pattern.
+
+  This can be made explicit as follows:
+*}
+
+notepad
+begin
+  write Trueprop  ("Tr")
+
+  thm conjI
+  thm impI
+  thm nat.induct
+end
+
+text {*
+  Isar provides first-class notation for rule statements as follows.
+*}
+
+print_statement conjI
+print_statement impI
+print_statement nat.induct
+
+
+subsubsection {* Examples *}
+
+text {*
+  Introductions and eliminations of some standard connectives of
+  the object-logic can be written as rule statements as follows.  (The
+  proof ``@{command "by"}~@{method blast}'' serves as sanity check.)
+*}
+
+lemma "(P \<Longrightarrow> False) \<Longrightarrow> \<not> P" by blast
+lemma "\<not> P \<Longrightarrow> P \<Longrightarrow> Q" by blast
+
+lemma "P \<Longrightarrow> Q \<Longrightarrow> P \<and> Q" by blast
+lemma "P \<and> Q \<Longrightarrow> (P \<Longrightarrow> Q \<Longrightarrow> R) \<Longrightarrow> R" by blast
+
+lemma "P \<Longrightarrow> P \<or> Q" by blast
+lemma "Q \<Longrightarrow> P \<or> Q" by blast
+lemma "P \<or> Q \<Longrightarrow> (P \<Longrightarrow> R) \<Longrightarrow> (Q \<Longrightarrow> R) \<Longrightarrow> R" by blast
+
+lemma "(\<And>x. P x) \<Longrightarrow> (\<forall>x. P x)" by blast
+lemma "(\<forall>x. P x) \<Longrightarrow> P x" by blast
+
+lemma "P x \<Longrightarrow> (\<exists>x. P x)" by blast
+lemma "(\<exists>x. P x) \<Longrightarrow> (\<And>x. P x \<Longrightarrow> R) \<Longrightarrow> R" by blast
+
+lemma "x \<in> A \<Longrightarrow> x \<in> B \<Longrightarrow> x \<in> A \<inter> B" by blast
+lemma "x \<in> A \<inter> B \<Longrightarrow> (x \<in> A \<Longrightarrow> x \<in> B \<Longrightarrow> R) \<Longrightarrow> R" by blast
+
+lemma "x \<in> A \<Longrightarrow> x \<in> A \<union> B" by blast
+lemma "x \<in> B \<Longrightarrow> x \<in> A \<union> B" by blast
+lemma "x \<in> A \<union> B \<Longrightarrow> (x \<in> A \<Longrightarrow> R) \<Longrightarrow> (x \<in> B \<Longrightarrow> R) \<Longrightarrow> R" by blast
+
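+text {*
+  Analogous rule statements may be written for further connectives in
+  the same style; for instance, object-level equality of propositions
+  (``iff''):
+*}
+
+lemma "(P \<Longrightarrow> Q) \<Longrightarrow> (Q \<Longrightarrow> P) \<Longrightarrow> P = Q" by blast
+lemma "P = Q \<Longrightarrow> P \<Longrightarrow> Q" by blast
+lemma "P = Q \<Longrightarrow> Q \<Longrightarrow> P" by blast
+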
+
+subsection {* Isar context elements *}
+
+text {* We derive some results out of the blue, using Isar context
+  elements and some explicit blocks.  This illustrates their meaning
+  wrt.\ Pure connectives, without goal states getting in the way. *}
+
+notepad
+begin
+  {
+    fix x
+    have "B x" sorry
+  }
+  have "\<And>x. B x" by fact
+
+next
+
+  {
+    assume A
+    have B sorry
+  }
+  have "A \<Longrightarrow> B" by fact
+
+next
+
+  {
+    def x \<equiv> t
+    have "B x" sorry
+  }
+  have "B t" by fact
+
+next
+
+  {
+    obtain x :: 'a where "B x" sorry
+    have C sorry
+  }
+  have C by fact
+
+end
+
+
+subsection {* Pure rule composition *}
+
+text {*
+  The Pure framework provides means for:
+
+  \begin{itemize}
+
+  \item backward-chaining of rules by @{inference resolution}
+
+  \item closing of branches by @{inference assumption}
+
+  \end{itemize}
+
+  Both principles involve higher-order unification of @{text \<lambda>}-terms
+  modulo @{text "\<alpha>\<beta>\<eta>"}-equivalence (cf.\ Huet and Miller). *}
+
+notepad
+begin
+  assume a: A and b: B
+  thm conjI
+  thm conjI [of A B]  -- "instantiation"
+  thm conjI [of A B, OF a b]  -- "instantiation and composition"
+  thm conjI [OF a b]  -- "composition via unification (trivial)"
+  thm conjI [OF `A` `B`]
+
+  thm conjI [OF disjI1]
+end
+
+text {* Note: Low-level rule composition is tedious and leads to
+  unreadable~/ unmaintainable expressions in the text. *}
+
+
+subsection {* Structured backward reasoning *}
+
+text {* Idea: Canonical proof decomposition via @{command fix}~/
+  @{command assume}~/ @{command show}, where the body produces a
+  natural deduction rule to refine some goal. *}
+
+notepad
+begin
+  fix A B :: "'a \<Rightarrow> bool"
+
+  have "\<And>x. A x \<Longrightarrow> B x"
+  proof -
+    fix x
+    assume "A x"
+    show "B x" sorry
+  qed
+
+  have "\<And>x. A x \<Longrightarrow> B x"
+  proof -
+    {
+      fix x
+      assume "A x"
+      show "B x" sorry
+    } -- "implicit block structure made explicit"
+    note `\<And>x. A x \<Longrightarrow> B x`
+      -- "side exit for the resulting rule"
+  qed
+end
+
+
+subsection {* Structured rule application *}
+
+text {*
+  Idea: Previous facts and new claims are composed with a rule from
+  the context (or background library).
+*}
+
+notepad
+begin
+  assume r1: "A \<Longrightarrow> B \<Longrightarrow> C"  -- {* simple rule (Horn clause) *}
+
+  have A sorry  -- "prefix of facts via outer sub-proof"
+  then have C
+  proof (rule r1)
+    show B sorry  -- "remaining rule premises via inner sub-proof"
+  qed
+
+  have C
+  proof (rule r1)
+    show A sorry
+    show B sorry
+  qed
+
+  have A and B sorry
+  then have C
+  proof (rule r1)
+  qed
+
+  have A and B sorry
+  then have C by (rule r1)
+
+next
+
+  assume r2: "A \<Longrightarrow> (\<And>x. B1 x \<Longrightarrow> B2 x) \<Longrightarrow> C"  -- {* nested rule *}
+
+  have A sorry
+  then have C
+  proof (rule r2)
+    fix x
+    assume "B1 x"
+    show "B2 x" sorry
+  qed
+
+  txt {* The compound rule premise @{prop "\<And>x. B1 x \<Longrightarrow> B2 x"} is better
+    addressed via @{command fix}~/ @{command assume}~/ @{command show}
+    in the nested proof body. *}
+end
+
+
+subsection {* Example: predicate logic *}
+
+text {*
+  Using the above principles, standard introduction and elimination proofs
+  of predicate logic connectives of HOL work as follows.
+*}
+
+notepad
+begin
+  have "A \<longrightarrow> B" and A sorry
+  then have B ..
+
+  have A sorry
+  then have "A \<or> B" ..
+
+  have B sorry
+  then have "A \<or> B" ..
+
+  have "A \<or> B" sorry
+  then have C
+  proof
+    assume A
+    then show C sorry
+  next
+    assume B
+    then show C sorry
+  qed
+
+  have A and B sorry
+  then have "A \<and> B" ..
+
+  have "A \<and> B" sorry
+  then have A ..
+
+  have "A \<and> B" sorry
+  then have B ..
+
+  have False sorry
+  then have A ..
+
+  have True ..
+
+  have "\<not> A"
+  proof
+    assume A
+    then show False sorry
+  qed
+
+  have "\<not> A" and A sorry
+  then have B ..
+
+  have "\<forall>x. P x"
+  proof
+    fix x
+    show "P x" sorry
+  qed
+
+  have "\<forall>x. P x" sorry
+  then have "P a" ..
+
+  have "\<exists>x. P x"
+  proof
+    show "P a" sorry
+  qed
+
+  have "\<exists>x. P x" sorry
+  then have C
+  proof
+    fix a
+    assume "P a"
+    show C sorry
+  qed
+
+  txt {* Less awkward version using @{command obtain}: *}
+  have "\<exists>x. P x" sorry
+  then obtain a where "P a" ..
+end
+
+text {* Further variations to illustrate Isar sub-proofs involving
+  @{command show}: *}
+
+notepad
+begin
+  have "A \<and> B"
+  proof  -- {* two strictly isolated subproofs *}
+    show A sorry
+  next
+    show B sorry
+  qed
+
+  have "A \<and> B"
+  proof  -- {* one simultaneous sub-proof *}
+    show A and B sorry
+  qed
+
+  have "A \<and> B"
+  proof  -- {* two subproofs in the same context *}
+    show A sorry
+    show B sorry
+  qed
+
+  have "A \<and> B"
+  proof  -- {* swapped order *}
+    show B sorry
+    show A sorry
+  qed
+
+  have "A \<and> B"
+  proof  -- {* sequential subproofs *}
+    show A sorry
+    show B using `A` sorry
+  qed
+end
+
+
+subsubsection {* Example: set-theoretic operators *}
+
+text {* There is nothing special about logical connectives (@{text
+  "\<and>"}, @{text "\<or>"}, @{text "\<forall>"}, @{text "\<exists>"} etc.).  Operators from
+  set-theory or lattice-theory work analogously.  It is only a matter
+  of rule declarations in the library; rules can also be specified
+  explicitly (see the sketch after the following example).
+*}
+
+notepad
+begin
+  have "x \<in> A" and "x \<in> B" sorry
+  then have "x \<in> A \<inter> B" ..
+
+  have "x \<in> A" sorry
+  then have "x \<in> A \<union> B" ..
+
+  have "x \<in> B" sorry
+  then have "x \<in> A \<union> B" ..
+
+  have "x \<in> A \<union> B" sorry
+  then have C
+  proof
+    assume "x \<in> A"
+    then show C sorry
+  next
+    assume "x \<in> B"
+    then show C sorry
+  qed
+
+next
+  have "x \<in> \<Inter>A"
+  proof
+    fix a
+    assume "a \<in> A"
+    show "x \<in> a" sorry
+  qed
+
+  have "x \<in> \<Inter>A" sorry
+  then have "x \<in> a"
+  proof
+    show "a \<in> A" sorry
+  qed
+
+  have "a \<in> A" and "x \<in> a" sorry
+  then have "x \<in> \<Union>A" ..
+
+  have "x \<in> \<Union>A" sorry
+  then obtain a where "a \<in> A" and "x \<in> a" ..
+end
+
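+text {*
+  The same introduction as the first step above, but with the rule given
+  explicitly instead of letting @{text ".."} pick it implicitly: a
+  minimal sketch using the library rule @{text IntI}.
+*}
+
+notepad
+begin
+  have "x \<in> A" and "x \<in> B" sorry
+  then have "x \<in> A \<inter> B" by (rule IntI)
+end
+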
+
+section {* Generalized elimination and cases *}
+
+subsection {* General elimination rules *}
+
+text {*
+  The general format of elimination rules is illustrated by the
+  following typical representatives:
+*}
+
+thm exE  -- {* local parameter *}
+thm conjE  -- {* local premises *}
+thm disjE  -- {* split into cases *}
+
+text {*
+  Combining these characteristics leads to the following general scheme
+  for elimination rules with cases:
+
+  \begin{itemize}
+
+  \item prefix of assumptions (or ``major premises'')
+
+  \item one or more cases that allow the main conclusion to be
+  established in an augmented context
+
+  \end{itemize}
+*}
+
+notepad
+begin
+  assume r:
+    "A1 \<Longrightarrow> A2 \<Longrightarrow>  (* assumptions *)
+      (\<And>x y. B1 x y \<Longrightarrow> C1 x y \<Longrightarrow> R) \<Longrightarrow>  (* case 1 *)
+      (\<And>x y. B2 x y \<Longrightarrow> C2 x y \<Longrightarrow> R) \<Longrightarrow>  (* case 2 *)
+      R  (* main conclusion *)"
+
+  have A1 and A2 sorry
+  then have R
+  proof (rule r)
+    fix x y
+    assume "B1 x y" and "C1 x y"
+    show ?thesis sorry
+  next
+    fix x y
+    assume "B2 x y" and "C2 x y"
+    show ?thesis sorry
+  qed
+end
+
+text {* Here @{text "?thesis"} is used to refer to the unchanged goal
+  statement. *}
+
+
+subsection {* Rules with cases *}
+
+text {*
+  Applying an elimination rule to some goal leaves it unchanged, but
+  allows the context to be augmented in the sub-proof of each case.
+
+  Isar provides some infrastructure to support this:
+
+  \begin{itemize}
+
+  \item native language elements to state eliminations
+
+  \item symbolic case names
+
+  \item method @{method cases} to recover this structure in a
+  sub-proof
+
+  \end{itemize}
+*}
+
+print_statement exE
+print_statement conjE
+print_statement disjE
+
+lemma
+  assumes A1 and A2  -- {* assumptions *}
+  obtains
+    (case1) x y where "B1 x y" and "C1 x y"
+  | (case2) x y where "B2 x y" and "C2 x y"
+  sorry
+
+
+subsubsection {* Example *}
+
+lemma tertium_non_datur:
+  obtains
+    (T) A
+  | (F) "\<not> A"
+  by blast
+
+notepad
+begin
+  fix x y :: 'a
+  have C
+  proof (cases "x = y" rule: tertium_non_datur)
+    case T
+    from `x = y` show ?thesis sorry
+  next
+    case F
+    from `x \<noteq> y` show ?thesis sorry
+  qed
+end
+
+
+subsubsection {* Example *}
+
+text {*
+  Isabelle/HOL specification mechanisms (datatype, inductive, etc.)
+  provide suitable derived cases rules.
+*}
+
+datatype foo = Foo | Bar foo
+
+notepad
+begin
+  fix x :: foo
+  have C
+  proof (cases x)
+    case Foo
+    from `x = Foo` show ?thesis sorry
+  next
+    case (Bar a)
+    from `x = Bar a` show ?thesis sorry
+  qed
+end
+
+
+subsection {* Obtaining local contexts *}
+
+text {* A single ``case'' branch may be inlined into Isar proof text
+  via @{command obtain}.  This proves @{prop "(\<And>x. B x \<Longrightarrow> thesis) \<Longrightarrow>
+  thesis"} on the spot, and augments the context afterwards. *}
+
+notepad
+begin
+  fix B :: "'a \<Rightarrow> bool"
+
+  obtain x where "B x" sorry
+  note `B x`
+
+  txt {* Conclusions from this context may not mention @{term x} again! *}
+  {
+    obtain x where "B x" sorry
+    from `B x` have C sorry
+  }
+  note `C`
+end
+
+end