# HG changeset patch
# User wenzelm
# Date 1256060229 -7200
# Node ID 8f35633c492244048ecd6367e5e15abd0cd768cc
# Parent cc038dc8f412d2de3797454812e3bde5b491caf6
modernized session Isar_Examples;

diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/IsaMakefile
--- a/src/HOL/IsaMakefile Tue Oct 20 19:36:52 2009 +0200
+++ b/src/HOL/IsaMakefile Tue Oct 20 19:37:09 2009 +0200
@@ -25,7 +25,7 @@
   HOL-IOA \
   HOL-Imperative_HOL \
   HOL-Induct \
-  HOL-Isar_examples \
+  HOL-Isar_Examples \
   HOL-Lambda \
   HOL-Lattice \
   HOL-Matrix \
@@ -914,22 +914,22 @@
   @$(ISABELLE_TOOL) usedir $(OUT)/HOL ex

-## HOL-Isar_examples
+## HOL-Isar_Examples

-HOL-Isar_examples: HOL $(LOG)/HOL-Isar_examples.gz
+HOL-Isar_Examples: HOL $(LOG)/HOL-Isar_Examples.gz

-$(LOG)/HOL-Isar_examples.gz: $(OUT)/HOL Isar_examples/Basic_Logic.thy \
-  Isar_examples/Cantor.thy Isar_examples/Drinker.thy \
-  Isar_examples/Expr_Compiler.thy Isar_examples/Fibonacci.thy \
-  Isar_examples/Group.thy Isar_examples/Hoare.thy \
-  Isar_examples/Hoare_Ex.thy Isar_examples/Knaster_Tarski.thy \
-  Isar_examples/Mutilated_Checkerboard.thy \
-  Isar_examples/Nested_Datatype.thy Isar_examples/Peirce.thy \
-  Isar_examples/Puzzle.thy Isar_examples/Summation.thy \
-  Isar_examples/ROOT.ML Isar_examples/document/proof.sty \
-  Isar_examples/document/root.bib Isar_examples/document/root.tex \
-  Isar_examples/document/style.tex Hoare/hoare_tac.ML
-  @$(ISABELLE_TOOL) usedir $(OUT)/HOL Isar_examples
+$(LOG)/HOL-Isar_Examples.gz: $(OUT)/HOL Isar_Examples/Basic_Logic.thy \
+  Isar_Examples/Cantor.thy Isar_Examples/Drinker.thy \
+  Isar_Examples/Expr_Compiler.thy Isar_Examples/Fibonacci.thy \
+  Isar_Examples/Group.thy Isar_Examples/Hoare.thy \
+  Isar_Examples/Hoare_Ex.thy Isar_Examples/Knaster_Tarski.thy \
+  Isar_Examples/Mutilated_Checkerboard.thy \
+  Isar_Examples/Nested_Datatype.thy Isar_Examples/Peirce.thy \
+  Isar_Examples/Puzzle.thy Isar_Examples/Summation.thy \
+  Isar_Examples/ROOT.ML Isar_Examples/document/proof.sty \
+  Isar_Examples/document/root.bib Isar_Examples/document/root.tex \
+  Isar_Examples/document/style.tex Hoare/hoare_tac.ML
+  @$(ISABELLE_TOOL) usedir $(OUT)/HOL Isar_Examples

## HOL-SET-Protocol

@@ -1304,7 +1304,7 @@
 clean:
   @rm -f $(OUT)/HOL-Plain $(OUT)/HOL-Main $(OUT)/HOL \
     $(OUT)/HOL-Nominal $(OUT)/TLA $(LOG)/HOL.gz \
-    $(LOG)/TLA.gz $(LOG)/HOL-Isar_examples.gz \
+    $(LOG)/TLA.gz $(LOG)/HOL-Isar_Examples.gz \
     $(LOG)/HOL-Induct.gz $(LOG)/HOL-ex.gz \
     $(LOG)/HOL-Subst.gz $(LOG)/HOL-IMP.gz \
     $(LOG)/HOL-IMPP.gz $(LOG)/HOL-Hoare.gz \

diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/Basic_Logic.thy
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Isar_Examples/Basic_Logic.thy Tue Oct 20 19:37:09 2009 +0200
@@ -0,0 +1,448 @@
(*  Title:      HOL/Isar_Examples/Basic_Logic.thy
    Author:     Markus Wenzel, TU Muenchen

Basic propositional and quantifier reasoning.
*)

header {* Basic logical reasoning *}

theory Basic_Logic
imports Main
begin


subsection {* Pure backward reasoning *}

text {*
  In order to get a first idea of what Isabelle/Isar proof documents
  look like, we consider the propositions @{text I}, @{text K}, and
  @{text S}.  The following (rather explicit) proofs should require
  little extra explanation.
*}

lemma I: "A --> A"
proof
  assume A
  show A by fact
qed

lemma K: "A --> B --> A"
proof
  assume A
  show "B --> A"
  proof
    show A by fact
  qed
qed

lemma S: "(A --> B --> C) --> (A --> B) --> A --> C"
proof
  assume "A --> B --> C"
  show "(A --> B) --> A --> C"
  proof
    assume "A --> B"
    show "A --> C"
    proof
      assume A
      show C
      proof (rule mp)
        show "B --> C" by (rule mp) fact+
        show B by (rule mp) fact+
      qed
    qed
  qed
qed

text {*
  Isar provides several ways to fine-tune the reasoning, avoiding
  excessive detail.  Several abbreviated language elements are
  available, enabling the writer to express proofs in a more concise
  way, even without referring to any automated proof tools yet.

  First of all, proof by assumption may be abbreviated as a single
  dot.
*}

lemma "A --> A"
proof
  assume A
  show A .
qed

text {*
  In fact, concluding any (sub-)proof already involves solving any
  remaining goals by assumption\footnote{This is not a completely
  trivial operation, as proof by assumption may involve full
  higher-order unification.}.  Thus we may skip the rather vacuous
  body of the above proof as well.
*}

lemma "A --> A"
proof
qed

text {*
  Note that the \isacommand{proof} command refers to the @{text rule}
  method (without arguments) by default.  Thus it implicitly applies a
  single rule, as determined from the syntactic form of the statements
  involved.  The \isacommand{by} command abbreviates any proof with
  empty body, so the proof may be further pruned.
*}

lemma "A --> A"
  by rule

text {*
  Proof by a single rule may be abbreviated as double-dot.
*}

lemma "A --> A" ..

text {*
  Thus we have arrived at an adequate representation of the proof of a
  tautology that holds by a single standard rule.\footnote{Apparently,
  the rule here is implication introduction.}
*}

text {*
  Let us also reconsider @{text K}.  Its statement is composed of
  iterated connectives.  Basic decomposition is by a single rule at a
  time, which is why our first version above was by nesting two
  proofs.

  The @{text intro} proof method repeatedly decomposes a goal's
  conclusion.\footnote{The dual method is @{text elim}, acting on a
  goal's premises.}
*}

lemma "A --> B --> A"
proof (intro impI)
  assume A
  show A by fact
qed

text {*
  Again, the body may be collapsed.
*}

lemma "A --> B --> A"
  by (intro impI)

text {*
  Just like @{text rule}, the @{text intro} and @{text elim} proof
  methods pick standard structural rules, in case no explicit
  arguments are given.  While implicit rules are usually just fine for
  single rule application, this may go too far with iteration.  Thus
  in practice, @{text intro} and @{text elim} would typically be
  restricted to certain structures by giving a few rules only, e.g.\
  \isacommand{proof}~@{text "(intro impI allI)"} to strip implications
  and universal quantifiers.

  Such well-tuned iterated decomposition of certain structures is the
  prime application of @{text intro} and @{text elim}.  In contrast,
  terminal steps that solve a goal completely are usually performed by
  actual automated proof methods (such as \isacommand{by}~@{text
  blast}).
*}


subsection {* Variations of backward vs.\ forward reasoning *}

text {*
  Certainly, any proof may be performed in backward-style only.  On
  the other hand, small steps of reasoning are often more naturally
  expressed in forward-style.
Isar supports both backward and forward + reasoning as a first-class concept. In order to demonstrate the + difference, we consider several proofs of @{text "A \ B \ B \ A"}. + + The first version is purely backward. +*} + +lemma "A & B --> B & A" +proof + assume "A & B" + show "B & A" + proof + show B by (rule conjunct2) fact + show A by (rule conjunct1) fact + qed +qed + +text {* + Above, the @{text "conjunct_1/2"} projection rules had to be named + explicitly, since the goals @{text B} and @{text A} did not provide + any structural clue. This may be avoided using \isacommand{from} to + focus on the @{text "A \ B"} assumption as the current facts, + enabling the use of double-dot proofs. Note that \isacommand{from} + already does forward-chaining, involving the \name{conjE} rule here. +*} + +lemma "A & B --> B & A" +proof + assume "A & B" + show "B & A" + proof + from `A & B` show B .. + from `A & B` show A .. + qed +qed + +text {* + In the next version, we move the forward step one level upwards. + Forward-chaining from the most recent facts is indicated by the + \isacommand{then} command. Thus the proof of @{text "B \ A"} from + @{text "A \ B"} actually becomes an elimination, rather than an + introduction. The resulting proof structure directly corresponds to + that of the @{text conjE} rule, including the repeated goal + proposition that is abbreviated as @{text ?thesis} below. +*} + +lemma "A & B --> B & A" +proof + assume "A & B" + then show "B & A" + proof -- {* rule @{text conjE} of @{text "A \ B"} *} + assume B A + then show ?thesis .. -- {* rule @{text conjI} of @{text "B \ A"} *} + qed +qed + +text {* + In the subsequent version we flatten the structure of the main body + by doing forward reasoning all the time. Only the outermost + decomposition step is left as backward. +*} + +lemma "A & B --> B & A" +proof + assume "A & B" + from `A & B` have A .. + from `A & B` have B .. + from `B` `A` show "B & A" .. +qed + +text {* + We can still push forward-reasoning a bit further, even at the risk + of getting ridiculous. Note that we force the initial proof step to + do nothing here, by referring to the ``-'' proof method. +*} + +lemma "A & B --> B & A" +proof - + { + assume "A & B" + from `A & B` have A .. + from `A & B` have B .. + from `B` `A` have "B & A" .. + } + then show ?thesis .. -- {* rule \name{impI} *} +qed + +text {* + \medskip With these examples we have shifted through a whole range + from purely backward to purely forward reasoning. Apparently, in + the extreme ends we get slightly ill-structured proofs, which also + require much explicit naming of either rules (backward) or local + facts (forward). + + The general lesson learned here is that good proof style would + achieve just the \emph{right} balance of top-down backward + decomposition, and bottom-up forward composition. In general, there + is no single best way to arrange some pieces of formal reasoning, of + course. Depending on the actual applications, the intended audience + etc., rules (and methods) on the one hand vs.\ facts on the other + hand have to be emphasized in an appropriate way. This requires the + proof writer to develop good taste, and some practice, of course. +*} + +text {* + For our example the most appropriate way of reasoning is probably + the middle one, with conjunction introduction done after + elimination. +*} + +lemma "A & B --> B & A" +proof + assume "A & B" + then show "B & A" + proof + assume B A + then show ?thesis .. 
+ qed +qed + + + +subsection {* A few examples from ``Introduction to Isabelle'' *} + +text {* + We rephrase some of the basic reasoning examples of + \cite{isabelle-intro}, using HOL rather than FOL. +*} + +subsubsection {* A propositional proof *} + +text {* + We consider the proposition @{text "P \ P \ P"}. The proof below + involves forward-chaining from @{text "P \ P"}, followed by an + explicit case-analysis on the two \emph{identical} cases. +*} + +lemma "P | P --> P" +proof + assume "P | P" + then show P + proof -- {* + rule @{text disjE}: \smash{$\infer{C}{A \disj B & \infer*{C}{[A]} & \infer*{C}{[B]}}$} + *} + assume P show P by fact + next + assume P show P by fact + qed +qed + +text {* + Case splits are \emph{not} hardwired into the Isar language as a + special feature. The \isacommand{next} command used to separate the + cases above is just a short form of managing block structure. + + \medskip In general, applying proof methods may split up a goal into + separate ``cases'', i.e.\ new subgoals with individual local + assumptions. The corresponding proof text typically mimics this by + establishing results in appropriate contexts, separated by blocks. + + In order to avoid too much explicit parentheses, the Isar system + implicitly opens an additional block for any new goal, the + \isacommand{next} statement then closes one block level, opening a + new one. The resulting behavior is what one would expect from + separating cases, only that it is more flexible. E.g.\ an induction + base case (which does not introduce local assumptions) would + \emph{not} require \isacommand{next} to separate the subsequent step + case. + + \medskip In our example the situation is even simpler, since the two + cases actually coincide. Consequently the proof may be rephrased as + follows. +*} + +lemma "P | P --> P" +proof + assume "P | P" + then show P + proof + assume P + show P by fact + show P by fact + qed +qed + +text {* + Again, the rather vacuous body of the proof may be collapsed. Thus + the case analysis degenerates into two assumption steps, which are + implicitly performed when concluding the single rule step of the + double-dot proof as follows. +*} + +lemma "P | P --> P" +proof + assume "P | P" + then show P .. +qed + + +subsubsection {* A quantifier proof *} + +text {* + To illustrate quantifier reasoning, let us prove @{text "(\x. P (f + x)) \ (\y. P y)"}. Informally, this holds because any @{text a} + with @{text "P (f a)"} may be taken as a witness for the second + existential statement. + + The first proof is rather verbose, exhibiting quite a lot of + (redundant) detail. It gives explicit rules, even with some + instantiation. Furthermore, we encounter two new language elements: + the \isacommand{fix} command augments the context by some new + ``arbitrary, but fixed'' element; the \isacommand{is} annotation + binds term abbreviations by higher-order pattern matching. +*} + +lemma "(EX x. P (f x)) --> (EX y. P y)" +proof + assume "EX x. P (f x)" + then show "EX y. P y" + proof (rule exE) -- {* + rule \name{exE}: \smash{$\infer{B}{\ex x A(x) & \infer*{B}{[A(x)]_x}}$} + *} + fix a + assume "P (f a)" (is "P ?witness") + then show ?thesis by (rule exI [of P ?witness]) + qed +qed + +text {* + While explicit rule instantiation may occasionally improve + readability of certain aspects of reasoning, it is usually quite + redundant. 
Above, the basic proof outline already gives enough
  structural clues for the system to infer both the rules and their
  instances (by higher-order unification).  Thus we may as well prune
  the text as follows.
*}

lemma "(EX x. P (f x)) --> (EX y. P y)"
proof
  assume "EX x. P (f x)"
  then show "EX y. P y"
  proof
    fix a
    assume "P (f a)"
    then show ?thesis ..
  qed
qed

text {*
  Explicit @{text \<exists>}-elimination as seen above can become quite
  cumbersome in practice.  The derived Isar language element
  ``\isakeyword{obtain}'' provides a more handsome way to do
  generalized existence reasoning.
*}

lemma "(EX x. P (f x)) --> (EX y. P y)"
proof
  assume "EX x. P (f x)"
  then obtain a where "P (f a)" ..
  then show "EX y. P y" ..
qed

text {*
  Technically, \isakeyword{obtain} is similar to \isakeyword{fix} and
  \isakeyword{assume} together with a soundness proof of the
  elimination involved.  Thus it behaves like any other forward proof
  element.  Also note that due to the nature of general existence
  reasoning involved here, any result exported from the context of an
  \isakeyword{obtain} statement may \emph{not} refer to the parameters
  introduced there.
*}


subsubsection {* Deriving rules in Isabelle *}

text {*
  We derive the conjunction elimination rule from the corresponding
  projections.  The proof is quite straightforward, since
  Isabelle/Isar supports non-atomic goals and assumptions fully
  transparently.
*}

theorem conjE: "A & B ==> (A ==> B ==> C) ==> C"
proof -
  assume "A & B"
  assume r: "A ==> B ==> C"
  show C
  proof (rule r)
    show A by (rule conjunct1) fact
    show B by (rule conjunct2) fact
  qed
qed

end

diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/Cantor.thy
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Isar_Examples/Cantor.thy Tue Oct 20 19:37:09 2009 +0200
@@ -0,0 +1,71 @@
(*  Title:      HOL/Isar_Examples/Cantor.thy
    Author:     Markus Wenzel, TU Muenchen
*)

header {* Cantor's Theorem *}

theory Cantor
imports Main
begin

text_raw {*
  \footnote{This is an Isar version of the final example of the
  Isabelle/HOL manual \cite{isabelle-HOL}.}
*}

text {*
  Cantor's Theorem states that every set has more subsets than it has
  elements.  It has become a favorite basic example in pure
  higher-order logic since it is so easily expressed: \[\all{f::\alpha
  \To \alpha \To \idt{bool}} \ex{S::\alpha \To \idt{bool}}
  \all{x::\alpha} f \ap x \not= S\]

  Viewing types as sets, $\alpha \To \idt{bool}$ represents the
  powerset of $\alpha$.  This version of the theorem states that for
  every function from $\alpha$ to its powerset, some subset is outside
  its range.  The Isabelle/Isar proofs below use HOL's set theory,
  with the type $\alpha \ap \idt{set}$ and the operator
  $\idt{range}::(\alpha \To \beta) \To \beta \ap \idt{set}$.
*}

theorem "EX S. S ~: range (f :: 'a => 'a set)"
proof
  let ?S = "{x. x ~: f x}"
  show "?S ~: range f"
  proof
    assume "?S : range f"
    then obtain y where "?S = f y" ..
    then show False
    proof (rule equalityCE)
      assume "y : f y"
      assume "y : ?S" then have "y ~: f y" ..
      with `y : f y` show ?thesis by contradiction
    next
      assume "y ~: ?S"
      assume "y ~: f y" then have "y : ?S" ..
      with `y ~: ?S` show ?thesis by contradiction
    qed
  qed
qed

text {*
  How much creativity is required?  As it happens, Isabelle can prove
  this theorem automatically using best-first search.
Depth-first + search would diverge, but best-first search successfully navigates + through the large search space. The context of Isabelle's classical + prover contains rules for the relevant constructs of HOL's set + theory. +*} + +theorem "EX S. S ~: range (f :: 'a => 'a set)" + by best + +text {* + While this establishes the same theorem internally, we do not get + any idea of how the proof actually works. There is currently no way + to transform internal system-level representations of Isabelle + proofs back into Isar text. Writing intelligible proof documents + really is a creative process, after all. +*} + +end diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/Drinker.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/Drinker.thy Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,54 @@ +(* Title: HOL/Isar_Examples/Drinker.thy + Author: Makarius +*) + +header {* The Drinker's Principle *} + +theory Drinker +imports Main +begin + +text {* + Here is another example of classical reasoning: the Drinker's + Principle says that for some person, if he is drunk, everybody else + is drunk! + + We first prove a classical part of de-Morgan's law. +*} + +lemma deMorgan: + assumes "\ (\x. P x)" + shows "\x. \ P x" + using prems +proof (rule contrapos_np) + assume a: "\ (\x. \ P x)" + show "\x. P x" + proof + fix x + show "P x" + proof (rule classical) + assume "\ P x" + then have "\x. \ P x" .. + with a show ?thesis by contradiction + qed + qed +qed + +theorem Drinker's_Principle: "\x. drunk x \ (\x. drunk x)" +proof cases + fix a assume "\x. drunk x" + then have "drunk a \ (\x. drunk x)" .. + then show ?thesis .. +next + assume "\ (\x. drunk x)" + then have "\x. \ drunk x" by (rule deMorgan) + then obtain a where a: "\ drunk a" .. + have "drunk a \ (\x. drunk x)" + proof + assume "drunk a" + with a show "\x. drunk x" by (contradiction) + qed + then show ?thesis .. +qed + +end diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/Expr_Compiler.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/Expr_Compiler.thy Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,231 @@ +(* Title: HOL/Isar_Examples/Expr_Compiler.thy + Author: Markus Wenzel, TU Muenchen + +Correctness of a simple expression/stack-machine compiler. +*) + +header {* Correctness of a simple expression compiler *} + +theory Expr_Compiler +imports Main +begin + +text {* + This is a (rather trivial) example of program verification. We model + a compiler for translating expressions to stack machine instructions, + and prove its correctness wrt.\ some evaluation semantics. +*} + + +subsection {* Binary operations *} + +text {* + Binary operations are just functions over some type of values. This + is both for abstract syntax and semantics, i.e.\ we use a ``shallow + embedding'' here. +*} + +types + 'val binop = "'val => 'val => 'val" + + +subsection {* Expressions *} + +text {* + The language of expressions is defined as an inductive type, + consisting of variables, constants, and binary operations on + expressions. +*} + +datatype ('adr, 'val) expr = + Variable 'adr | + Constant 'val | + Binop "'val binop" "('adr, 'val) expr" "('adr, 'val) expr" + +text {* + Evaluation (wrt.\ some environment of variable assignments) is + defined by primitive recursion over the structure of expressions. 
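  For instance (an illustrative instance, not part of the original
  text), taking @{text "op +"} on natural numbers as the binary
  operation, the expression @{text "Binop (op +) (Variable x)
  (Constant c)"} will evaluate to @{text "env x + c"} under an
  environment @{text env}, according to the equations below.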
+*} + +consts + eval :: "('adr, 'val) expr => ('adr => 'val) => 'val" + +primrec + "eval (Variable x) env = env x" + "eval (Constant c) env = c" + "eval (Binop f e1 e2) env = f (eval e1 env) (eval e2 env)" + + +subsection {* Machine *} + +text {* + Next we model a simple stack machine, with three instructions. +*} + +datatype ('adr, 'val) instr = + Const 'val | + Load 'adr | + Apply "'val binop" + +text {* + Execution of a list of stack machine instructions is easily defined + as follows. +*} + +consts + exec :: "(('adr, 'val) instr) list + => 'val list => ('adr => 'val) => 'val list" + +primrec + "exec [] stack env = stack" + "exec (instr # instrs) stack env = + (case instr of + Const c => exec instrs (c # stack) env + | Load x => exec instrs (env x # stack) env + | Apply f => exec instrs (f (hd stack) (hd (tl stack)) + # (tl (tl stack))) env)" + +constdefs + execute :: "(('adr, 'val) instr) list => ('adr => 'val) => 'val" + "execute instrs env == hd (exec instrs [] env)" + + +subsection {* Compiler *} + +text {* + We are ready to define the compilation function of expressions to + lists of stack machine instructions. +*} + +consts + compile :: "('adr, 'val) expr => (('adr, 'val) instr) list" + +primrec + "compile (Variable x) = [Load x]" + "compile (Constant c) = [Const c]" + "compile (Binop f e1 e2) = compile e2 @ compile e1 @ [Apply f]" + + +text {* + The main result of this development is the correctness theorem for + $\idt{compile}$. We first establish a lemma about $\idt{exec}$ and + list append. +*} + +lemma exec_append: + "exec (xs @ ys) stack env = + exec ys (exec xs stack env) env" +proof (induct xs arbitrary: stack) + case Nil + show ?case by simp +next + case (Cons x xs) + show ?case + proof (induct x) + case Const + from Cons show ?case by simp + next + case Load + from Cons show ?case by simp + next + case Apply + from Cons show ?case by simp + qed +qed + +theorem correctness: "execute (compile e) env = eval e env" +proof - + have "\stack. exec (compile e) stack env = eval e env # stack" + proof (induct e) + case Variable show ?case by simp + next + case Constant show ?case by simp + next + case Binop then show ?case by (simp add: exec_append) + qed + then show ?thesis by (simp add: execute_def) +qed + + +text {* + \bigskip In the proofs above, the \name{simp} method does quite a lot + of work behind the scenes (mostly ``functional program execution''). + Subsequently, the same reasoning is elaborated in detail --- at most + one recursive function definition is used at a time. Thus we get a + better idea of what is actually going on. +*} + +lemma exec_append': + "exec (xs @ ys) stack env = exec ys (exec xs stack env) env" +proof (induct xs arbitrary: stack) + case (Nil s) + have "exec ([] @ ys) s env = exec ys s env" by simp + also have "... = exec ys (exec [] s env) env" by simp + finally show ?case . +next + case (Cons x xs s) + show ?case + proof (induct x) + case (Const val) + have "exec ((Const val # xs) @ ys) s env = exec (Const val # xs @ ys) s env" + by simp + also have "... = exec (xs @ ys) (val # s) env" by simp + also from Cons have "... = exec ys (exec xs (val # s) env) env" . + also have "... = exec ys (exec (Const val # xs) s env) env" by simp + finally show ?case . + next + case (Load adr) + from Cons show ?case by simp -- {* same as above *} + next + case (Apply fn) + have "exec ((Apply fn # xs) @ ys) s env = + exec (Apply fn # xs @ ys) s env" by simp + also have "... 
= + exec (xs @ ys) (fn (hd s) (hd (tl s)) # (tl (tl s))) env" by simp + also from Cons have "... = + exec ys (exec xs (fn (hd s) (hd (tl s)) # tl (tl s)) env) env" . + also have "... = exec ys (exec (Apply fn # xs) s env) env" by simp + finally show ?case . + qed +qed + +theorem correctness': "execute (compile e) env = eval e env" +proof - + have exec_compile: "\stack. exec (compile e) stack env = eval e env # stack" + proof (induct e) + case (Variable adr s) + have "exec (compile (Variable adr)) s env = exec [Load adr] s env" + by simp + also have "... = env adr # s" by simp + also have "env adr = eval (Variable adr) env" by simp + finally show ?case . + next + case (Constant val s) + show ?case by simp -- {* same as above *} + next + case (Binop fn e1 e2 s) + have "exec (compile (Binop fn e1 e2)) s env = + exec (compile e2 @ compile e1 @ [Apply fn]) s env" by simp + also have "... = exec [Apply fn] + (exec (compile e1) (exec (compile e2) s env) env) env" + by (simp only: exec_append) + also have "exec (compile e2) s env = eval e2 env # s" by fact + also have "exec (compile e1) ... env = eval e1 env # ..." by fact + also have "exec [Apply fn] ... env = + fn (hd ...) (hd (tl ...)) # (tl (tl ...))" by simp + also have "... = fn (eval e1 env) (eval e2 env) # s" by simp + also have "fn (eval e1 env) (eval e2 env) = + eval (Binop fn e1 e2) env" + by simp + finally show ?case . + qed + + have "execute (compile e) env = hd (exec (compile e) [] env)" + by (simp add: execute_def) + also from exec_compile + have "exec (compile e) [] env = [eval e env]" . + also have "hd ... = eval e env" by simp + finally show ?thesis . +qed + +end diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/Fibonacci.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/Fibonacci.thy Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,165 @@ +(* Title: HOL/Isar_Examples/Fibonacci.thy + Author: Gertrud Bauer + Copyright 1999 Technische Universitaet Muenchen + +The Fibonacci function. Demonstrates the use of recdef. Original +tactic script by Lawrence C Paulson. + +Fibonacci numbers: proofs of laws taken from + + R. L. Graham, D. E. Knuth, O. Patashnik. + Concrete Mathematics. + (Addison-Wesley, 1989) +*) + +header {* Fib and Gcd commute *} + +theory Fibonacci +imports Primes +begin + +text_raw {* + \footnote{Isar version by Gertrud Bauer. Original tactic script by + Larry Paulson. A few proofs of laws taken from + \cite{Concrete-Math}.} +*} + + +subsection {* Fibonacci numbers *} + +fun fib :: "nat \ nat" where + "fib 0 = 0" + | "fib (Suc 0) = 1" + | "fib (Suc (Suc x)) = fib x + fib (Suc x)" + +lemma [simp]: "0 < fib (Suc n)" + by (induct n rule: fib.induct) simp_all + + +text {* Alternative induction rule. *} + +theorem fib_induct: + "P 0 ==> P 1 ==> (!!n. P (n + 1) ==> P n ==> P (n + 2)) ==> P (n::nat)" + by (induct rule: fib.induct) simp_all + + +subsection {* Fib and gcd commute *} + +text {* A few laws taken from \cite{Concrete-Math}. 
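  As a sanity check (an illustrative lemma, not part of the original
  theory), the first values of @{text fib} are computed by plain
  simplification:
*}

lemma "fib (Suc (Suc (Suc (Suc 0)))) = 3"
  by simp  -- {* unfolds the three defining equations of @{text fib} *}

text {*
  The laws themselves now follow.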
*} + +lemma fib_add: + "fib (n + k + 1) = fib (k + 1) * fib (n + 1) + fib k * fib n" + (is "?P n") + -- {* see \cite[page 280]{Concrete-Math} *} +proof (induct n rule: fib_induct) + show "?P 0" by simp + show "?P 1" by simp + fix n + have "fib (n + 2 + k + 1) + = fib (n + k + 1) + fib (n + 1 + k + 1)" by simp + also assume "fib (n + k + 1) + = fib (k + 1) * fib (n + 1) + fib k * fib n" + (is " _ = ?R1") + also assume "fib (n + 1 + k + 1) + = fib (k + 1) * fib (n + 1 + 1) + fib k * fib (n + 1)" + (is " _ = ?R2") + also have "?R1 + ?R2 + = fib (k + 1) * fib (n + 2 + 1) + fib k * fib (n + 2)" + by (simp add: add_mult_distrib2) + finally show "?P (n + 2)" . +qed + +lemma gcd_fib_Suc_eq_1: "gcd (fib n) (fib (n + 1)) = 1" (is "?P n") +proof (induct n rule: fib_induct) + show "?P 0" by simp + show "?P 1" by simp + fix n + have "fib (n + 2 + 1) = fib (n + 1) + fib (n + 2)" + by simp + also have "gcd (fib (n + 2)) ... = gcd (fib (n + 2)) (fib (n + 1))" + by (simp only: gcd_add2') + also have "... = gcd (fib (n + 1)) (fib (n + 1 + 1))" + by (simp add: gcd_commute) + also assume "... = 1" + finally show "?P (n + 2)" . +qed + +lemma gcd_mult_add: "0 < n ==> gcd (n * k + m) n = gcd m n" +proof - + assume "0 < n" + then have "gcd (n * k + m) n = gcd n (m mod n)" + by (simp add: gcd_non_0 add_commute) + also from `0 < n` have "... = gcd m n" by (simp add: gcd_non_0) + finally show ?thesis . +qed + +lemma gcd_fib_add: "gcd (fib m) (fib (n + m)) = gcd (fib m) (fib n)" +proof (cases m) + case 0 + then show ?thesis by simp +next + case (Suc k) + then have "gcd (fib m) (fib (n + m)) = gcd (fib (n + k + 1)) (fib (k + 1))" + by (simp add: gcd_commute) + also have "fib (n + k + 1) + = fib (k + 1) * fib (n + 1) + fib k * fib n" + by (rule fib_add) + also have "gcd ... (fib (k + 1)) = gcd (fib k * fib n) (fib (k + 1))" + by (simp add: gcd_mult_add) + also have "... = gcd (fib n) (fib (k + 1))" + by (simp only: gcd_fib_Suc_eq_1 gcd_mult_cancel) + also have "... = gcd (fib m) (fib n)" + using Suc by (simp add: gcd_commute) + finally show ?thesis . +qed + +lemma gcd_fib_diff: + assumes "m <= n" + shows "gcd (fib m) (fib (n - m)) = gcd (fib m) (fib n)" +proof - + have "gcd (fib m) (fib (n - m)) = gcd (fib m) (fib (n - m + m))" + by (simp add: gcd_fib_add) + also from `m <= n` have "n - m + m = n" by simp + finally show ?thesis . +qed + +lemma gcd_fib_mod: + assumes "0 < m" + shows "gcd (fib m) (fib (n mod m)) = gcd (fib m) (fib n)" +proof (induct n rule: nat_less_induct) + case (1 n) note hyp = this + show ?case + proof - + have "n mod m = (if n < m then n else (n - m) mod m)" + by (rule mod_if) + also have "gcd (fib m) (fib ...) = gcd (fib m) (fib n)" + proof (cases "n < m") + case True then show ?thesis by simp + next + case False then have "m <= n" by simp + from `0 < m` and False have "n - m < n" by simp + with hyp have "gcd (fib m) (fib ((n - m) mod m)) + = gcd (fib m) (fib (n - m))" by simp + also have "... = gcd (fib m) (fib n)" + using `m <= n` by (rule gcd_fib_diff) + finally have "gcd (fib m) (fib ((n - m) mod m)) = + gcd (fib m) (fib n)" . + with False show ?thesis by simp + qed + finally show ?thesis . + qed +qed + + +theorem fib_gcd: "fib (gcd m n) = gcd (fib m) (fib n)" (is "?P m n") +proof (induct m n rule: gcd_induct) + fix m show "fib (gcd m 0) = gcd (fib m) (fib 0)" by simp + fix n :: nat assume n: "0 < n" + then have "gcd m n = gcd n (m mod n)" by (rule gcd_non_0) + also assume hyp: "fib ... = gcd (fib n) (fib (m mod n))" + also from n have "... 
= gcd (fib n) (fib m)" by (rule gcd_fib_mod) + also have "... = gcd (fib m) (fib n)" by (rule gcd_commute) + finally show "fib (gcd m n) = gcd (fib m) (fib n)" . +qed + +end diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/Group.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/Group.thy Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,267 @@ +(* Title: HOL/Isar_Examples/Group.thy + Author: Markus Wenzel, TU Muenchen +*) + +header {* Basic group theory *} + +theory Group +imports Main +begin + +subsection {* Groups and calculational reasoning *} + +text {* + Groups over signature $({\times} :: \alpha \To \alpha \To \alpha, + \idt{one} :: \alpha, \idt{inverse} :: \alpha \To \alpha)$ are defined + as an axiomatic type class as follows. Note that the parent class + $\idt{times}$ is provided by the basic HOL theory. +*} + +consts + one :: "'a" + inverse :: "'a => 'a" + +axclass + group < times + group_assoc: "(x * y) * z = x * (y * z)" + group_left_one: "one * x = x" + group_left_inverse: "inverse x * x = one" + +text {* + The group axioms only state the properties of left one and inverse, + the right versions may be derived as follows. +*} + +theorem group_right_inverse: "x * inverse x = (one::'a::group)" +proof - + have "x * inverse x = one * (x * inverse x)" + by (simp only: group_left_one) + also have "... = one * x * inverse x" + by (simp only: group_assoc) + also have "... = inverse (inverse x) * inverse x * x * inverse x" + by (simp only: group_left_inverse) + also have "... = inverse (inverse x) * (inverse x * x) * inverse x" + by (simp only: group_assoc) + also have "... = inverse (inverse x) * one * inverse x" + by (simp only: group_left_inverse) + also have "... = inverse (inverse x) * (one * inverse x)" + by (simp only: group_assoc) + also have "... = inverse (inverse x) * inverse x" + by (simp only: group_left_one) + also have "... = one" + by (simp only: group_left_inverse) + finally show ?thesis . +qed + +text {* + With \name{group-right-inverse} already available, + \name{group-right-one}\label{thm:group-right-one} is now established + much easier. +*} + +theorem group_right_one: "x * one = (x::'a::group)" +proof - + have "x * one = x * (inverse x * x)" + by (simp only: group_left_inverse) + also have "... = x * inverse x * x" + by (simp only: group_assoc) + also have "... = one * x" + by (simp only: group_right_inverse) + also have "... = x" + by (simp only: group_left_one) + finally show ?thesis . +qed + +text {* + \medskip The calculational proof style above follows typical + presentations given in any introductory course on algebra. The basic + technique is to form a transitive chain of equations, which in turn + are established by simplifying with appropriate rules. The low-level + logical details of equational reasoning are left implicit. + + Note that ``$\dots$'' is just a special term variable that is bound + automatically to the argument\footnote{The argument of a curried + infix expression happens to be its right-hand side.} of the last fact + achieved by any local assumption or proven statement. In contrast to + $\var{thesis}$, the ``$\dots$'' variable is bound \emph{after} the + proof is finished, though. + + There are only two separate Isar language elements for calculational + proofs: ``\isakeyword{also}'' for initial or intermediate + calculational steps, and ``\isakeyword{finally}'' for exhibiting the + result of a calculation. 
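  For instance, a minimal two-step chain may be written as follows (an
  illustrative lemma, not part of the original theory):
*}

lemma "x * one * one = (x::'a::group)"
proof -
  have "x * one * one = x * one"
    by (simp only: group_right_one)
  also have "... = x"
    by (simp only: group_right_one)
  -- {* \isakeyword{also} composes the two equations by transitivity *}
  finally show ?thesis .
qed

text {*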
These constructs are not hardwired into + Isabelle/Isar, but defined on top of the basic Isar/VM interpreter. + Expanding the \isakeyword{also} and \isakeyword{finally} derived + language elements, calculations may be simulated by hand as + demonstrated below. +*} + +theorem "x * one = (x::'a::group)" +proof - + have "x * one = x * (inverse x * x)" + by (simp only: group_left_inverse) + + note calculation = this + -- {* first calculational step: init calculation register *} + + have "... = x * inverse x * x" + by (simp only: group_assoc) + + note calculation = trans [OF calculation this] + -- {* general calculational step: compose with transitivity rule *} + + have "... = one * x" + by (simp only: group_right_inverse) + + note calculation = trans [OF calculation this] + -- {* general calculational step: compose with transitivity rule *} + + have "... = x" + by (simp only: group_left_one) + + note calculation = trans [OF calculation this] + -- {* final calculational step: compose with transitivity rule ... *} + from calculation + -- {* ... and pick up the final result *} + + show ?thesis . +qed + +text {* + Note that this scheme of calculations is not restricted to plain + transitivity. Rules like anti-symmetry, or even forward and backward + substitution work as well. For the actual implementation of + \isacommand{also} and \isacommand{finally}, Isabelle/Isar maintains + separate context information of ``transitivity'' rules. Rule + selection takes place automatically by higher-order unification. +*} + + +subsection {* Groups as monoids *} + +text {* + Monoids over signature $({\times} :: \alpha \To \alpha \To \alpha, + \idt{one} :: \alpha)$ are defined like this. +*} + +axclass monoid < times + monoid_assoc: "(x * y) * z = x * (y * z)" + monoid_left_one: "one * x = x" + monoid_right_one: "x * one = x" + +text {* + Groups are \emph{not} yet monoids directly from the definition. For + monoids, \name{right-one} had to be included as an axiom, but for + groups both \name{right-one} and \name{right-inverse} are derivable + from the other axioms. With \name{group-right-one} derived as a + theorem of group theory (see page~\pageref{thm:group-right-one}), we + may still instantiate $\idt{group} \subseteq \idt{monoid}$ properly + as follows. +*} + +instance group < monoid + by (intro_classes, + rule group_assoc, + rule group_left_one, + rule group_right_one) + +text {* + The \isacommand{instance} command actually is a version of + \isacommand{theorem}, setting up a goal that reflects the intended + class relation (or type constructor arity). Thus any Isar proof + language element may be involved to establish this statement. When + concluding the proof, the result is transformed into the intended + type signature extension behind the scenes. +*} + +subsection {* More theorems of group theory *} + +text {* + The one element is already uniquely determined by preserving an + \emph{arbitrary} group element. +*} + +theorem group_one_equality: "e * x = x ==> one = (e::'a::group)" +proof - + assume eq: "e * x = x" + have "one = x * inverse x" + by (simp only: group_right_inverse) + also have "... = (e * x) * inverse x" + by (simp only: eq) + also have "... = e * (x * inverse x)" + by (simp only: group_assoc) + also have "... = e * one" + by (simp only: group_right_inverse) + also have "... = e" + by (simp only: group_right_one) + finally show ?thesis . +qed + +text {* + Likewise, the inverse is already determined by the cancel property. 
+*} + +theorem group_inverse_equality: + "x' * x = one ==> inverse x = (x'::'a::group)" +proof - + assume eq: "x' * x = one" + have "inverse x = one * inverse x" + by (simp only: group_left_one) + also have "... = (x' * x) * inverse x" + by (simp only: eq) + also have "... = x' * (x * inverse x)" + by (simp only: group_assoc) + also have "... = x' * one" + by (simp only: group_right_inverse) + also have "... = x'" + by (simp only: group_right_one) + finally show ?thesis . +qed + +text {* + The inverse operation has some further characteristic properties. +*} + +theorem group_inverse_times: + "inverse (x * y) = inverse y * inverse (x::'a::group)" +proof (rule group_inverse_equality) + show "(inverse y * inverse x) * (x * y) = one" + proof - + have "(inverse y * inverse x) * (x * y) = + (inverse y * (inverse x * x)) * y" + by (simp only: group_assoc) + also have "... = (inverse y * one) * y" + by (simp only: group_left_inverse) + also have "... = inverse y * y" + by (simp only: group_right_one) + also have "... = one" + by (simp only: group_left_inverse) + finally show ?thesis . + qed +qed + +theorem inverse_inverse: "inverse (inverse x) = (x::'a::group)" +proof (rule group_inverse_equality) + show "x * inverse x = one" + by (simp only: group_right_inverse) +qed + +theorem inverse_inject: "inverse x = inverse y ==> x = (y::'a::group)" +proof - + assume eq: "inverse x = inverse y" + have "x = x * one" + by (simp only: group_right_one) + also have "... = x * (inverse y * y)" + by (simp only: group_left_inverse) + also have "... = x * (inverse x * y)" + by (simp only: eq) + also have "... = (x * inverse x) * y" + by (simp only: group_assoc) + also have "... = one * y" + by (simp only: group_right_inverse) + also have "... = y" + by (simp only: group_left_one) + finally show ?thesis . +qed + +end \ No newline at end of file diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/Hoare.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/Hoare.thy Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,463 @@ +(* Title: HOL/Isar_Examples/Hoare.thy + Author: Markus Wenzel, TU Muenchen + +A formulation of Hoare logic suitable for Isar. +*) + +header {* Hoare Logic *} + +theory Hoare +imports Main +uses ("~~/src/HOL/Hoare/hoare_tac.ML") +begin + +subsection {* Abstract syntax and semantics *} + +text {* + The following abstract syntax and semantics of Hoare Logic over + \texttt{WHILE} programs closely follows the existing tradition in + Isabelle/HOL of formalizing the presentation given in + \cite[\S6]{Winskel:1993}. See also + \url{http://isabelle.in.tum.de/library/Hoare/} and + \cite{Nipkow:1998:Winskel}. +*} + +types + 'a bexp = "'a set" + 'a assn = "'a set" + +datatype 'a com = + Basic "'a => 'a" + | Seq "'a com" "'a com" ("(_;/ _)" [60, 61] 60) + | Cond "'a bexp" "'a com" "'a com" + | While "'a bexp" "'a assn" "'a com" + +abbreviation + Skip ("SKIP") where + "SKIP == Basic id" + +types + 'a sem = "'a => 'a => bool" + +consts + iter :: "nat => 'a bexp => 'a sem => 'a sem" +primrec + "iter 0 b S s s' = (s ~: b & s = s')" + "iter (Suc n) b S s s' = + (s : b & (EX s''. S s s'' & iter n b S s'' s'))" + +consts + Sem :: "'a com => 'a sem" +primrec + "Sem (Basic f) s s' = (s' = f s)" + "Sem (c1; c2) s s' = (EX s''. Sem c1 s s'' & Sem c2 s'' s')" + "Sem (Cond b c1 c2) s s' = + (if s : b then Sem c1 s s' else Sem c2 s s')" + "Sem (While b x c) s s' = (EX n. 
iter n b (Sem c) s s')" + +constdefs + Valid :: "'a bexp => 'a com => 'a bexp => bool" + ("(3|- _/ (2_)/ _)" [100, 55, 100] 50) + "|- P c Q == ALL s s'. Sem c s s' --> s : P --> s' : Q" + +syntax (xsymbols) + Valid :: "'a bexp => 'a com => 'a bexp => bool" + ("(3\ _/ (2_)/ _)" [100, 55, 100] 50) + +lemma ValidI [intro?]: + "(!!s s'. Sem c s s' ==> s : P ==> s' : Q) ==> |- P c Q" + by (simp add: Valid_def) + +lemma ValidD [dest?]: + "|- P c Q ==> Sem c s s' ==> s : P ==> s' : Q" + by (simp add: Valid_def) + + +subsection {* Primitive Hoare rules *} + +text {* + From the semantics defined above, we derive the standard set of + primitive Hoare rules; e.g.\ see \cite[\S6]{Winskel:1993}. Usually, + variant forms of these rules are applied in actual proof, see also + \S\ref{sec:hoare-isar} and \S\ref{sec:hoare-vcg}. + + \medskip The \name{basic} rule represents any kind of atomic access + to the state space. This subsumes the common rules of \name{skip} + and \name{assign}, as formulated in \S\ref{sec:hoare-isar}. +*} + +theorem basic: "|- {s. f s : P} (Basic f) P" +proof + fix s s' assume s: "s : {s. f s : P}" + assume "Sem (Basic f) s s'" + hence "s' = f s" by simp + with s show "s' : P" by simp +qed + +text {* + The rules for sequential commands and semantic consequences are + established in a straight forward manner as follows. +*} + +theorem seq: "|- P c1 Q ==> |- Q c2 R ==> |- P (c1; c2) R" +proof + assume cmd1: "|- P c1 Q" and cmd2: "|- Q c2 R" + fix s s' assume s: "s : P" + assume "Sem (c1; c2) s s'" + then obtain s'' where sem1: "Sem c1 s s''" and sem2: "Sem c2 s'' s'" + by auto + from cmd1 sem1 s have "s'' : Q" .. + with cmd2 sem2 show "s' : R" .. +qed + +theorem conseq: "P' <= P ==> |- P c Q ==> Q <= Q' ==> |- P' c Q'" +proof + assume P'P: "P' <= P" and QQ': "Q <= Q'" + assume cmd: "|- P c Q" + fix s s' :: 'a + assume sem: "Sem c s s'" + assume "s : P'" with P'P have "s : P" .. + with cmd sem have "s' : Q" .. + with QQ' show "s' : Q'" .. +qed + +text {* + The rule for conditional commands is directly reflected by the + corresponding semantics; in the proof we just have to look closely + which cases apply. +*} + +theorem cond: + "|- (P Int b) c1 Q ==> |- (P Int -b) c2 Q ==> |- P (Cond b c1 c2) Q" +proof + assume case_b: "|- (P Int b) c1 Q" and case_nb: "|- (P Int -b) c2 Q" + fix s s' assume s: "s : P" + assume sem: "Sem (Cond b c1 c2) s s'" + show "s' : Q" + proof cases + assume b: "s : b" + from case_b show ?thesis + proof + from sem b show "Sem c1 s s'" by simp + from s b show "s : P Int b" by simp + qed + next + assume nb: "s ~: b" + from case_nb show ?thesis + proof + from sem nb show "Sem c2 s s'" by simp + from s nb show "s : P Int -b" by simp + qed + qed +qed + +text {* + The \name{while} rule is slightly less trivial --- it is the only one + based on recursion, which is expressed in the semantics by a + Kleene-style least fixed-point construction. The auxiliary statement + below, which is by induction on the number of iterations is the main + point to be proven; the rest is by routine application of the + semantics of \texttt{WHILE}. 
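  As a small illustration of the unfolding involved (a lemma added for
  illustration, not part of the original theory), a single iteration
  amounts to one execution of the loop body:
*}

lemma "iter (Suc 0) b S s s' = (s : b & S s s' & s' ~: b)"
  by auto  -- {* unfolds the two defining equations of @{text iter} *}

text {*
  The rule itself now follows.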
+*} + +theorem while: + assumes body: "|- (P Int b) c P" + shows "|- P (While b X c) (P Int -b)" +proof + fix s s' assume s: "s : P" + assume "Sem (While b X c) s s'" + then obtain n where "iter n b (Sem c) s s'" by auto + from this and s show "s' : P Int -b" + proof (induct n arbitrary: s) + case 0 + thus ?case by auto + next + case (Suc n) + then obtain s'' where b: "s : b" and sem: "Sem c s s''" + and iter: "iter n b (Sem c) s'' s'" + by auto + from Suc and b have "s : P Int b" by simp + with body sem have "s'' : P" .. + with iter show ?case by (rule Suc) + qed +qed + + +subsection {* Concrete syntax for assertions *} + +text {* + We now introduce concrete syntax for describing commands (with + embedded expressions) and assertions. The basic technique is that of + semantic ``quote-antiquote''. A \emph{quotation} is a syntactic + entity delimited by an implicit abstraction, say over the state + space. An \emph{antiquotation} is a marked expression within a + quotation that refers the implicit argument; a typical antiquotation + would select (or even update) components from the state. + + We will see some examples later in the concrete rules and + applications. +*} + +text {* + The following specification of syntax and translations is for + Isabelle experts only; feel free to ignore it. + + While the first part is still a somewhat intelligible specification + of the concrete syntactic representation of our Hoare language, the + actual ``ML drivers'' is quite involved. Just note that the we + re-use the basic quote/antiquote translations as already defined in + Isabelle/Pure (see \verb,Syntax.quote_tr, and + \verb,Syntax.quote_tr',). +*} + +syntax + "_quote" :: "'b => ('a => 'b)" ("(.'(_').)" [0] 1000) + "_antiquote" :: "('a => 'b) => 'b" ("\_" [1000] 1000) + "_Subst" :: "'a bexp \ 'b \ idt \ 'a bexp" + ("_[_'/\_]" [1000] 999) + "_Assert" :: "'a => 'a set" ("(.{_}.)" [0] 1000) + "_Assign" :: "idt => 'b => 'a com" ("(\_ :=/ _)" [70, 65] 61) + "_Cond" :: "'a bexp => 'a com => 'a com => 'a com" + ("(0IF _/ THEN _/ ELSE _/ FI)" [0, 0, 0] 61) + "_While_inv" :: "'a bexp => 'a assn => 'a com => 'a com" + ("(0WHILE _/ INV _ //DO _ /OD)" [0, 0, 0] 61) + "_While" :: "'a bexp => 'a com => 'a com" + ("(0WHILE _ //DO _ /OD)" [0, 0] 61) + +syntax (xsymbols) + "_Assert" :: "'a => 'a set" ("(\_\)" [0] 1000) + +translations + ".{b}." => "Collect .(b)." + "B [a/\x]" => ".{\(_update_name x (\_. a)) \ B}." + "\x := a" => "Basic .(\(_update_name x (\_. a)))." + "IF b THEN c1 ELSE c2 FI" => "Cond .{b}. c1 c2" + "WHILE b INV i DO c OD" => "While .{b}. i c" + "WHILE b DO c OD" == "WHILE b INV CONST undefined DO c OD" + +parse_translation {* + let + fun quote_tr [t] = Syntax.quote_tr "_antiquote" t + | quote_tr ts = raise TERM ("quote_tr", ts); + in [("_quote", quote_tr)] end +*} + +text {* + As usual in Isabelle syntax translations, the part for printing is + more complicated --- we cannot express parts as macro rules as above. + Don't look here, unless you have to do similar things for yourself. 
+*} + +print_translation {* + let + fun quote_tr' f (t :: ts) = + Term.list_comb (f $ Syntax.quote_tr' "_antiquote" t, ts) + | quote_tr' _ _ = raise Match; + + val assert_tr' = quote_tr' (Syntax.const "_Assert"); + + fun bexp_tr' name ((Const ("Collect", _) $ t) :: ts) = + quote_tr' (Syntax.const name) (t :: ts) + | bexp_tr' _ _ = raise Match; + + fun upd_tr' (x_upd, T) = + (case try (unsuffix Record.updateN) x_upd of + SOME x => (x, if T = dummyT then T else Term.domain_type T) + | NONE => raise Match); + + fun update_name_tr' (Free x) = Free (upd_tr' x) + | update_name_tr' ((c as Const ("_free", _)) $ Free x) = + c $ Free (upd_tr' x) + | update_name_tr' (Const x) = Const (upd_tr' x) + | update_name_tr' _ = raise Match; + + fun K_tr' (Abs (_,_,t)) = if null (loose_bnos t) then t else raise Match + | K_tr' (Abs (_,_,Abs (_,_,t)$Bound 0)) = if null (loose_bnos t) then t else raise Match + | K_tr' _ = raise Match; + + fun assign_tr' (Abs (x, _, f $ k $ Bound 0) :: ts) = + quote_tr' (Syntax.const "_Assign" $ update_name_tr' f) + (Abs (x, dummyT, K_tr' k) :: ts) + | assign_tr' _ = raise Match; + in + [("Collect", assert_tr'), ("Basic", assign_tr'), + ("Cond", bexp_tr' "_Cond"), ("While", bexp_tr' "_While_inv")] + end +*} + + +subsection {* Rules for single-step proof \label{sec:hoare-isar} *} + +text {* + We are now ready to introduce a set of Hoare rules to be used in + single-step structured proofs in Isabelle/Isar. We refer to the + concrete syntax introduce above. + + \medskip Assertions of Hoare Logic may be manipulated in + calculational proofs, with the inclusion expressed in terms of sets + or predicates. Reversed order is supported as well. +*} + +lemma [trans]: "|- P c Q ==> P' <= P ==> |- P' c Q" + by (unfold Valid_def) blast +lemma [trans] : "P' <= P ==> |- P c Q ==> |- P' c Q" + by (unfold Valid_def) blast + +lemma [trans]: "Q <= Q' ==> |- P c Q ==> |- P c Q'" + by (unfold Valid_def) blast +lemma [trans]: "|- P c Q ==> Q <= Q' ==> |- P c Q'" + by (unfold Valid_def) blast + +lemma [trans]: + "|- .{\P}. c Q ==> (!!s. P' s --> P s) ==> |- .{\P'}. c Q" + by (simp add: Valid_def) +lemma [trans]: + "(!!s. P' s --> P s) ==> |- .{\P}. c Q ==> |- .{\P'}. c Q" + by (simp add: Valid_def) + +lemma [trans]: + "|- P c .{\Q}. ==> (!!s. Q s --> Q' s) ==> |- P c .{\Q'}." + by (simp add: Valid_def) +lemma [trans]: + "(!!s. Q s --> Q' s) ==> |- P c .{\Q}. ==> |- P c .{\Q'}." + by (simp add: Valid_def) + + +text {* + Identity and basic assignments.\footnote{The $\idt{hoare}$ method + introduced in \S\ref{sec:hoare-vcg} is able to provide proper + instances for any number of basic assignments, without producing + additional verification conditions.} +*} + +lemma skip [intro?]: "|- P SKIP P" +proof - + have "|- {s. id s : P} SKIP P" by (rule basic) + thus ?thesis by simp +qed + +lemma assign: "|- P [\a/\x] \x := \a P" + by (rule basic) + +text {* + Note that above formulation of assignment corresponds to our + preferred way to model state spaces, using (extensible) record types + in HOL \cite{Naraschewski-Wenzel:1998:HOOL}. For any record field + $x$, Isabelle/HOL provides a functions $x$ (selector) and + $\idt{x{\dsh}update}$ (update). 
Above, there is only a place-holder + appearing for the latter kind of function: due to concrete syntax + \isa{\'x := \'a} also contains \isa{x\_update}.\footnote{Note that due + to the external nature of HOL record fields, we could not even state + a general theorem relating selector and update functions (if this + were required here); this would only work for any particular instance + of record fields introduced so far.} +*} + +text {* + Sequential composition --- normalizing with associativity achieves + proper of chunks of code verified separately. +*} + +lemmas [trans, intro?] = seq + +lemma seq_assoc [simp]: "( |- P c1;(c2;c3) Q) = ( |- P (c1;c2);c3 Q)" + by (auto simp add: Valid_def) + +text {* + Conditional statements. +*} + +lemmas [trans, intro?] = cond + +lemma [trans, intro?]: + "|- .{\P & \b}. c1 Q + ==> |- .{\P & ~ \b}. c2 Q + ==> |- .{\P}. IF \b THEN c1 ELSE c2 FI Q" + by (rule cond) (simp_all add: Valid_def) + +text {* + While statements --- with optional invariant. +*} + +lemma [intro?]: + "|- (P Int b) c P ==> |- P (While b P c) (P Int -b)" + by (rule while) + +lemma [intro?]: + "|- (P Int b) c P ==> |- P (While b undefined c) (P Int -b)" + by (rule while) + + +lemma [intro?]: + "|- .{\P & \b}. c .{\P}. + ==> |- .{\P}. WHILE \b INV .{\P}. DO c OD .{\P & ~ \b}." + by (simp add: while Collect_conj_eq Collect_neg_eq) + +lemma [intro?]: + "|- .{\P & \b}. c .{\P}. + ==> |- .{\P}. WHILE \b DO c OD .{\P & ~ \b}." + by (simp add: while Collect_conj_eq Collect_neg_eq) + + +subsection {* Verification conditions \label{sec:hoare-vcg} *} + +text {* + We now load the \emph{original} ML file for proof scripts and tactic + definition for the Hoare Verification Condition Generator (see + \url{http://isabelle.in.tum.de/library/Hoare/}). As far as we are + concerned here, the result is a proof method \name{hoare}, which may + be applied to a Hoare Logic assertion to extract purely logical + verification conditions. It is important to note that the method + requires \texttt{WHILE} loops to be fully annotated with invariants + beforehand. Furthermore, only \emph{concrete} pieces of code are + handled --- the underlying tactic fails ungracefully if supplied with + meta-variables or parameters, for example. +*} + +lemma SkipRule: "p \ q \ Valid p (Basic id) q" + by (auto simp add: Valid_def) + +lemma BasicRule: "p \ {s. f s \ q} \ Valid p (Basic f) q" + by (auto simp: Valid_def) + +lemma SeqRule: "Valid P c1 Q \ Valid Q c2 R \ Valid P (c1;c2) R" + by (auto simp: Valid_def) + +lemma CondRule: + "p \ {s. (s \ b \ s \ w) \ (s \ b \ s \ w')} + \ Valid w c1 q \ Valid w' c2 q \ Valid p (Cond b c1 c2) q" + by (auto simp: Valid_def) + +lemma iter_aux: + "\s s'. Sem c s s' --> s : I & s : b --> s' : I ==> + (\s s'. s : I \ iter n b (Sem c) s s' \ s' : I & s' ~: b)" + apply(induct n) + apply clarsimp + apply (simp (no_asm_use)) + apply blast + done + +lemma WhileRule: + "p \ i \ Valid (i \ b) c i \ i \ (-b) \ q \ Valid p (While b i c) q" + apply (clarsimp simp: Valid_def) + apply (drule iter_aux) + prefer 2 + apply assumption + apply blast + apply blast + done + +lemma Compl_Collect: "- Collect b = {x. 
\ b x}" + by blast + +lemmas AbortRule = SkipRule -- "dummy version" + +use "~~/src/HOL/Hoare/hoare_tac.ML" + +method_setup hoare = {* + Scan.succeed (fn ctxt => + (SIMPLE_METHOD' + (hoare_tac ctxt (simp_tac (HOL_basic_ss addsimps [@{thm "Record.K_record_comp"}] ))))) *} + "verification condition generator for Hoare logic" + +end diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/Hoare_Ex.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/Hoare_Ex.thy Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,329 @@ +header {* Using Hoare Logic *} + +theory Hoare_Ex +imports Hoare +begin + +subsection {* State spaces *} + +text {* + First of all we provide a store of program variables that + occur in any of the programs considered later. Slightly unexpected + things may happen when attempting to work with undeclared variables. +*} + +record vars = + I :: nat + M :: nat + N :: nat + S :: nat + +text {* + While all of our variables happen to have the same type, nothing + would prevent us from working with many-sorted programs as well, or + even polymorphic ones. Also note that Isabelle/HOL's extensible + record types even provides simple means to extend the state space + later. +*} + + +subsection {* Basic examples *} + +text {* + We look at few trivialities involving assignment and sequential + composition, in order to get an idea of how to work with our + formulation of Hoare Logic. +*} + +text {* + Using the basic \name{assign} rule directly is a bit cumbersome. +*} + +lemma + "|- .{\(N_update (\_. (2 * \N))) : .{\N = 10}.}. \N := 2 * \N .{\N = 10}." + by (rule assign) + +text {* + Certainly we want the state modification already done, e.g.\ by + simplification. The \name{hoare} method performs the basic state + update for us; we may apply the Simplifier afterwards to achieve + ``obvious'' consequences as well. +*} + +lemma "|- .{True}. \N := 10 .{\N = 10}." + by hoare + +lemma "|- .{2 * \N = 10}. \N := 2 * \N .{\N = 10}." + by hoare + +lemma "|- .{\N = 5}. \N := 2 * \N .{\N = 10}." + by hoare simp + +lemma "|- .{\N + 1 = a + 1}. \N := \N + 1 .{\N = a + 1}." + by hoare + +lemma "|- .{\N = a}. \N := \N + 1 .{\N = a + 1}." + by hoare simp + +lemma "|- .{a = a & b = b}. \M := a; \N := b .{\M = a & \N = b}." + by hoare + +lemma "|- .{True}. \M := a; \N := b .{\M = a & \N = b}." + by hoare simp + +lemma +"|- .{\M = a & \N = b}. + \I := \M; \M := \N; \N := \I + .{\M = b & \N = a}." + by hoare simp + +text {* + It is important to note that statements like the following one can + only be proven for each individual program variable. Due to the + extra-logical nature of record fields, we cannot formulate a theorem + relating record selectors and updates schematically. +*} + +lemma "|- .{\N = a}. \N := \N .{\N = a}." + by hoare + +lemma "|- .{\x = a}. \x := \x .{\x = a}." + oops + +lemma + "Valid {s. x s = a} (Basic (\s. x_update (x s) s)) {s. x s = n}" + -- {* same statement without concrete syntax *} + oops + + +text {* + In the following assignments we make use of the consequence rule in + order to achieve the intended precondition. Certainly, the + \name{hoare} method is able to handle this case, too. +*} + +lemma "|- .{\M = \N}. \M := \M + 1 .{\M ~= \N}." +proof - + have ".{\M = \N}. <= .{\M + 1 ~= \N}." + by auto + also have "|- ... \M := \M + 1 .{\M ~= \N}." + by hoare + finally show ?thesis . +qed + +lemma "|- .{\M = \N}. \M := \M + 1 .{\M ~= \N}." +proof - + have "!!m n::nat. 
m = n --> m + 1 ~= n" + -- {* inclusion of assertions expressed in ``pure'' logic, *} + -- {* without mentioning the state space *} + by simp + also have "|- .{\M + 1 ~= \N}. \M := \M + 1 .{\M ~= \N}." + by hoare + finally show ?thesis . +qed + +lemma "|- .{\M = \N}. \M := \M + 1 .{\M ~= \N}." + by hoare simp + + +subsection {* Multiplication by addition *} + +text {* + We now do some basic examples of actual \texttt{WHILE} programs. + This one is a loop for calculating the product of two natural + numbers, by iterated addition. We first give detailed structured + proof based on single-step Hoare rules. +*} + +lemma + "|- .{\M = 0 & \S = 0}. + WHILE \M ~= a + DO \S := \S + b; \M := \M + 1 OD + .{\S = a * b}." +proof - + let "|- _ ?while _" = ?thesis + let ".{\?inv}." = ".{\S = \M * b}." + + have ".{\M = 0 & \S = 0}. <= .{\?inv}." by auto + also have "|- ... ?while .{\?inv & ~ (\M ~= a)}." + proof + let ?c = "\S := \S + b; \M := \M + 1" + have ".{\?inv & \M ~= a}. <= .{\S + b = (\M + 1) * b}." + by auto + also have "|- ... ?c .{\?inv}." by hoare + finally show "|- .{\?inv & \M ~= a}. ?c .{\?inv}." . + qed + also have "... <= .{\S = a * b}." by auto + finally show ?thesis . +qed + +text {* + The subsequent version of the proof applies the \name{hoare} method + to reduce the Hoare statement to a purely logical problem that can be + solved fully automatically. Note that we have to specify the + \texttt{WHILE} loop invariant in the original statement. +*} + +lemma + "|- .{\M = 0 & \S = 0}. + WHILE \M ~= a + INV .{\S = \M * b}. + DO \S := \S + b; \M := \M + 1 OD + .{\S = a * b}." + by hoare auto + + +subsection {* Summing natural numbers *} + +text {* + We verify an imperative program to sum natural numbers up to a given + limit. First some functional definition for proper specification of + the problem. +*} + +text {* + The following proof is quite explicit in the individual steps taken, + with the \name{hoare} method only applied locally to take care of + assignment and sequential composition. Note that we express + intermediate proof obligation in pure logic, without referring to the + state space. +*} + +declare atLeast0LessThan[symmetric,simp] + +theorem + "|- .{True}. + \S := 0; \I := 1; + WHILE \I ~= n + DO + \S := \S + \I; + \I := \I + 1 + OD + .{\S = (SUM jS := 0; \I := 1 .{?inv \S \I}." + proof - + have "True --> 0 = ?sum 1" + by simp + also have "|- .{...}. \S := 0; \I := 1 .{?inv \S \I}." + by hoare + finally show ?thesis . + qed + also have "|- ... ?while .{?inv \S \I & ~ \I ~= n}." + proof + let ?body = "\S := \S + \I; \I := \I + 1" + have "!!s i. ?inv s i & i ~= n --> ?inv (s + i) (i + 1)" + by simp + also have "|- .{\S + \I = ?sum (\I + 1)}. ?body .{?inv \S \I}." + by hoare + finally show "|- .{?inv \S \I & \I ~= n}. ?body .{?inv \S \I}." . + qed + also have "!!s i. s = ?sum i & ~ i ~= n --> s = ?sum n" + by simp + finally show ?thesis . +qed + +text {* + The next version uses the \name{hoare} method, while still explaining + the resulting proof obligations in an abstract, structured manner. +*} + +theorem + "|- .{True}. + \S := 0; \I := 1; + WHILE \I ~= n + INV .{\S = (SUM j<\I. j)}. + DO + \S := \S + \I; + \I := \I + 1 + OD + .{\S = (SUM jS := 0; \I := 1; + WHILE \I ~= n + INV .{\S = (SUM j<\I. j)}. + DO + \S := \S + \I; + \I := \I + 1 + OD + .{\S = (SUM j 'a time com" +primrec + "timeit (Basic f) = (Basic f; Basic(\s. 
s\time := Suc (time s)\))" + "timeit (c1; c2) = (timeit c1; timeit c2)" + "timeit (Cond b c1 c2) = Cond b (timeit c1) (timeit c2)" + "timeit (While b iv c) = While b iv (timeit c)" + +record tvars = tstate + + I :: nat + J :: nat + +lemma lem: "(0::nat) < n \ n + n \ Suc (n * n)" + by (induct n) simp_all + +lemma "|- .{i = \I & \time = 0}. + timeit( + WHILE \I \ 0 + INV .{2*\time + \I*\I + 5*\I = i*i + 5*i}. + DO + \J := \I; + WHILE \J \ 0 + INV .{0 < \I & 2*\time + \I*\I + 3*\I + 2*\J - 2 = i*i + 5*i}. + DO \J := \J - 1 OD; + \I := \I - 1 + OD + ) .{2*\time = i*i + 5*i}." + apply simp + apply hoare + apply simp + apply clarsimp + apply clarsimp + apply arith + prefer 2 + apply clarsimp + apply (clarsimp simp: nat_distrib) + apply (frule lem) + apply arith + done + +end diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/Knaster_Tarski.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/Knaster_Tarski.thy Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,111 @@ +(* Title: HOL/Isar_Examples/Knaster_Tarski.thy + Author: Markus Wenzel, TU Muenchen + +Typical textbook proof example. +*) + +header {* Textbook-style reasoning: the Knaster-Tarski Theorem *} + +theory Knaster_Tarski +imports Main Lattice_Syntax +begin + + +subsection {* Prose version *} + +text {* + According to the textbook \cite[pages 93--94]{davey-priestley}, the + Knaster-Tarski fixpoint theorem is as follows.\footnote{We have + dualized the argument, and tuned the notation a little bit.} + + \textbf{The Knaster-Tarski Fixpoint Theorem.} Let @{text L} be a + complete lattice and @{text "f: L \ L"} an order-preserving map. + Then @{text "\{x \ L | f(x) \ x}"} is a fixpoint of @{text f}. + + \textbf{Proof.} Let @{text "H = {x \ L | f(x) \ x}"} and @{text "a = + \H"}. For all @{text "x \ H"} we have @{text "a \ x"}, so @{text + "f(a) \ f(x) \ x"}. Thus @{text "f(a)"} is a lower bound of @{text + H}, whence @{text "f(a) \ a"}. We now use this inequality to prove + the reverse one (!) and thereby complete the proof that @{text a} is + a fixpoint. Since @{text f} is order-preserving, @{text "f(f(a)) \ + f(a)"}. This says @{text "f(a) \ H"}, so @{text "a \ f(a)"}. +*} + + +subsection {* Formal versions *} + +text {* + The Isar proof below closely follows the original presentation. + Virtually all of the prose narration has been rephrased in terms of + formal Isar language elements. Just as many textbook-style proofs, + there is a strong bias towards forward proof, and several bends in + the course of reasoning. +*} + +theorem Knaster_Tarski: + fixes f :: "'a::complete_lattice \ 'a" + assumes "mono f" + shows "\a. f a = a" +proof + let ?H = "{u. f u \ u}" + let ?a = "\?H" + show "f ?a = ?a" + proof - + { + fix x + assume "x \ ?H" + then have "?a \ x" by (rule Inf_lower) + with `mono f` have "f ?a \ f x" .. + also from `x \ ?H` have "\ \ x" .. + finally have "f ?a \ x" . + } + then have "f ?a \ ?a" by (rule Inf_greatest) + { + also presume "\ \ f ?a" + finally (order_antisym) show ?thesis . + } + from `mono f` and `f ?a \ ?a` have "f (f ?a) \ f ?a" .. + then have "f ?a \ ?H" .. + then show "?a \ f ?a" by (rule Inf_lower) + qed +qed + +text {* + Above we have used several advanced Isar language elements, such as + explicit block structure and weak assumptions. Thus we have + mimicked the particular way of reasoning of the original text. 
+ + In the subsequent version the order of reasoning is changed to + achieve structured top-down decomposition of the problem at the + outer level, while only the inner steps of reasoning are done in a + forward manner. We are certainly more at ease here, requiring only + the most basic features of the Isar language. +*} + +theorem Knaster_Tarski': + fixes f :: "'a::complete_lattice \ 'a" + assumes "mono f" + shows "\a. f a = a" +proof + let ?H = "{u. f u \ u}" + let ?a = "\?H" + show "f ?a = ?a" + proof (rule order_antisym) + show "f ?a \ ?a" + proof (rule Inf_greatest) + fix x + assume "x \ ?H" + then have "?a \ x" by (rule Inf_lower) + with `mono f` have "f ?a \ f x" .. + also from `x \ ?H` have "\ \ x" .. + finally show "f ?a \ x" . + qed + show "?a \ f ?a" + proof (rule Inf_lower) + from `mono f` and `f ?a \ ?a` have "f (f ?a) \ f ?a" .. + then show "f ?a \ ?H" .. + qed + qed +qed + +end diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/Mutilated_Checkerboard.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/Mutilated_Checkerboard.thy Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,300 @@ +(* Title: HOL/Isar_Examples/Mutilated_Checkerboard.thy + Author: Markus Wenzel, TU Muenchen (Isar document) + Author: Lawrence C Paulson, Cambridge University Computer Laboratory (original scripts) +*) + +header {* The Mutilated Checker Board Problem *} + +theory Mutilated_Checkerboard +imports Main +begin + +text {* + The Mutilated Checker Board Problem, formalized inductively. See + \cite{paulson-mutilated-board} and + \url{http://isabelle.in.tum.de/library/HOL/Induct/Mutil.html} for the + original tactic script version. +*} + +subsection {* Tilings *} + +inductive_set + tiling :: "'a set set => 'a set set" + for A :: "'a set set" + where + empty: "{} : tiling A" + | Un: "a : A ==> t : tiling A ==> a <= - t ==> a Un t : tiling A" + + +text "The union of two disjoint tilings is a tiling." + +lemma tiling_Un: + assumes "t : tiling A" and "u : tiling A" and "t Int u = {}" + shows "t Un u : tiling A" +proof - + let ?T = "tiling A" + from `t : ?T` and `t Int u = {}` + show "t Un u : ?T" + proof (induct t) + case empty + with `u : ?T` show "{} Un u : ?T" by simp + next + case (Un a t) + show "(a Un t) Un u : ?T" + proof - + have "a Un (t Un u) : ?T" + using `a : A` + proof (rule tiling.Un) + from `(a Un t) Int u = {}` have "t Int u = {}" by blast + then show "t Un u: ?T" by (rule Un) + from `a <= - t` and `(a Un t) Int u = {}` + show "a <= - (t Un u)" by blast + qed + also have "a Un (t Un u) = (a Un t) Un u" + by (simp only: Un_assoc) + finally show ?thesis . + qed + qed +qed + + +subsection {* Basic properties of ``below'' *} + +constdefs + below :: "nat => nat set" + "below n == {i. i < n}" + +lemma below_less_iff [iff]: "(i: below k) = (i < k)" + by (simp add: below_def) + +lemma below_0: "below 0 = {}" + by (simp add: below_def) + +lemma Sigma_Suc1: + "m = n + 1 ==> below m <*> B = ({n} <*> B) Un (below n <*> B)" + by (simp add: below_def less_Suc_eq) blast + +lemma Sigma_Suc2: + "m = n + 2 ==> A <*> below m = + (A <*> {n}) Un (A <*> {n + 1}) Un (A <*> below n)" + by (auto simp add: below_def) + +lemmas Sigma_Suc = Sigma_Suc1 Sigma_Suc2 + + +subsection {* Basic properties of ``evnodd'' *} + +constdefs + evnodd :: "(nat * nat) set => nat => (nat * nat) set" + "evnodd A b == A Int {(i, j). 
(i + j) mod 2 = b}" + +lemma evnodd_iff: + "(i, j): evnodd A b = ((i, j): A & (i + j) mod 2 = b)" + by (simp add: evnodd_def) + +lemma evnodd_subset: "evnodd A b <= A" + by (unfold evnodd_def, rule Int_lower1) + +lemma evnoddD: "x : evnodd A b ==> x : A" + by (rule subsetD, rule evnodd_subset) + +lemma evnodd_finite: "finite A ==> finite (evnodd A b)" + by (rule finite_subset, rule evnodd_subset) + +lemma evnodd_Un: "evnodd (A Un B) b = evnodd A b Un evnodd B b" + by (unfold evnodd_def) blast + +lemma evnodd_Diff: "evnodd (A - B) b = evnodd A b - evnodd B b" + by (unfold evnodd_def) blast + +lemma evnodd_empty: "evnodd {} b = {}" + by (simp add: evnodd_def) + +lemma evnodd_insert: "evnodd (insert (i, j) C) b = + (if (i + j) mod 2 = b + then insert (i, j) (evnodd C b) else evnodd C b)" + by (simp add: evnodd_def) + + +subsection {* Dominoes *} + +inductive_set + domino :: "(nat * nat) set set" + where + horiz: "{(i, j), (i, j + 1)} : domino" + | vertl: "{(i, j), (i + 1, j)} : domino" + +lemma dominoes_tile_row: + "{i} <*> below (2 * n) : tiling domino" + (is "?B n : ?T") +proof (induct n) + case 0 + show ?case by (simp add: below_0 tiling.empty) +next + case (Suc n) + let ?a = "{i} <*> {2 * n + 1} Un {i} <*> {2 * n}" + have "?B (Suc n) = ?a Un ?B n" + by (auto simp add: Sigma_Suc Un_assoc) + moreover have "... : ?T" + proof (rule tiling.Un) + have "{(i, 2 * n), (i, 2 * n + 1)} : domino" + by (rule domino.horiz) + also have "{(i, 2 * n), (i, 2 * n + 1)} = ?a" by blast + finally show "... : domino" . + show "?B n : ?T" by (rule Suc) + show "?a <= - ?B n" by blast + qed + ultimately show ?case by simp +qed + +lemma dominoes_tile_matrix: + "below m <*> below (2 * n) : tiling domino" + (is "?B m : ?T") +proof (induct m) + case 0 + show ?case by (simp add: below_0 tiling.empty) +next + case (Suc m) + let ?t = "{m} <*> below (2 * n)" + have "?B (Suc m) = ?t Un ?B m" by (simp add: Sigma_Suc) + moreover have "... : ?T" + proof (rule tiling_Un) + show "?t : ?T" by (rule dominoes_tile_row) + show "?B m : ?T" by (rule Suc) + show "?t Int ?B m = {}" by blast + qed + ultimately show ?case by simp +qed + +lemma domino_singleton: + assumes d: "d : domino" and "b < 2" + shows "EX i j. evnodd d b = {(i, j)}" (is "?P d") + using d +proof induct + from `b < 2` have b_cases: "b = 0 | b = 1" by arith + fix i j + note [simp] = evnodd_empty evnodd_insert mod_Suc + from b_cases show "?P {(i, j), (i, j + 1)}" by rule auto + from b_cases show "?P {(i, j), (i + 1, j)}" by rule auto +qed + +lemma domino_finite: + assumes d: "d: domino" + shows "finite d" + using d +proof induct + fix i j :: nat + show "finite {(i, j), (i, j + 1)}" by (intro finite.intros) + show "finite {(i, j), (i + 1, j)}" by (intro finite.intros) +qed + + +subsection {* Tilings of dominoes *} + +lemma tiling_domino_finite: + assumes t: "t : tiling domino" (is "t : ?T") + shows "finite t" (is "?F t") + using t +proof induct + show "?F {}" by (rule finite.emptyI) + fix a t assume "?F t" + assume "a : domino" then have "?F a" by (rule domino_finite) + from this and `?F t` show "?F (a Un t)" by (rule finite_UnI) +qed + +lemma tiling_domino_01: + assumes t: "t : tiling domino" (is "t : ?T") + shows "card (evnodd t 0) = card (evnodd t 1)" + using t +proof induct + case empty + show ?case by (simp add: evnodd_def) +next + case (Un a t) + let ?e = evnodd + note hyp = `card (?e t 0) = card (?e t 1)` + and at = `a <= - t` + have card_suc: + "!!b. 
b < 2 ==> card (?e (a Un t) b) = Suc (card (?e t b))" + proof - + fix b :: nat assume "b < 2" + have "?e (a Un t) b = ?e a b Un ?e t b" by (rule evnodd_Un) + also obtain i j where e: "?e a b = {(i, j)}" + proof - + from `a \ domino` and `b < 2` + have "EX i j. ?e a b = {(i, j)}" by (rule domino_singleton) + then show ?thesis by (blast intro: that) + qed + moreover have "... Un ?e t b = insert (i, j) (?e t b)" by simp + moreover have "card ... = Suc (card (?e t b))" + proof (rule card_insert_disjoint) + from `t \ tiling domino` have "finite t" + by (rule tiling_domino_finite) + then show "finite (?e t b)" + by (rule evnodd_finite) + from e have "(i, j) : ?e a b" by simp + with at show "(i, j) ~: ?e t b" by (blast dest: evnoddD) + qed + ultimately show "?thesis b" by simp + qed + then have "card (?e (a Un t) 0) = Suc (card (?e t 0))" by simp + also from hyp have "card (?e t 0) = card (?e t 1)" . + also from card_suc have "Suc ... = card (?e (a Un t) 1)" + by simp + finally show ?case . +qed + + +subsection {* Main theorem *} + +constdefs + mutilated_board :: "nat => nat => (nat * nat) set" + "mutilated_board m n == + below (2 * (m + 1)) <*> below (2 * (n + 1)) + - {(0, 0)} - {(2 * m + 1, 2 * n + 1)}" + +theorem mutil_not_tiling: "mutilated_board m n ~: tiling domino" +proof (unfold mutilated_board_def) + let ?T = "tiling domino" + let ?t = "below (2 * (m + 1)) <*> below (2 * (n + 1))" + let ?t' = "?t - {(0, 0)}" + let ?t'' = "?t' - {(2 * m + 1, 2 * n + 1)}" + + show "?t'' ~: ?T" + proof + have t: "?t : ?T" by (rule dominoes_tile_matrix) + assume t'': "?t'' : ?T" + + let ?e = evnodd + have fin: "finite (?e ?t 0)" + by (rule evnodd_finite, rule tiling_domino_finite, rule t) + + note [simp] = evnodd_iff evnodd_empty evnodd_insert evnodd_Diff + have "card (?e ?t'' 0) < card (?e ?t' 0)" + proof - + have "card (?e ?t' 0 - {(2 * m + 1, 2 * n + 1)}) + < card (?e ?t' 0)" + proof (rule card_Diff1_less) + from _ fin show "finite (?e ?t' 0)" + by (rule finite_subset) auto + show "(2 * m + 1, 2 * n + 1) : ?e ?t' 0" by simp + qed + then show ?thesis by simp + qed + also have "... < card (?e ?t 0)" + proof - + have "(0, 0) : ?e ?t 0" by simp + with fin have "card (?e ?t 0 - {(0, 0)}) < card (?e ?t 0)" + by (rule card_Diff1_less) + then show ?thesis by simp + qed + also from t have "... = card (?e ?t 1)" + by (rule tiling_domino_01) + also have "?e ?t 1 = ?e ?t'' 1" by simp + also from t'' have "card ... = card (?e ?t'' 0)" + by (rule tiling_domino_01 [symmetric]) + finally have "... < ..." . then show False .. + qed +qed + +end diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/Nested_Datatype.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/Nested_Datatype.thy Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,86 @@ +header {* Nested datatypes *} + +theory Nested_Datatype +imports Main +begin + +subsection {* Terms and substitution *} + +datatype ('a, 'b) "term" = + Var 'a + | App 'b "('a, 'b) term list" + +consts + subst_term :: "('a => ('a, 'b) term) => ('a, 'b) term => ('a, 'b) term" + subst_term_list :: + "('a => ('a, 'b) term) => ('a, 'b) term list => ('a, 'b) term list" + +primrec (subst) + "subst_term f (Var a) = f a" + "subst_term f (App b ts) = App b (subst_term_list f ts)" + "subst_term_list f [] = []" + "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts" + + +text {* + \medskip A simple lemma about composition of substitutions. 
+*} + +lemma "subst_term (subst_term f1 o f2) t = + subst_term f1 (subst_term f2 t)" + and "subst_term_list (subst_term f1 o f2) ts = + subst_term_list f1 (subst_term_list f2 ts)" + by (induct t and ts) simp_all + +lemma "subst_term (subst_term f1 o f2) t = + subst_term f1 (subst_term f2 t)" +proof - + let "?P t" = ?thesis + let ?Q = "\ts. subst_term_list (subst_term f1 o f2) ts = + subst_term_list f1 (subst_term_list f2 ts)" + show ?thesis + proof (induct t) + fix a show "?P (Var a)" by simp + next + fix b ts assume "?Q ts" + then show "?P (App b ts)" + by (simp only: subst.simps) + next + show "?Q []" by simp + next + fix t ts + assume "?P t" "?Q ts" then show "?Q (t # ts)" + by (simp only: subst.simps) + qed +qed + + +subsection {* Alternative induction *} + +theorem term_induct' [case_names Var App]: + assumes var: "!!a. P (Var a)" + and app: "!!b ts. list_all P ts ==> P (App b ts)" + shows "P t" +proof (induct t) + fix a show "P (Var a)" by (rule var) +next + fix b t ts assume "list_all P ts" + then show "P (App b ts)" by (rule app) +next + show "list_all P []" by simp +next + fix t ts assume "P t" "list_all P ts" + then show "list_all P (t # ts)" by simp +qed + +lemma + "subst_term (subst_term f1 o f2) t = subst_term f1 (subst_term f2 t)" +proof (induct t rule: term_induct') + case (Var a) + show ?case by (simp add: o_def) +next + case (App b ts) + then show ?case by (induct ts) simp_all +qed + +end diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/Peirce.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/Peirce.thy Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,90 @@ +(* Title: HOL/Isar_Examples/Peirce.thy + Author: Markus Wenzel, TU Muenchen +*) + +header {* Peirce's Law *} + +theory Peirce +imports Main +begin + +text {* + We consider Peirce's Law: $((A \impl B) \impl A) \impl A$. This is + an inherently non-intuitionistic statement, so its proof will + certainly involve some form of classical contradiction. + + The first proof is again a well-balanced combination of plain + backward and forward reasoning. The actual classical step is where + the negated goal may be introduced as additional assumption. This + eventually leads to a contradiction.\footnote{The rule involved there + is negation elimination; it holds in intuitionistic logic as well.} +*} + +theorem "((A --> B) --> A) --> A" +proof + assume "(A --> B) --> A" + show A + proof (rule classical) + assume "~ A" + have "A --> B" + proof + assume A + with `~ A` show B by contradiction + qed + with `(A --> B) --> A` show A .. + qed +qed + +text {* + In the subsequent version the reasoning is rearranged by means of + ``weak assumptions'' (as introduced by \isacommand{presume}). Before + assuming the negated goal $\neg A$, its intended consequence $A \impl + B$ is put into place in order to solve the main problem. + Nevertheless, we do not get anything for free, but have to establish + $A \impl B$ later on. The overall effect is that of a logical + \emph{cut}. + + Technically speaking, whenever some goal is solved by + \isacommand{show} in the context of weak assumptions then the latter + give rise to new subgoals, which may be established separately. In + contrast, strong assumptions (as introduced by \isacommand{assume}) + are solved immediately. +*} + +theorem "((A --> B) --> A) --> A" +proof + assume "(A --> B) --> A" + show A + proof (rule classical) + presume "A --> B" + with `(A --> B) --> A` show A .. 
+ next + assume "~ A" + show "A --> B" + proof + assume A + with `~ A` show B by contradiction + qed + qed +qed + +text {* + Note that the goals stemming from weak assumptions may be even left + until qed time, where they get eventually solved ``by assumption'' as + well. In that case there is really no fundamental difference between + the two kinds of assumptions, apart from the order of reducing the + individual parts of the proof configuration. + + Nevertheless, the ``strong'' mode of plain assumptions is quite + important in practice to achieve robustness of proof text + interpretation. By forcing both the conclusion \emph{and} the + assumptions to unify with the pending goal to be solved, goal + selection becomes quite deterministic. For example, decomposition + with rules of the ``case-analysis'' type usually gives rise to + several goals that only differ in there local contexts. With strong + assumptions these may be still solved in any order in a predictable + way, while weak ones would quickly lead to great confusion, + eventually demanding even some backtracking. +*} + +end diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/Puzzle.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/Puzzle.thy Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,85 @@ +header {* An old chestnut *} + +theory Puzzle +imports Main +begin + +text_raw {* + \footnote{A question from ``Bundeswettbewerb Mathematik''. Original + pen-and-paper proof due to Herbert Ehler; Isabelle tactic script by + Tobias Nipkow.} +*} + +text {* + \textbf{Problem.} Given some function $f\colon \Nat \to \Nat$ such + that $f \ap (f \ap n) < f \ap (\idt{Suc} \ap n)$ for all $n$. + Demonstrate that $f$ is the identity. +*} + +theorem + assumes f_ax: "\n. f (f n) < f (Suc n)" + shows "f n = n" +proof (rule order_antisym) + { + fix n show "n \ f n" + proof (induct k \ "f n" arbitrary: n rule: less_induct) + case (less k n) + then have hyp: "\m. f m < f n \ m \ f m" by (simp only:) + show "n \ f n" + proof (cases n) + case (Suc m) + from f_ax have "f (f m) < f n" by (simp only: Suc) + with hyp have "f m \ f (f m)" . + also from f_ax have "\ < f n" by (simp only: Suc) + finally have "f m < f n" . + with hyp have "m \ f m" . + also note `\ < f n` + finally have "m < f n" . + then have "n \ f n" by (simp only: Suc) + then show ?thesis . + next + case 0 + then show ?thesis by simp + qed + qed + } note ge = this + + { + fix m n :: nat + assume "m \ n" + then have "f m \ f n" + proof (induct n) + case 0 + then have "m = 0" by simp + then show ?case by simp + next + case (Suc n) + from Suc.prems show "f m \ f (Suc n)" + proof (rule le_SucE) + assume "m \ n" + with Suc.hyps have "f m \ f n" . + also from ge f_ax have "\ < f (Suc n)" + by (rule le_less_trans) + finally show ?thesis by simp + next + assume "m = Suc n" + then show ?thesis by simp + qed + qed + } note mono = this + + show "f n \ n" + proof - + have "\ n < f n" + proof + assume "n < f n" + then have "Suc n \ f n" by simp + then have "f (Suc n) \ f (f n)" by (rule mono) + also have "\ < f (Suc n)" by (rule f_ax) + finally have "\ < \" . then show False .. + qed + then show ?thesis by simp + qed +qed + +end diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/README.html --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/README.html Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,21 @@ + + + + + + + + + HOL/Isar_Examples + + + +

HOL/Isar_Examples

+ +Isar offers a new high-level proof (and theory) language interface to +Isabelle. This directory contains some example Isar documents. See +also the included document, or the Isabelle/Isar page for more +information. + + diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/ROOT.ML --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/ROOT.ML Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,18 @@ +(* Miscellaneous Isabelle/Isar examples for Higher-Order Logic. *) + +no_document use_thys ["../Old_Number_Theory/Primes", "../Old_Number_Theory/Fibonacci"]; + +use_thys [ + "Basic_Logic", + "Cantor", + "Peirce", + "Drinker", + "Expr_Compiler", + "Group", + "Summation", + "Knaster_Tarski", + "Mutilated_Checkerboard", + "Puzzle", + "Nested_Datatype", + "Hoare_Ex" +]; diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/Summation.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/Summation.thy Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,158 @@ +(* Title: HOL/Isar_Examples/Summation.thy + Author: Markus Wenzel +*) + +header {* Summing natural numbers *} + +theory Summation +imports Main +begin + +text_raw {* + \footnote{This example is somewhat reminiscent of the + \url{http://isabelle.in.tum.de/library/HOL/ex/NatSum.html}, which is + discussed in \cite{isabelle-ref} in the context of permutative + rewrite rules and ordered rewriting.} +*} + +text {* + Subsequently, we prove some summation laws of natural numbers + (including odds, squares, and cubes). These examples demonstrate how + plain natural deduction (including induction) may be combined with + calculational proof. +*} + + +subsection {* Summation laws *} + +text {* + The sum of natural numbers $0 + \cdots + n$ equals $n \times (n + + 1)/2$. Avoiding formal reasoning about division we prove this + equation multiplied by $2$. +*} + +theorem sum_of_naturals: + "2 * (\i::nat=0..n. i) = n * (n + 1)" + (is "?P n" is "?S n = _") +proof (induct n) + show "?P 0" by simp +next + fix n have "?S (n + 1) = ?S n + 2 * (n + 1)" by simp + also assume "?S n = n * (n + 1)" + also have "... + 2 * (n + 1) = (n + 1) * (n + 2)" by simp + finally show "?P (Suc n)" by simp +qed + +text {* + The above proof is a typical instance of mathematical induction. The + main statement is viewed as some $\var{P} \ap n$ that is split by the + induction method into base case $\var{P} \ap 0$, and step case + $\var{P} \ap n \Impl \var{P} \ap (\idt{Suc} \ap n)$ for arbitrary $n$. + + The step case is established by a short calculation in forward + manner. Starting from the left-hand side $\var{S} \ap (n + 1)$ of + the thesis, the final result is achieved by transformations involving + basic arithmetic reasoning (using the Simplifier). The main point is + where the induction hypothesis $\var{S} \ap n = n \times (n + 1)$ is + introduced in order to replace a certain subterm. So the + ``transitivity'' rule involved here is actual \emph{substitution}. + Also note how the occurrence of ``\dots'' in the subsequent step + documents the position where the right-hand side of the hypothesis + got filled in. + + \medskip A further notable point here is integration of calculations + with plain natural deduction. This works so well in Isar for two + reasons. + \begin{enumerate} + + \item Facts involved in \isakeyword{also}~/ \isakeyword{finally} + calculational chains may be just anything. There is nothing special + about \isakeyword{have}, so the natural deduction element + \isakeyword{assume} works just as well. 
+ + \item There are two \emph{separate} primitives for building natural + deduction contexts: \isakeyword{fix}~$x$ and \isakeyword{assume}~$A$. + Thus it is possible to start reasoning with some new ``arbitrary, but + fixed'' elements before bringing in the actual assumption. In + contrast, natural deduction is occasionally formalized with basic + context elements of the form $x:A$ instead. + + \end{enumerate} +*} + +text {* + \medskip We derive further summation laws for odds, squares, and + cubes as follows. The basic technique of induction plus calculation + is the same as before. +*} + +theorem sum_of_odds: + "(\i::nat=0..i::nat=0..n. i^Suc (Suc 0)) = n * (n + 1) * (2 * n + 1)" + (is "?P n" is "?S n = _") +proof (induct n) + show "?P 0" by simp +next + fix n have "?S (n + 1) = ?S n + 6 * (n + 1)^Suc (Suc 0)" + by (simp add: distrib) + also assume "?S n = n * (n + 1) * (2 * n + 1)" + also have "... + 6 * (n + 1)^Suc (Suc 0) = + (n + 1) * (n + 2) * (2 * (n + 1) + 1)" by (simp add: distrib) + finally show "?P (Suc n)" by simp +qed + +theorem sum_of_cubes: + "4 * (\i::nat=0..n. i^3) = (n * (n + 1))^Suc (Suc 0)" + (is "?P n" is "?S n = _") +proof (induct n) + show "?P 0" by (simp add: power_eq_if) +next + fix n have "?S (n + 1) = ?S n + 4 * (n + 1)^3" + by (simp add: power_eq_if distrib) + also assume "?S n = (n * (n + 1))^Suc (Suc 0)" + also have "... + 4 * (n + 1)^3 = ((n + 1) * ((n + 1) + 1))^Suc (Suc 0)" + by (simp add: power_eq_if distrib) + finally show "?P (Suc n)" by simp +qed + +text {* + Comparing these examples with the tactic script version + \url{http://isabelle.in.tum.de/library/HOL/ex/NatSum.html}, we note + an important difference of how induction vs.\ simplification is + applied. While \cite[\S10]{isabelle-ref} advises for these examples + that ``induction should not be applied until the goal is in the + simplest form'' this would be a very bad idea in our setting. + + Simplification normalizes all arithmetic expressions involved, + producing huge intermediate goals. With applying induction + afterwards, the Isar proof text would have to reflect the emerging + configuration by appropriate sub-proofs. This would result in badly + structured, low-level technical reasoning, without any good idea of + the actual point. + + \medskip As a general rule of good proof style, automatic methods + such as $\idt{simp}$ or $\idt{auto}$ should normally be never used as + initial proof methods, but only as terminal ones, solving certain + goals completely. +*} + +end diff -r cc038dc8f412 -r 8f35633c4922 src/HOL/Isar_Examples/document/proof.sty --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Isar_Examples/document/proof.sty Tue Oct 20 19:37:09 2009 +0200 @@ -0,0 +1,254 @@ +% proof.sty (Proof Figure Macros) +% +% version 1.0 +% October 13, 1990 +% Copyright (C) 1990 Makoto Tatsuta (tatsuta@riec.tohoku.ac.jp) +% +% This program is free software; you can redistribute it or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation; either versions 1, or (at your option) +% any later version. +% +% This program is distributed in the hope that it will be useful +% but WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +% GNU General Public License for more details. +% +% Usage: +% In \documentstyle, specify an optional style `proof', say, +% \documentstyle[proof]{article}. 
+% +% The following macros are available: +% +% In all the following macros, all the arguments such as +% and are processed in math mode. +% +% \infer +% draws an inference. +% +% Use & in to delimit upper formulae. +% consists more than 0 formulae. +% +% \infer returns \hbox{ ... } or \vbox{ ... } and +% sets \@LeftOffset and \@RightOffset globally. +% +% \infer[