modernized session Isar_Examples;
author wenzelm
Tue Oct 20 19:37:09 2009 +0200 (2009-10-20)
changeset 33026 8f35633c4922
parent 33025 cc038dc8f412
child 33027 9cf389429f6d
modernized session Isar_Examples;
src/HOL/IsaMakefile
src/HOL/Isar_Examples/Basic_Logic.thy
src/HOL/Isar_Examples/Cantor.thy
src/HOL/Isar_Examples/Drinker.thy
src/HOL/Isar_Examples/Expr_Compiler.thy
src/HOL/Isar_Examples/Fibonacci.thy
src/HOL/Isar_Examples/Group.thy
src/HOL/Isar_Examples/Hoare.thy
src/HOL/Isar_Examples/Hoare_Ex.thy
src/HOL/Isar_Examples/Knaster_Tarski.thy
src/HOL/Isar_Examples/Mutilated_Checkerboard.thy
src/HOL/Isar_Examples/Nested_Datatype.thy
src/HOL/Isar_Examples/Peirce.thy
src/HOL/Isar_Examples/Puzzle.thy
src/HOL/Isar_Examples/README.html
src/HOL/Isar_Examples/ROOT.ML
src/HOL/Isar_Examples/Summation.thy
src/HOL/Isar_Examples/document/proof.sty
src/HOL/Isar_Examples/document/root.bib
src/HOL/Isar_Examples/document/root.tex
src/HOL/Isar_Examples/document/style.tex
src/HOL/Isar_examples/Basic_Logic.thy
src/HOL/Isar_examples/Cantor.thy
src/HOL/Isar_examples/Drinker.thy
src/HOL/Isar_examples/Expr_Compiler.thy
src/HOL/Isar_examples/Fibonacci.thy
src/HOL/Isar_examples/Group.thy
src/HOL/Isar_examples/Hoare.thy
src/HOL/Isar_examples/Hoare_Ex.thy
src/HOL/Isar_examples/Knaster_Tarski.thy
src/HOL/Isar_examples/Mutilated_Checkerboard.thy
src/HOL/Isar_examples/Nested_Datatype.thy
src/HOL/Isar_examples/Peirce.thy
src/HOL/Isar_examples/Puzzle.thy
src/HOL/Isar_examples/README.html
src/HOL/Isar_examples/ROOT.ML
src/HOL/Isar_examples/Summation.thy
src/HOL/Isar_examples/document/proof.sty
src/HOL/Isar_examples/document/root.bib
src/HOL/Isar_examples/document/root.tex
src/HOL/Isar_examples/document/style.tex
src/HOL/README.html
src/HOL/ex/document/root.bib
     1.1 --- a/src/HOL/IsaMakefile	Tue Oct 20 19:36:52 2009 +0200
     1.2 +++ b/src/HOL/IsaMakefile	Tue Oct 20 19:37:09 2009 +0200
     1.3 @@ -25,7 +25,7 @@
     1.4    HOL-IOA \
     1.5    HOL-Imperative_HOL \
     1.6    HOL-Induct \
     1.7 -  HOL-Isar_examples \
     1.8 +  HOL-Isar_Examples \
     1.9    HOL-Lambda \
    1.10    HOL-Lattice \
    1.11    HOL-Matrix \
    1.12 @@ -914,22 +914,22 @@
    1.13  	@$(ISABELLE_TOOL) usedir $(OUT)/HOL ex
    1.14  
    1.15  
    1.16 -## HOL-Isar_examples
    1.17 +## HOL-Isar_Examples
    1.18  
    1.19 -HOL-Isar_examples: HOL $(LOG)/HOL-Isar_examples.gz
    1.20 +HOL-Isar_Examples: HOL $(LOG)/HOL-Isar_Examples.gz
    1.21  
    1.22 -$(LOG)/HOL-Isar_examples.gz: $(OUT)/HOL Isar_examples/Basic_Logic.thy	\
    1.23 -  Isar_examples/Cantor.thy Isar_examples/Drinker.thy			\
    1.24 -  Isar_examples/Expr_Compiler.thy Isar_examples/Fibonacci.thy		\
    1.25 -  Isar_examples/Group.thy Isar_examples/Hoare.thy			\
    1.26 -  Isar_examples/Hoare_Ex.thy Isar_examples/Knaster_Tarski.thy		\
    1.27 -  Isar_examples/Mutilated_Checkerboard.thy				\
    1.28 -  Isar_examples/Nested_Datatype.thy Isar_examples/Peirce.thy		\
    1.29 -  Isar_examples/Puzzle.thy Isar_examples/Summation.thy			\
    1.30 -  Isar_examples/ROOT.ML Isar_examples/document/proof.sty		\
    1.31 -  Isar_examples/document/root.bib Isar_examples/document/root.tex	\
    1.32 -  Isar_examples/document/style.tex Hoare/hoare_tac.ML
    1.33 -	@$(ISABELLE_TOOL) usedir $(OUT)/HOL Isar_examples
    1.34 +$(LOG)/HOL-Isar_Examples.gz: $(OUT)/HOL Isar_Examples/Basic_Logic.thy	\
    1.35 +  Isar_Examples/Cantor.thy Isar_Examples/Drinker.thy			\
    1.36 +  Isar_Examples/Expr_Compiler.thy Isar_Examples/Fibonacci.thy		\
    1.37 +  Isar_Examples/Group.thy Isar_Examples/Hoare.thy			\
    1.38 +  Isar_Examples/Hoare_Ex.thy Isar_Examples/Knaster_Tarski.thy		\
    1.39 +  Isar_Examples/Mutilated_Checkerboard.thy				\
    1.40 +  Isar_Examples/Nested_Datatype.thy Isar_Examples/Peirce.thy		\
    1.41 +  Isar_Examples/Puzzle.thy Isar_Examples/Summation.thy			\
    1.42 +  Isar_Examples/ROOT.ML Isar_Examples/document/proof.sty		\
    1.43 +  Isar_Examples/document/root.bib Isar_Examples/document/root.tex	\
    1.44 +  Isar_Examples/document/style.tex Hoare/hoare_tac.ML
    1.45 +	@$(ISABELLE_TOOL) usedir $(OUT)/HOL Isar_Examples
    1.46  
    1.47  
    1.48  ## HOL-SET-Protocol
    1.49 @@ -1304,7 +1304,7 @@
    1.50  clean:
    1.51  	@rm -f $(OUT)/HOL-Plain $(OUT)/HOL-Main $(OUT)/HOL		\
    1.52  		$(OUT)/HOL-Nominal $(OUT)/TLA $(LOG)/HOL.gz		\
    1.53 -		$(LOG)/TLA.gz $(LOG)/HOL-Isar_examples.gz		\
    1.54 +		$(LOG)/TLA.gz $(LOG)/HOL-Isar_Examples.gz		\
    1.55  		$(LOG)/HOL-Induct.gz $(LOG)/HOL-ex.gz			\
    1.56  		$(LOG)/HOL-Subst.gz $(LOG)/HOL-IMP.gz			\
    1.57  		$(LOG)/HOL-IMPP.gz $(LOG)/HOL-Hoare.gz			\
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/src/HOL/Isar_Examples/Basic_Logic.thy	Tue Oct 20 19:37:09 2009 +0200
     2.3 @@ -0,0 +1,448 @@
     2.4 +(*  Title:      HOL/Isar_Examples/Basic_Logic.thy
     2.5 +    Author:     Markus Wenzel, TU Muenchen
     2.6 +
     2.7 +Basic propositional and quantifier reasoning.
     2.8 +*)
     2.9 +
    2.10 +header {* Basic logical reasoning *}
    2.11 +
    2.12 +theory Basic_Logic
    2.13 +imports Main
    2.14 +begin
    2.15 +
    2.16 +
    2.17 +subsection {* Pure backward reasoning *}
    2.18 +
    2.19 +text {*
    2.20 +  In order to get a first idea of how Isabelle/Isar proof documents
    2.21 +  may look like, we consider the propositions @{text I}, @{text K},
    2.22 +  and @{text S}.  The following (rather explicit) proofs should
    2.23 +  require little extra explanations.
    2.24 +*}
    2.25 +
    2.26 +lemma I: "A --> A"
    2.27 +proof
    2.28 +  assume A
    2.29 +  show A by fact
    2.30 +qed
    2.31 +
    2.32 +lemma K: "A --> B --> A"
    2.33 +proof
    2.34 +  assume A
    2.35 +  show "B --> A"
    2.36 +  proof
    2.37 +    show A by fact
    2.38 +  qed
    2.39 +qed
    2.40 +
    2.41 +lemma S: "(A --> B --> C) --> (A --> B) --> A --> C"
    2.42 +proof
    2.43 +  assume "A --> B --> C"
    2.44 +  show "(A --> B) --> A --> C"
    2.45 +  proof
    2.46 +    assume "A --> B"
    2.47 +    show "A --> C"
    2.48 +    proof
    2.49 +      assume A
    2.50 +      show C
    2.51 +      proof (rule mp)
    2.52 +        show "B --> C" by (rule mp) fact+
    2.53 +        show B by (rule mp) fact+
    2.54 +      qed
    2.55 +    qed
    2.56 +  qed
    2.57 +qed
    2.58 +
    2.59 +text {*
    2.60 +  Isar provides several ways to fine-tune the reasoning, avoiding
    2.61 +  excessive detail.  Several abbreviated language elements are
    2.62 +  available, enabling the writer to express proofs in a more concise
    2.63 +  way, even without referring to any automated proof tools yet.
    2.64 +
    2.65 +  First of all, proof by assumption may be abbreviated as a single
    2.66 +  dot.
    2.67 +*}
    2.68 +
    2.69 +lemma "A --> A"
    2.70 +proof
    2.71 +  assume A
    2.72 +  show A by fact+
    2.73 +qed
    2.74 +
    2.75 +text {*
    2.76 +  In fact, concluding any (sub-)proof already involves solving any
    2.77 +  remaining goals by assumption\footnote{This is not a completely
    2.78 +  trivial operation, as proof by assumption may involve full
    2.79 +  higher-order unification.}.  Thus we may skip the rather vacuous
    2.80 +  body of the above proof as well.
    2.81 +*}
    2.82 +
    2.83 +lemma "A --> A"
    2.84 +proof
    2.85 +qed
    2.86 +
    2.87 +text {*
    2.88 +  Note that the \isacommand{proof} command refers to the @{text rule}
    2.89 +  method (without arguments) by default.  Thus it implicitly applies a
    2.90 +  single rule, as determined from the syntactic form of the statements
    2.91 +  involved.  The \isacommand{by} command abbreviates any proof with
    2.92 +  empty body, so the proof may be further pruned.
    2.93 +*}
    2.94 +
    2.95 +lemma "A --> A"
    2.96 +  by rule
    2.97 +
    2.98 +text {*
    2.99 +  Proof by a single rule may be abbreviated as double-dot.
   2.100 +*}
   2.101 +
   2.102 +lemma "A --> A" ..
   2.103 +
   2.104 +text {*
   2.105 +  Thus we have arrived at an adequate representation of the proof of a
   2.106 +  tautology that holds by a single standard rule.\footnote{Apparently,
   2.107 +  the rule here is implication introduction.}
   2.108 +*}
   2.109 +
   2.110 +text {*
   2.111 +  Let us also reconsider @{text K}.  Its statement is composed of
   2.112 +  iterated connectives.  Basic decomposition is by a single rule at a
   2.113 +  time, which is why our first version above was by nesting two
   2.114 +  proofs.
   2.115 +
   2.116 +  The @{text intro} proof method repeatedly decomposes a goal's
   2.117 +  conclusion.\footnote{The dual method is @{text elim}, acting on a
   2.118 +  goal's premises.}
   2.119 +*}
   2.120 +
   2.121 +lemma "A --> B --> A"
   2.122 +proof (intro impI)
   2.123 +  assume A
   2.124 +  show A by fact
   2.125 +qed
   2.126 +
   2.127 +text {*
   2.128 +  Again, the body may be collapsed.
   2.129 +*}
   2.130 +
   2.131 +lemma "A --> B --> A"
   2.132 +  by (intro impI)
   2.133 +
   2.134 +text {*
   2.135 +  Just like @{text rule}, the @{text intro} and @{text elim} proof
   2.136 +  methods pick standard structural rules, in case no explicit
   2.137 +  arguments are given.  While implicit rules are usually just fine for
   2.138 +  single rule application, this may go too far with iteration.  Thus
   2.139 +  in practice, @{text intro} and @{text elim} would be typically
   2.140 +  restricted to certain structures by giving a few rules only, e.g.\
   2.141 +  \isacommand{proof}~@{text "(intro impI allI)"} to strip implications
   2.142 +  and universal quantifiers.
   2.143 +
   2.144 +  Such well-tuned iterated decomposition of certain structures is the
   2.145 +  prime application of @{text intro} and @{text elim}.  In contrast,
   2.146 +  terminal steps that solve a goal completely are usually performed by
   2.147 +  actual automated proof methods (such as \isacommand{by}~@{text
   2.148 +  blast}.
   2.149 +*}
   2.150 +
   2.151 +
   2.152 +subsection {* Variations of backward vs.\ forward reasoning *}
   2.153 +
   2.154 +text {*
   2.155 +  Certainly, any proof may be performed in backward-style only.  On
   2.156 +  the other hand, small steps of reasoning are often more naturally
   2.157 +  expressed in forward-style.  Isar supports both backward and forward
   2.158 +  reasoning as a first-class concept.  In order to demonstrate the
   2.159 +  difference, we consider several proofs of @{text "A \<and> B \<longrightarrow> B \<and> A"}.
   2.160 +
   2.161 +  The first version is purely backward.
   2.162 +*}
   2.163 +
   2.164 +lemma "A & B --> B & A"
   2.165 +proof
   2.166 +  assume "A & B"
   2.167 +  show "B & A"
   2.168 +  proof
   2.169 +    show B by (rule conjunct2) fact
   2.170 +    show A by (rule conjunct1) fact
   2.171 +  qed
   2.172 +qed
   2.173 +
   2.174 +text {*
   2.175 +  Above, the @{text "conjunct_1/2"} projection rules had to be named
   2.176 +  explicitly, since the goals @{text B} and @{text A} did not provide
   2.177 +  any structural clue.  This may be avoided using \isacommand{from} to
   2.178 +  focus on the @{text "A \<and> B"} assumption as the current facts,
   2.179 +  enabling the use of double-dot proofs.  Note that \isacommand{from}
   2.180 +  already does forward-chaining, involving the \name{conjE} rule here.
   2.181 +*}
   2.182 +
   2.183 +lemma "A & B --> B & A"
   2.184 +proof
   2.185 +  assume "A & B"
   2.186 +  show "B & A"
   2.187 +  proof
   2.188 +    from `A & B` show B ..
   2.189 +    from `A & B` show A ..
   2.190 +  qed
   2.191 +qed
   2.192 +
   2.193 +text {*
   2.194 +  In the next version, we move the forward step one level upwards.
   2.195 +  Forward-chaining from the most recent facts is indicated by the
   2.196 +  \isacommand{then} command.  Thus the proof of @{text "B \<and> A"} from
   2.197 +  @{text "A \<and> B"} actually becomes an elimination, rather than an
   2.198 +  introduction.  The resulting proof structure directly corresponds to
   2.199 +  that of the @{text conjE} rule, including the repeated goal
   2.200 +  proposition that is abbreviated as @{text ?thesis} below.
   2.201 +*}
   2.202 +
   2.203 +lemma "A & B --> B & A"
   2.204 +proof
   2.205 +  assume "A & B"
   2.206 +  then show "B & A"
   2.207 +  proof                    -- {* rule @{text conjE} of @{text "A \<and> B"} *}
   2.208 +    assume B A
   2.209 +    then show ?thesis ..   -- {* rule @{text conjI} of @{text "B \<and> A"} *}
   2.210 +  qed
   2.211 +qed
   2.212 +
   2.213 +text {*
   2.214 +  In the subsequent version we flatten the structure of the main body
   2.215 +  by doing forward reasoning all the time.  Only the outermost
   2.216 +  decomposition step is left as backward.
   2.217 +*}
   2.218 +
   2.219 +lemma "A & B --> B & A"
   2.220 +proof
   2.221 +  assume "A & B"
   2.222 +  from `A & B` have A ..
   2.223 +  from `A & B` have B ..
   2.224 +  from `B` `A` show "B & A" ..
   2.225 +qed
   2.226 +
   2.227 +text {*
   2.228 +  We can still push forward-reasoning a bit further, even at the risk
   2.229 +  of getting ridiculous.  Note that we force the initial proof step to
   2.230 +  do nothing here, by referring to the ``-'' proof method.
   2.231 +*}
   2.232 +
   2.233 +lemma "A & B --> B & A"
   2.234 +proof -
   2.235 +  {
   2.236 +    assume "A & B"
   2.237 +    from `A & B` have A ..
   2.238 +    from `A & B` have B ..
   2.239 +    from `B` `A` have "B & A" ..
   2.240 +  }
   2.241 +  then show ?thesis ..         -- {* rule \name{impI} *}
   2.242 +qed
   2.243 +
   2.244 +text {*
   2.245 +  \medskip With these examples we have shifted through a whole range
   2.246 +  from purely backward to purely forward reasoning.  Apparently, in
   2.247 +  the extreme ends we get slightly ill-structured proofs, which also
   2.248 +  require much explicit naming of either rules (backward) or local
   2.249 +  facts (forward).
   2.250 +
   2.251 +  The general lesson learned here is that good proof style would
   2.252 +  achieve just the \emph{right} balance of top-down backward
   2.253 +  decomposition, and bottom-up forward composition.  In general, there
   2.254 +  is no single best way to arrange some pieces of formal reasoning, of
   2.255 +  course.  Depending on the actual applications, the intended audience
   2.256 +  etc., rules (and methods) on the one hand vs.\ facts on the other
   2.257 +  hand have to be emphasized in an appropriate way.  This requires the
   2.258 +  proof writer to develop good taste, and some practice, of course.
   2.259 +*}
   2.260 +
   2.261 +text {*
   2.262 +  For our example the most appropriate way of reasoning is probably
   2.263 +  the middle one, with conjunction introduction done after
   2.264 +  elimination.
   2.265 +*}
   2.266 +
   2.267 +lemma "A & B --> B & A"
   2.268 +proof
   2.269 +  assume "A & B"
   2.270 +  then show "B & A"
   2.271 +  proof
   2.272 +    assume B A
   2.273 +    then show ?thesis ..
   2.274 +  qed
   2.275 +qed
   2.276 +
   2.277 +
   2.278 +
   2.279 +subsection {* A few examples from ``Introduction to Isabelle'' *}
   2.280 +
   2.281 +text {*
   2.282 +  We rephrase some of the basic reasoning examples of
   2.283 +  \cite{isabelle-intro}, using HOL rather than FOL.
   2.284 +*}
   2.285 +
   2.286 +subsubsection {* A propositional proof *}
   2.287 +
   2.288 +text {*
   2.289 +  We consider the proposition @{text "P \<or> P \<longrightarrow> P"}.  The proof below
   2.290 +  involves forward-chaining from @{text "P \<or> P"}, followed by an
   2.291 +  explicit case-analysis on the two \emph{identical} cases.
   2.292 +*}
   2.293 +
   2.294 +lemma "P | P --> P"
   2.295 +proof
   2.296 +  assume "P | P"
   2.297 +  then show P
   2.298 +  proof                    -- {*
   2.299 +    rule @{text disjE}: \smash{$\infer{C}{A \disj B & \infer*{C}{[A]} & \infer*{C}{[B]}}$}
   2.300 +  *}
   2.301 +    assume P show P by fact
   2.302 +  next
   2.303 +    assume P show P by fact
   2.304 +  qed
   2.305 +qed
   2.306 +
   2.307 +text {*
   2.308 +  Case splits are \emph{not} hardwired into the Isar language as a
   2.309 +  special feature.  The \isacommand{next} command used to separate the
   2.310 +  cases above is just a short form of managing block structure.
   2.311 +
   2.312 +  \medskip In general, applying proof methods may split up a goal into
   2.313 +  separate ``cases'', i.e.\ new subgoals with individual local
   2.314 +  assumptions.  The corresponding proof text typically mimics this by
   2.315 +  establishing results in appropriate contexts, separated by blocks.
   2.316 +
   2.317 +  In order to avoid too much explicit parentheses, the Isar system
   2.318 +  implicitly opens an additional block for any new goal, the
   2.319 +  \isacommand{next} statement then closes one block level, opening a
   2.320 +  new one.  The resulting behavior is what one would expect from
   2.321 +  separating cases, only that it is more flexible.  E.g.\ an induction
   2.322 +  base case (which does not introduce local assumptions) would
   2.323 +  \emph{not} require \isacommand{next} to separate the subsequent step
   2.324 +  case.
   2.325 +
   2.326 +  \medskip In our example the situation is even simpler, since the two
   2.327 +  cases actually coincide.  Consequently the proof may be rephrased as
   2.328 +  follows.
   2.329 +*}
   2.330 +
   2.331 +lemma "P | P --> P"
   2.332 +proof
   2.333 +  assume "P | P"
   2.334 +  then show P
   2.335 +  proof
   2.336 +    assume P
   2.337 +    show P by fact
   2.338 +    show P by fact
   2.339 +  qed
   2.340 +qed
   2.341 +
   2.342 +text {*
   2.343 +  Again, the rather vacuous body of the proof may be collapsed.  Thus
   2.344 +  the case analysis degenerates into two assumption steps, which are
   2.345 +  implicitly performed when concluding the single rule step of the
   2.346 +  double-dot proof as follows.
   2.347 +*}
   2.348 +
   2.349 +lemma "P | P --> P"
   2.350 +proof
   2.351 +  assume "P | P"
   2.352 +  then show P ..
   2.353 +qed
   2.354 +
   2.355 +
   2.356 +subsubsection {* A quantifier proof *}
   2.357 +
   2.358 +text {*
   2.359 +  To illustrate quantifier reasoning, let us prove @{text "(\<exists>x. P (f
   2.360 +  x)) \<longrightarrow> (\<exists>y. P y)"}.  Informally, this holds because any @{text a}
   2.361 +  with @{text "P (f a)"} may be taken as a witness for the second
   2.362 +  existential statement.
   2.363 +
   2.364 +  The first proof is rather verbose, exhibiting quite a lot of
   2.365 +  (redundant) detail.  It gives explicit rules, even with some
   2.366 +  instantiation.  Furthermore, we encounter two new language elements:
   2.367 +  the \isacommand{fix} command augments the context by some new
   2.368 +  ``arbitrary, but fixed'' element; the \isacommand{is} annotation
   2.369 +  binds term abbreviations by higher-order pattern matching.
   2.370 +*}
   2.371 +
   2.372 +lemma "(EX x. P (f x)) --> (EX y. P y)"
   2.373 +proof
   2.374 +  assume "EX x. P (f x)"
   2.375 +  then show "EX y. P y"
   2.376 +  proof (rule exE)             -- {*
   2.377 +    rule \name{exE}: \smash{$\infer{B}{\ex x A(x) & \infer*{B}{[A(x)]_x}}$}
   2.378 +  *}
   2.379 +    fix a
   2.380 +    assume "P (f a)" (is "P ?witness")
   2.381 +    then show ?thesis by (rule exI [of P ?witness])
   2.382 +  qed
   2.383 +qed
   2.384 +
   2.385 +text {*
   2.386 +  While explicit rule instantiation may occasionally improve
   2.387 +  readability of certain aspects of reasoning, it is usually quite
   2.388 +  redundant.  Above, the basic proof outline gives already enough
   2.389 +  structural clues for the system to infer both the rules and their
   2.390 +  instances (by higher-order unification).  Thus we may as well prune
   2.391 +  the text as follows.
   2.392 +*}
   2.393 +
   2.394 +lemma "(EX x. P (f x)) --> (EX y. P y)"
   2.395 +proof
   2.396 +  assume "EX x. P (f x)"
   2.397 +  then show "EX y. P y"
   2.398 +  proof
   2.399 +    fix a
   2.400 +    assume "P (f a)"
   2.401 +    then show ?thesis ..
   2.402 +  qed
   2.403 +qed
   2.404 +
   2.405 +text {*
   2.406 +  Explicit @{text \<exists>}-elimination as seen above can become quite
   2.407 +  cumbersome in practice.  The derived Isar language element
   2.408 +  ``\isakeyword{obtain}'' provides a more handsome way to do
   2.409 +  generalized existence reasoning.
   2.410 +*}
   2.411 +
   2.412 +lemma "(EX x. P (f x)) --> (EX y. P y)"
   2.413 +proof
   2.414 +  assume "EX x. P (f x)"
   2.415 +  then obtain a where "P (f a)" ..
   2.416 +  then show "EX y. P y" ..
   2.417 +qed
   2.418 +
   2.419 +text {*
   2.420 +  Technically, \isakeyword{obtain} is similar to \isakeyword{fix} and
   2.421 +  \isakeyword{assume} together with a soundness proof of the
   2.422 +  elimination involved.  Thus it behaves similar to any other forward
   2.423 +  proof element.  Also note that due to the nature of general
   2.424 +  existence reasoning involved here, any result exported from the
   2.425 +  context of an \isakeyword{obtain} statement may \emph{not} refer to
   2.426 +  the parameters introduced there.
   2.427 +*}
   2.428 +
   2.429 +
   2.430 +
   2.431 +subsubsection {* Deriving rules in Isabelle *}
   2.432 +
   2.433 +text {*
   2.434 +  We derive the conjunction elimination rule from the corresponding
   2.435 +  projections.  The proof is quite straight-forward, since
   2.436 +  Isabelle/Isar supports non-atomic goals and assumptions fully
   2.437 +  transparently.
   2.438 +*}
   2.439 +
   2.440 +theorem conjE: "A & B ==> (A ==> B ==> C) ==> C"
   2.441 +proof -
   2.442 +  assume "A & B"
   2.443 +  assume r: "A ==> B ==> C"
   2.444 +  show C
   2.445 +  proof (rule r)
   2.446 +    show A by (rule conjunct1) fact
   2.447 +    show B by (rule conjunct2) fact
   2.448 +  qed
   2.449 +qed
   2.450 +
   2.451 +end
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/src/HOL/Isar_Examples/Cantor.thy	Tue Oct 20 19:37:09 2009 +0200
     3.3 @@ -0,0 +1,71 @@
     3.4 +(*  Title:      HOL/Isar_Examples/Cantor.thy
     3.5 +    Author:     Markus Wenzel, TU Muenchen
     3.6 +*)
     3.7 +
     3.8 +header {* Cantor's Theorem *}
     3.9 +
    3.10 +theory Cantor
    3.11 +imports Main
    3.12 +begin
    3.13 +
    3.14 +text_raw {*
    3.15 +  \footnote{This is an Isar version of the final example of the
    3.16 +  Isabelle/HOL manual \cite{isabelle-HOL}.}
    3.17 +*}
    3.18 +
    3.19 +text {*
    3.20 +  Cantor's Theorem states that every set has more subsets than it has
    3.21 +  elements.  It has become a favorite basic example in pure
    3.22 +  higher-order logic since it is so easily expressed: \[\all{f::\alpha
    3.23 +  \To \alpha \To \idt{bool}} \ex{S::\alpha \To \idt{bool}}
    3.24 +  \all{x::\alpha} f \ap x \not= S\]
    3.25 +
    3.26 +  Viewing types as sets, $\alpha \To \idt{bool}$ represents the
    3.27 +  powerset of $\alpha$.  This version of the theorem states that for
    3.28 +  every function from $\alpha$ to its powerset, some subset is outside
    3.29 +  its range.  The Isabelle/Isar proofs below uses HOL's set theory,
    3.30 +  with the type $\alpha \ap \idt{set}$ and the operator
    3.31 +  $\idt{range}::(\alpha \To \beta) \To \beta \ap \idt{set}$.
    3.32 +*}
    3.33 +
    3.34 +theorem "EX S. S ~: range (f :: 'a => 'a set)"
    3.35 +proof
    3.36 +  let ?S = "{x. x ~: f x}"
    3.37 +  show "?S ~: range f"
    3.38 +  proof
    3.39 +    assume "?S : range f"
    3.40 +    then obtain y where "?S = f y" ..
    3.41 +    then show False
    3.42 +    proof (rule equalityCE)
    3.43 +      assume "y : f y"
    3.44 +      assume "y : ?S" then have "y ~: f y" ..
    3.45 +      with `y : f y` show ?thesis by contradiction
    3.46 +    next
    3.47 +      assume "y ~: ?S"
    3.48 +      assume "y ~: f y" then have "y : ?S" ..
    3.49 +      with `y ~: ?S` show ?thesis by contradiction
    3.50 +    qed
    3.51 +  qed
    3.52 +qed
    3.53 +
    3.54 +text {*
    3.55 +  How much creativity is required?  As it happens, Isabelle can prove
    3.56 +  this theorem automatically using best-first search.  Depth-first
    3.57 +  search would diverge, but best-first search successfully navigates
    3.58 +  through the large search space.  The context of Isabelle's classical
    3.59 +  prover contains rules for the relevant constructs of HOL's set
    3.60 +  theory.
    3.61 +*}
    3.62 +
    3.63 +theorem "EX S. S ~: range (f :: 'a => 'a set)"
    3.64 +  by best
    3.65 +
    3.66 +text {*
    3.67 +  While this establishes the same theorem internally, we do not get
    3.68 +  any idea of how the proof actually works.  There is currently no way
    3.69 +  to transform internal system-level representations of Isabelle
    3.70 +  proofs back into Isar text.  Writing intelligible proof documents
    3.71 +  really is a creative process, after all.
    3.72 +*}
    3.73 +
    3.74 +end
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/src/HOL/Isar_Examples/Drinker.thy	Tue Oct 20 19:37:09 2009 +0200
     4.3 @@ -0,0 +1,54 @@
     4.4 +(*  Title:      HOL/Isar_Examples/Drinker.thy
     4.5 +    Author:     Makarius
     4.6 +*)
     4.7 +
     4.8 +header {* The Drinker's Principle *}
     4.9 +
    4.10 +theory Drinker
    4.11 +imports Main
    4.12 +begin
    4.13 +
    4.14 +text {*
    4.15 +  Here is another example of classical reasoning: the Drinker's
    4.16 +  Principle says that for some person, if he is drunk, everybody else
    4.17 +  is drunk!
    4.18 +
    4.19 +  We first prove a classical part of de-Morgan's law.
    4.20 +*}
    4.21 +
    4.22 +lemma deMorgan:
    4.23 +  assumes "\<not> (\<forall>x. P x)"
    4.24 +  shows "\<exists>x. \<not> P x"
    4.25 +  using prems
    4.26 +proof (rule contrapos_np)
    4.27 +  assume a: "\<not> (\<exists>x. \<not> P x)"
    4.28 +  show "\<forall>x. P x"
    4.29 +  proof
    4.30 +    fix x
    4.31 +    show "P x"
    4.32 +    proof (rule classical)
    4.33 +      assume "\<not> P x"
    4.34 +      then have "\<exists>x. \<not> P x" ..
    4.35 +      with a show ?thesis by contradiction
    4.36 +    qed
    4.37 +  qed
    4.38 +qed
    4.39 +
    4.40 +theorem Drinker's_Principle: "\<exists>x. drunk x \<longrightarrow> (\<forall>x. drunk x)"
    4.41 +proof cases
    4.42 +  fix a assume "\<forall>x. drunk x"
    4.43 +  then have "drunk a \<longrightarrow> (\<forall>x. drunk x)" ..
    4.44 +  then show ?thesis ..
    4.45 +next
    4.46 +  assume "\<not> (\<forall>x. drunk x)"
    4.47 +  then have "\<exists>x. \<not> drunk x" by (rule deMorgan)
    4.48 +  then obtain a where a: "\<not> drunk a" ..
    4.49 +  have "drunk a \<longrightarrow> (\<forall>x. drunk x)"
    4.50 +  proof
    4.51 +    assume "drunk a"
    4.52 +    with a show "\<forall>x. drunk x" by (contradiction)
    4.53 +  qed
    4.54 +  then show ?thesis ..
    4.55 +qed
    4.56 +
    4.57 +end
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/src/HOL/Isar_Examples/Expr_Compiler.thy	Tue Oct 20 19:37:09 2009 +0200
     5.3 @@ -0,0 +1,231 @@
     5.4 +(*  Title:      HOL/Isar_Examples/Expr_Compiler.thy
     5.5 +    Author:     Markus Wenzel, TU Muenchen
     5.6 +
     5.7 +Correctness of a simple expression/stack-machine compiler.
     5.8 +*)
     5.9 +
    5.10 +header {* Correctness of a simple expression compiler *}
    5.11 +
    5.12 +theory Expr_Compiler
    5.13 +imports Main
    5.14 +begin
    5.15 +
    5.16 +text {*
    5.17 + This is a (rather trivial) example of program verification.  We model
    5.18 + a compiler for translating expressions to stack machine instructions,
    5.19 + and prove its correctness wrt.\ some evaluation semantics.
    5.20 +*}
    5.21 +
    5.22 +
    5.23 +subsection {* Binary operations *}
    5.24 +
    5.25 +text {*
    5.26 + Binary operations are just functions over some type of values.  This
    5.27 + is both for abstract syntax and semantics, i.e.\ we use a ``shallow
    5.28 + embedding'' here.
    5.29 +*}
    5.30 +
    5.31 +types
    5.32 +  'val binop = "'val => 'val => 'val"
    5.33 +
    5.34 +
    5.35 +subsection {* Expressions *}
    5.36 +
    5.37 +text {*
    5.38 + The language of expressions is defined as an inductive type,
    5.39 + consisting of variables, constants, and binary operations on
    5.40 + expressions.
    5.41 +*}
    5.42 +
    5.43 +datatype ('adr, 'val) expr =
    5.44 +  Variable 'adr |
    5.45 +  Constant 'val |
    5.46 +  Binop "'val binop" "('adr, 'val) expr" "('adr, 'val) expr"
    5.47 +
    5.48 +text {*
    5.49 + Evaluation (wrt.\ some environment of variable assignments) is
    5.50 + defined by primitive recursion over the structure of expressions.
    5.51 +*}
    5.52 +
    5.53 +consts
    5.54 +  eval :: "('adr, 'val) expr => ('adr => 'val) => 'val"
    5.55 +
    5.56 +primrec
    5.57 +  "eval (Variable x) env = env x"
    5.58 +  "eval (Constant c) env = c"
    5.59 +  "eval (Binop f e1 e2) env = f (eval e1 env) (eval e2 env)"
    5.60 +
    5.61 +
    5.62 +subsection {* Machine *}
    5.63 +
    5.64 +text {*
    5.65 + Next we model a simple stack machine, with three instructions.
    5.66 +*}
    5.67 +
    5.68 +datatype ('adr, 'val) instr =
    5.69 +  Const 'val |
    5.70 +  Load 'adr |
    5.71 +  Apply "'val binop"
    5.72 +
    5.73 +text {*
    5.74 + Execution of a list of stack machine instructions is easily defined
    5.75 + as follows.
    5.76 +*}
    5.77 +
    5.78 +consts
    5.79 +  exec :: "(('adr, 'val) instr) list
    5.80 +    => 'val list => ('adr => 'val) => 'val list"
    5.81 +
    5.82 +primrec
    5.83 +  "exec [] stack env = stack"
    5.84 +  "exec (instr # instrs) stack env =
    5.85 +    (case instr of
    5.86 +      Const c => exec instrs (c # stack) env
    5.87 +    | Load x => exec instrs (env x # stack) env
    5.88 +    | Apply f => exec instrs (f (hd stack) (hd (tl stack))
    5.89 +                   # (tl (tl stack))) env)"
    5.90 +
    5.91 +constdefs
    5.92 +  execute :: "(('adr, 'val) instr) list => ('adr => 'val) => 'val"
    5.93 +  "execute instrs env == hd (exec instrs [] env)"
    5.94 +
    5.95 +
    5.96 +subsection {* Compiler *}
    5.97 +
    5.98 +text {*
    5.99 + We are ready to define the compilation function of expressions to
   5.100 + lists of stack machine instructions.
   5.101 +*}
   5.102 +
   5.103 +consts
   5.104 +  compile :: "('adr, 'val) expr => (('adr, 'val) instr) list"
   5.105 +
   5.106 +primrec
   5.107 +  "compile (Variable x) = [Load x]"
   5.108 +  "compile (Constant c) = [Const c]"
   5.109 +  "compile (Binop f e1 e2) = compile e2 @ compile e1 @ [Apply f]"
   5.110 +
   5.111 +
   5.112 +text {*
   5.113 + The main result of this development is the correctness theorem for
   5.114 + $\idt{compile}$.  We first establish a lemma about $\idt{exec}$ and
   5.115 + list append.
   5.116 +*}
   5.117 +
   5.118 +lemma exec_append:
   5.119 +  "exec (xs @ ys) stack env =
   5.120 +    exec ys (exec xs stack env) env"
   5.121 +proof (induct xs arbitrary: stack)
   5.122 +  case Nil
   5.123 +  show ?case by simp
   5.124 +next
   5.125 +  case (Cons x xs)
   5.126 +  show ?case
   5.127 +  proof (induct x)
   5.128 +    case Const
   5.129 +    from Cons show ?case by simp
   5.130 +  next
   5.131 +    case Load
   5.132 +    from Cons show ?case by simp
   5.133 +  next
   5.134 +    case Apply
   5.135 +    from Cons show ?case by simp
   5.136 +  qed
   5.137 +qed
   5.138 +
   5.139 +theorem correctness: "execute (compile e) env = eval e env"
   5.140 +proof -
   5.141 +  have "\<And>stack. exec (compile e) stack env = eval e env # stack"
   5.142 +  proof (induct e)
   5.143 +    case Variable show ?case by simp
   5.144 +  next
   5.145 +    case Constant show ?case by simp
   5.146 +  next
   5.147 +    case Binop then show ?case by (simp add: exec_append)
   5.148 +  qed
   5.149 +  then show ?thesis by (simp add: execute_def)
   5.150 +qed
   5.151 +
   5.152 +
   5.153 +text {*
   5.154 + \bigskip In the proofs above, the \name{simp} method does quite a lot
   5.155 + of work behind the scenes (mostly ``functional program execution'').
   5.156 + Subsequently, the same reasoning is elaborated in detail --- at most
   5.157 + one recursive function definition is used at a time.  Thus we get a
   5.158 + better idea of what is actually going on.
   5.159 +*}
   5.160 +
   5.161 +lemma exec_append':
   5.162 +  "exec (xs @ ys) stack env = exec ys (exec xs stack env) env"
   5.163 +proof (induct xs arbitrary: stack)
   5.164 +  case (Nil s)
   5.165 +  have "exec ([] @ ys) s env = exec ys s env" by simp
   5.166 +  also have "... = exec ys (exec [] s env) env" by simp
   5.167 +  finally show ?case .
   5.168 +next
   5.169 +  case (Cons x xs s)
   5.170 +  show ?case
   5.171 +  proof (induct x)
   5.172 +    case (Const val)
   5.173 +    have "exec ((Const val # xs) @ ys) s env = exec (Const val # xs @ ys) s env"
   5.174 +      by simp
   5.175 +    also have "... = exec (xs @ ys) (val # s) env" by simp
   5.176 +    also from Cons have "... = exec ys (exec xs (val # s) env) env" .
   5.177 +    also have "... = exec ys (exec (Const val # xs) s env) env" by simp
   5.178 +    finally show ?case .
   5.179 +  next
   5.180 +    case (Load adr)
   5.181 +    from Cons show ?case by simp -- {* same as above *}
   5.182 +  next
   5.183 +    case (Apply fn)
   5.184 +    have "exec ((Apply fn # xs) @ ys) s env =
   5.185 +        exec (Apply fn # xs @ ys) s env" by simp
   5.186 +    also have "... =
   5.187 +        exec (xs @ ys) (fn (hd s) (hd (tl s)) # (tl (tl s))) env" by simp
   5.188 +    also from Cons have "... =
   5.189 +        exec ys (exec xs (fn (hd s) (hd (tl s)) # tl (tl s)) env) env" .
   5.190 +    also have "... = exec ys (exec (Apply fn # xs) s env) env" by simp
   5.191 +    finally show ?case .
   5.192 +  qed
   5.193 +qed
   5.194 +
   5.195 +theorem correctness': "execute (compile e) env = eval e env"
   5.196 +proof -
   5.197 +  have exec_compile: "\<And>stack. exec (compile e) stack env = eval e env # stack"
   5.198 +  proof (induct e)
   5.199 +    case (Variable adr s)
   5.200 +    have "exec (compile (Variable adr)) s env = exec [Load adr] s env"
   5.201 +      by simp
   5.202 +    also have "... = env adr # s" by simp
   5.203 +    also have "env adr = eval (Variable adr) env" by simp
   5.204 +    finally show ?case .
   5.205 +  next
   5.206 +    case (Constant val s)
   5.207 +    show ?case by simp -- {* same as above *}
   5.208 +  next
   5.209 +    case (Binop fn e1 e2 s)
   5.210 +    have "exec (compile (Binop fn e1 e2)) s env =
   5.211 +        exec (compile e2 @ compile e1 @ [Apply fn]) s env" by simp
   5.212 +    also have "... = exec [Apply fn]
   5.213 +        (exec (compile e1) (exec (compile e2) s env) env) env"
   5.214 +      by (simp only: exec_append)
   5.215 +    also have "exec (compile e2) s env = eval e2 env # s" by fact
   5.216 +    also have "exec (compile e1) ... env = eval e1 env # ..." by fact
   5.217 +    also have "exec [Apply fn] ... env =
   5.218 +        fn (hd ...) (hd (tl ...)) # (tl (tl ...))" by simp
   5.219 +    also have "... = fn (eval e1 env) (eval e2 env) # s" by simp
   5.220 +    also have "fn (eval e1 env) (eval e2 env) =
   5.221 +        eval (Binop fn e1 e2) env"
   5.222 +      by simp
   5.223 +    finally show ?case .
   5.224 +  qed
   5.225 +
   5.226 +  have "execute (compile e) env = hd (exec (compile e) [] env)"
   5.227 +    by (simp add: execute_def)
   5.228 +  also from exec_compile
   5.229 +    have "exec (compile e) [] env = [eval e env]" .
   5.230 +  also have "hd ... = eval e env" by simp
   5.231 +  finally show ?thesis .
   5.232 +qed
   5.233 +
   5.234 +end
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/src/HOL/Isar_Examples/Fibonacci.thy	Tue Oct 20 19:37:09 2009 +0200
     6.3 @@ -0,0 +1,165 @@
     6.4 +(*  Title:      HOL/Isar_Examples/Fibonacci.thy
     6.5 +    Author:     Gertrud Bauer
     6.6 +    Copyright   1999 Technische Universitaet Muenchen
     6.7 +
     6.8 +The Fibonacci function.  Demonstrates the use of recdef.  Original
     6.9 +tactic script by Lawrence C Paulson.
    6.10 +
    6.11 +Fibonacci numbers: proofs of laws taken from
    6.12 +
    6.13 +  R. L. Graham, D. E. Knuth, O. Patashnik.
    6.14 +  Concrete Mathematics.
    6.15 +  (Addison-Wesley, 1989)
    6.16 +*)
    6.17 +
    6.18 +header {* Fib and Gcd commute *}
    6.19 +
    6.20 +theory Fibonacci
    6.21 +imports Primes
    6.22 +begin
    6.23 +
    6.24 +text_raw {*
    6.25 + \footnote{Isar version by Gertrud Bauer.  Original tactic script by
    6.26 + Larry Paulson.  A few proofs of laws taken from
    6.27 + \cite{Concrete-Math}.}
    6.28 +*}
    6.29 +
    6.30 +
    6.31 +subsection {* Fibonacci numbers *}
    6.32 +
    6.33 +fun fib :: "nat \<Rightarrow> nat" where
    6.34 +  "fib 0 = 0"
    6.35 +  | "fib (Suc 0) = 1"
    6.36 +  | "fib (Suc (Suc x)) = fib x + fib (Suc x)"
    6.37 +
    6.38 +lemma [simp]: "0 < fib (Suc n)"
    6.39 +  by (induct n rule: fib.induct) simp_all
    6.40 +
    6.41 +
    6.42 +text {* Alternative induction rule. *}
    6.43 +
    6.44 +theorem fib_induct:
    6.45 +    "P 0 ==> P 1 ==> (!!n. P (n + 1) ==> P n ==> P (n + 2)) ==> P (n::nat)"
    6.46 +  by (induct rule: fib.induct) simp_all
    6.47 +
    6.48 +
    6.49 +subsection {* Fib and gcd commute *}
    6.50 +
    6.51 +text {* A few laws taken from \cite{Concrete-Math}. *}
    6.52 +
    6.53 +lemma fib_add:
    6.54 +  "fib (n + k + 1) = fib (k + 1) * fib (n + 1) + fib k * fib n"
    6.55 +  (is "?P n")
    6.56 +  -- {* see \cite[page 280]{Concrete-Math} *}
    6.57 +proof (induct n rule: fib_induct)
    6.58 +  show "?P 0" by simp
    6.59 +  show "?P 1" by simp
    6.60 +  fix n
    6.61 +  have "fib (n + 2 + k + 1)
    6.62 +    = fib (n + k + 1) + fib (n + 1 + k + 1)" by simp
    6.63 +  also assume "fib (n + k + 1)
    6.64 +    = fib (k + 1) * fib (n + 1) + fib k * fib n"
    6.65 +      (is " _ = ?R1")
    6.66 +  also assume "fib (n + 1 + k + 1)
    6.67 +    = fib (k + 1) * fib (n + 1 + 1) + fib k * fib (n + 1)"
    6.68 +      (is " _ = ?R2")
    6.69 +  also have "?R1 + ?R2
    6.70 +    = fib (k + 1) * fib (n + 2 + 1) + fib k * fib (n + 2)"
    6.71 +    by (simp add: add_mult_distrib2)
    6.72 +  finally show "?P (n + 2)" .
    6.73 +qed
    6.74 +
    6.75 +lemma gcd_fib_Suc_eq_1: "gcd (fib n) (fib (n + 1)) = 1" (is "?P n")
    6.76 +proof (induct n rule: fib_induct)
    6.77 +  show "?P 0" by simp
    6.78 +  show "?P 1" by simp
    6.79 +  fix n
    6.80 +  have "fib (n + 2 + 1) = fib (n + 1) + fib (n + 2)"
    6.81 +    by simp
    6.82 +  also have "gcd (fib (n + 2)) ... = gcd (fib (n + 2)) (fib (n + 1))"
    6.83 +    by (simp only: gcd_add2')
    6.84 +  also have "... = gcd (fib (n + 1)) (fib (n + 1 + 1))"
    6.85 +    by (simp add: gcd_commute)
    6.86 +  also assume "... = 1"
    6.87 +  finally show "?P (n + 2)" .
    6.88 +qed
    6.89 +
    6.90 +lemma gcd_mult_add: "0 < n ==> gcd (n * k + m) n = gcd m n"
    6.91 +proof -
    6.92 +  assume "0 < n"
    6.93 +  then have "gcd (n * k + m) n = gcd n (m mod n)"
    6.94 +    by (simp add: gcd_non_0 add_commute)
    6.95 +  also from `0 < n` have "... = gcd m n" by (simp add: gcd_non_0)
    6.96 +  finally show ?thesis .
    6.97 +qed
    6.98 +
    6.99 +lemma gcd_fib_add: "gcd (fib m) (fib (n + m)) = gcd (fib m) (fib n)"
   6.100 +proof (cases m)
   6.101 +  case 0
   6.102 +  then show ?thesis by simp
   6.103 +next
   6.104 +  case (Suc k)
   6.105 +  then have "gcd (fib m) (fib (n + m)) = gcd (fib (n + k + 1)) (fib (k + 1))"
   6.106 +    by (simp add: gcd_commute)
   6.107 +  also have "fib (n + k + 1)
   6.108 +    = fib (k + 1) * fib (n + 1) + fib k * fib n"
   6.109 +    by (rule fib_add)
   6.110 +  also have "gcd ... (fib (k + 1)) = gcd (fib k * fib n) (fib (k + 1))"
   6.111 +    by (simp add: gcd_mult_add)
   6.112 +  also have "... = gcd (fib n) (fib (k + 1))"
   6.113 +    by (simp only: gcd_fib_Suc_eq_1 gcd_mult_cancel)
   6.114 +  also have "... = gcd (fib m) (fib n)"
   6.115 +    using Suc by (simp add: gcd_commute)
   6.116 +  finally show ?thesis .
   6.117 +qed
   6.118 +
   6.119 +lemma gcd_fib_diff:
   6.120 +  assumes "m <= n"
   6.121 +  shows "gcd (fib m) (fib (n - m)) = gcd (fib m) (fib n)"
   6.122 +proof -
   6.123 +  have "gcd (fib m) (fib (n - m)) = gcd (fib m) (fib (n - m + m))"
   6.124 +    by (simp add: gcd_fib_add)
   6.125 +  also from `m <= n` have "n - m + m = n" by simp
   6.126 +  finally show ?thesis .
   6.127 +qed
   6.128 +
   6.129 +lemma gcd_fib_mod:
   6.130 +  assumes "0 < m"
   6.131 +  shows "gcd (fib m) (fib (n mod m)) = gcd (fib m) (fib n)"
   6.132 +proof (induct n rule: nat_less_induct)
   6.133 +  case (1 n) note hyp = this
   6.134 +  show ?case
   6.135 +  proof -
   6.136 +    have "n mod m = (if n < m then n else (n - m) mod m)"
   6.137 +      by (rule mod_if)
   6.138 +    also have "gcd (fib m) (fib ...) = gcd (fib m) (fib n)"
   6.139 +    proof (cases "n < m")
   6.140 +      case True then show ?thesis by simp
   6.141 +    next
   6.142 +      case False then have "m <= n" by simp
   6.143 +      from `0 < m` and False have "n - m < n" by simp
   6.144 +      with hyp have "gcd (fib m) (fib ((n - m) mod m))
   6.145 +        = gcd (fib m) (fib (n - m))" by simp
   6.146 +      also have "... = gcd (fib m) (fib n)"
   6.147 +        using `m <= n` by (rule gcd_fib_diff)
   6.148 +      finally have "gcd (fib m) (fib ((n - m) mod m)) =
   6.149 +        gcd (fib m) (fib n)" .
   6.150 +      with False show ?thesis by simp
   6.151 +    qed
   6.152 +    finally show ?thesis .
   6.153 +  qed
   6.154 +qed
   6.155 +
   6.156 +
   6.157 +theorem fib_gcd: "fib (gcd m n) = gcd (fib m) (fib n)" (is "?P m n")
   6.158 +proof (induct m n rule: gcd_induct)
   6.159 +  fix m show "fib (gcd m 0) = gcd (fib m) (fib 0)" by simp
   6.160 +  fix n :: nat assume n: "0 < n"
   6.161 +  then have "gcd m n = gcd n (m mod n)" by (rule gcd_non_0)
   6.162 +  also assume hyp: "fib ... = gcd (fib n) (fib (m mod n))"
   6.163 +  also from n have "... = gcd (fib n) (fib m)" by (rule gcd_fib_mod)
   6.164 +  also have "... = gcd (fib m) (fib n)" by (rule gcd_commute)
   6.165 +  finally show "fib (gcd m n) = gcd (fib m) (fib n)" .
   6.166 +qed
   6.167 +
   6.168 +end
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/src/HOL/Isar_Examples/Group.thy	Tue Oct 20 19:37:09 2009 +0200
     7.3 @@ -0,0 +1,267 @@
     7.4 +(*  Title:      HOL/Isar_Examples/Group.thy
     7.5 +    Author:     Markus Wenzel, TU Muenchen
     7.6 +*)
     7.7 +
     7.8 +header {* Basic group theory *}
     7.9 +
    7.10 +theory Group
    7.11 +imports Main
    7.12 +begin
    7.13 +
    7.14 +subsection {* Groups and calculational reasoning *} 
    7.15 +
    7.16 +text {*
    7.17 + Groups over signature $({\times} :: \alpha \To \alpha \To \alpha,
    7.18 + \idt{one} :: \alpha, \idt{inverse} :: \alpha \To \alpha)$ are defined
    7.19 + as an axiomatic type class as follows.  Note that the parent class
    7.20 + $\idt{times}$ is provided by the basic HOL theory.
    7.21 +*}
    7.22 +
    7.23 +consts
    7.24 +  one :: "'a"
    7.25 +  inverse :: "'a => 'a"
    7.26 +
    7.27 +axclass
    7.28 +  group < times
    7.29 +  group_assoc:         "(x * y) * z = x * (y * z)"
    7.30 +  group_left_one:      "one * x = x"
    7.31 +  group_left_inverse:  "inverse x * x = one"
    7.32 +
    7.33 +text {*
    7.34 + The group axioms only state the properties of left one and inverse,
    7.35 + the right versions may be derived as follows.
    7.36 +*}
    7.37 +
    7.38 +theorem group_right_inverse: "x * inverse x = (one::'a::group)"
    7.39 +proof -
    7.40 +  have "x * inverse x = one * (x * inverse x)"
    7.41 +    by (simp only: group_left_one)
    7.42 +  also have "... = one * x * inverse x"
    7.43 +    by (simp only: group_assoc)
    7.44 +  also have "... = inverse (inverse x) * inverse x * x * inverse x"
    7.45 +    by (simp only: group_left_inverse)
    7.46 +  also have "... = inverse (inverse x) * (inverse x * x) * inverse x"
    7.47 +    by (simp only: group_assoc)
    7.48 +  also have "... = inverse (inverse x) * one * inverse x"
    7.49 +    by (simp only: group_left_inverse)
    7.50 +  also have "... = inverse (inverse x) * (one * inverse x)"
    7.51 +    by (simp only: group_assoc)
    7.52 +  also have "... = inverse (inverse x) * inverse x"
    7.53 +    by (simp only: group_left_one)
    7.54 +  also have "... = one"
    7.55 +    by (simp only: group_left_inverse)
    7.56 +  finally show ?thesis .
    7.57 +qed
    7.58 +
    7.59 +text {*
    7.60 + With \name{group-right-inverse} already available,
    7.61 + \name{group-right-one}\label{thm:group-right-one} is now established
    7.62 + much easier.
    7.63 +*}
    7.64 +
    7.65 +theorem group_right_one: "x * one = (x::'a::group)"
    7.66 +proof -
    7.67 +  have "x * one = x * (inverse x * x)"
    7.68 +    by (simp only: group_left_inverse)
    7.69 +  also have "... = x * inverse x * x"
    7.70 +    by (simp only: group_assoc)
    7.71 +  also have "... = one * x"
    7.72 +    by (simp only: group_right_inverse)
    7.73 +  also have "... = x"
    7.74 +    by (simp only: group_left_one)
    7.75 +  finally show ?thesis .
    7.76 +qed
    7.77 +
    7.78 +text {*
    7.79 + \medskip The calculational proof style above follows typical
    7.80 + presentations given in any introductory course on algebra.  The basic
    7.81 + technique is to form a transitive chain of equations, which in turn
    7.82 + are established by simplifying with appropriate rules.  The low-level
    7.83 + logical details of equational reasoning are left implicit.
    7.84 +
    7.85 + Note that ``$\dots$'' is just a special term variable that is bound
    7.86 + automatically to the argument\footnote{The argument of a curried
    7.87 + infix expression happens to be its right-hand side.} of the last fact
    7.88 + achieved by any local assumption or proven statement.  In contrast to
    7.89 + $\var{thesis}$, the ``$\dots$'' variable is bound \emph{after} the
    7.90 + proof is finished, though.
    7.91 +
    7.92 + There are only two separate Isar language elements for calculational
    7.93 + proofs: ``\isakeyword{also}'' for initial or intermediate
    7.94 + calculational steps, and ``\isakeyword{finally}'' for exhibiting the
    7.95 + result of a calculation.  These constructs are not hardwired into
    7.96 + Isabelle/Isar, but defined on top of the basic Isar/VM interpreter.
    7.97 + Expanding the \isakeyword{also} and \isakeyword{finally} derived
    7.98 + language elements, calculations may be simulated by hand as
    7.99 + demonstrated below.
   7.100 +*}
   7.101 +
   7.102 +theorem "x * one = (x::'a::group)"
   7.103 +proof -
   7.104 +  have "x * one = x * (inverse x * x)"
   7.105 +    by (simp only: group_left_inverse)
   7.106 +
   7.107 +  note calculation = this
   7.108 +    -- {* first calculational step: init calculation register *}
   7.109 +
   7.110 +  have "... = x * inverse x * x"
   7.111 +    by (simp only: group_assoc)
   7.112 +
   7.113 +  note calculation = trans [OF calculation this]
   7.114 +    -- {* general calculational step: compose with transitivity rule *}
   7.115 +
   7.116 +  have "... = one * x"
   7.117 +    by (simp only: group_right_inverse)
   7.118 +
   7.119 +  note calculation = trans [OF calculation this]
   7.120 +    -- {* general calculational step: compose with transitivity rule *}
   7.121 +
   7.122 +  have "... = x"
   7.123 +    by (simp only: group_left_one)
   7.124 +
   7.125 +  note calculation = trans [OF calculation this]
   7.126 +    -- {* final calculational step: compose with transitivity rule ... *}
   7.127 +  from calculation
   7.128 +    -- {* ... and pick up the final result *}
   7.129 +
   7.130 +  show ?thesis .
   7.131 +qed
   7.132 +
   7.133 +text {*
   7.134 + Note that this scheme of calculations is not restricted to plain
   7.135 + transitivity.  Rules like anti-symmetry, or even forward and backward
   7.136 + substitution work as well.  For the actual implementation of
   7.137 + \isacommand{also} and \isacommand{finally}, Isabelle/Isar maintains
   7.138 + separate context information of ``transitivity'' rules.  Rule
   7.139 + selection takes place automatically by higher-order unification.
   7.140 +*}
   7.141 +
   7.142 +
   7.143 +subsection {* Groups as monoids *}
   7.144 +
   7.145 +text {*
   7.146 + Monoids over signature $({\times} :: \alpha \To \alpha \To \alpha,
   7.147 + \idt{one} :: \alpha)$ are defined like this.
   7.148 +*}
   7.149 +
   7.150 +axclass monoid < times
   7.151 +  monoid_assoc:       "(x * y) * z = x * (y * z)"
   7.152 +  monoid_left_one:   "one * x = x"
   7.153 +  monoid_right_one:  "x * one = x"
   7.154 +
   7.155 +text {*
   7.156 + Groups are \emph{not} yet monoids directly from the definition.  For
   7.157 + monoids, \name{right-one} had to be included as an axiom, but for
   7.158 + groups both \name{right-one} and \name{right-inverse} are derivable
   7.159 + from the other axioms.  With \name{group-right-one} derived as a
   7.160 + theorem of group theory (see page~\pageref{thm:group-right-one}), we
   7.161 + may still instantiate $\idt{group} \subseteq \idt{monoid}$ properly
   7.162 + as follows.
   7.163 +*}
   7.164 +
   7.165 +instance group < monoid
   7.166 +  by (intro_classes,
   7.167 +       rule group_assoc,
   7.168 +       rule group_left_one,
   7.169 +       rule group_right_one)
   7.170 +
   7.171 +text {*
   7.172 + The \isacommand{instance} command actually is a version of
   7.173 + \isacommand{theorem}, setting up a goal that reflects the intended
   7.174 + class relation (or type constructor arity).  Thus any Isar proof
   7.175 + language element may be involved to establish this statement.  When
   7.176 + concluding the proof, the result is transformed into the intended
   7.177 + type signature extension behind the scenes.
   7.178 +*}
   7.179 +
   7.180 +subsection {* More theorems of group theory *}
   7.181 +
   7.182 +text {*
   7.183 + The one element is already uniquely determined by preserving an
   7.184 + \emph{arbitrary} group element.
   7.185 +*}
   7.186 +
   7.187 +theorem group_one_equality: "e * x = x ==> one = (e::'a::group)"
   7.188 +proof -
   7.189 +  assume eq: "e * x = x"
   7.190 +  have "one = x * inverse x"
   7.191 +    by (simp only: group_right_inverse)
   7.192 +  also have "... = (e * x) * inverse x"
   7.193 +    by (simp only: eq)
   7.194 +  also have "... = e * (x * inverse x)"
   7.195 +    by (simp only: group_assoc)
   7.196 +  also have "... = e * one"
   7.197 +    by (simp only: group_right_inverse)
   7.198 +  also have "... = e"
   7.199 +    by (simp only: group_right_one)
   7.200 +  finally show ?thesis .
   7.201 +qed
   7.202 +
   7.203 +text {*
   7.204 + Likewise, the inverse is already determined by the cancel property.
   7.205 +*}
   7.206 +
   7.207 +theorem group_inverse_equality:
   7.208 +  "x' * x = one ==> inverse x = (x'::'a::group)"
   7.209 +proof -
   7.210 +  assume eq: "x' * x = one"
   7.211 +  have "inverse x = one * inverse x"
   7.212 +    by (simp only: group_left_one)
   7.213 +  also have "... = (x' * x) * inverse x"
   7.214 +    by (simp only: eq)
   7.215 +  also have "... = x' * (x * inverse x)"
   7.216 +    by (simp only: group_assoc)
   7.217 +  also have "... = x' * one"
   7.218 +    by (simp only: group_right_inverse)
   7.219 +  also have "... = x'"
   7.220 +    by (simp only: group_right_one)
   7.221 +  finally show ?thesis .
   7.222 +qed
   7.223 +
   7.224 +text {*
   7.225 + The inverse operation has some further characteristic properties.
   7.226 +*}
   7.227 +
   7.228 +theorem group_inverse_times:
   7.229 +  "inverse (x * y) = inverse y * inverse (x::'a::group)"
   7.230 +proof (rule group_inverse_equality)
   7.231 +  show "(inverse y * inverse x) * (x * y) = one"
   7.232 +  proof -
   7.233 +    have "(inverse y * inverse x) * (x * y) =
   7.234 +        (inverse y * (inverse x * x)) * y"
   7.235 +      by (simp only: group_assoc)
   7.236 +    also have "... = (inverse y * one) * y"
   7.237 +      by (simp only: group_left_inverse)
   7.238 +    also have "... = inverse y * y"
   7.239 +      by (simp only: group_right_one)
   7.240 +    also have "... = one"
   7.241 +      by (simp only: group_left_inverse)
   7.242 +    finally show ?thesis .
   7.243 +  qed
   7.244 +qed
   7.245 +
   7.246 +theorem inverse_inverse: "inverse (inverse x) = (x::'a::group)"
   7.247 +proof (rule group_inverse_equality)
   7.248 +  show "x * inverse x = one"
   7.249 +    by (simp only: group_right_inverse)
   7.250 +qed
   7.251 +
   7.252 +theorem inverse_inject: "inverse x = inverse y ==> x = (y::'a::group)"
   7.253 +proof -
   7.254 +  assume eq: "inverse x = inverse y"
   7.255 +  have "x = x * one"
   7.256 +    by (simp only: group_right_one)
   7.257 +  also have "... = x * (inverse y * y)"
   7.258 +    by (simp only: group_left_inverse)
   7.259 +  also have "... = x * (inverse x * y)"
   7.260 +    by (simp only: eq)
   7.261 +  also have "... = (x * inverse x) * y"
   7.262 +    by (simp only: group_assoc)
   7.263 +  also have "... = one * y"
   7.264 +    by (simp only: group_right_inverse)
   7.265 +  also have "... = y"
   7.266 +    by (simp only: group_left_one)
   7.267 +  finally show ?thesis .
   7.268 +qed
   7.269 +
   7.270 +end
   7.271 \ No newline at end of file
     8.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.2 +++ b/src/HOL/Isar_Examples/Hoare.thy	Tue Oct 20 19:37:09 2009 +0200
     8.3 @@ -0,0 +1,463 @@
     8.4 +(*  Title:      HOL/Isar_Examples/Hoare.thy
     8.5 +    Author:     Markus Wenzel, TU Muenchen
     8.6 +
     8.7 +A formulation of Hoare logic suitable for Isar.
     8.8 +*)
     8.9 +
    8.10 +header {* Hoare Logic *}
    8.11 +
    8.12 +theory Hoare
    8.13 +imports Main
    8.14 +uses ("~~/src/HOL/Hoare/hoare_tac.ML")
    8.15 +begin
    8.16 +
    8.17 +subsection {* Abstract syntax and semantics *}
    8.18 +
    8.19 +text {*
    8.20 + The following abstract syntax and semantics of Hoare Logic over
    8.21 + \texttt{WHILE} programs closely follows the existing tradition in
    8.22 + Isabelle/HOL of formalizing the presentation given in
    8.23 + \cite[\S6]{Winskel:1993}.  See also
    8.24 + \url{http://isabelle.in.tum.de/library/Hoare/} and
    8.25 + \cite{Nipkow:1998:Winskel}.
    8.26 +*}
    8.27 +
    8.28 +types
    8.29 +  'a bexp = "'a set"
    8.30 +  'a assn = "'a set"
    8.31 +
    8.32 +datatype 'a com =
    8.33 +    Basic "'a => 'a"
    8.34 +  | Seq "'a com" "'a com"    ("(_;/ _)" [60, 61] 60)
    8.35 +  | Cond "'a bexp" "'a com" "'a com"
    8.36 +  | While "'a bexp" "'a assn" "'a com"
    8.37 +
    8.38 +abbreviation
    8.39 +  Skip  ("SKIP") where
    8.40 +  "SKIP == Basic id"
    8.41 +
    8.42 +types
    8.43 +  'a sem = "'a => 'a => bool"
    8.44 +
    8.45 +consts
    8.46 +  iter :: "nat => 'a bexp => 'a sem => 'a sem"
    8.47 +primrec
    8.48 +  "iter 0 b S s s' = (s ~: b & s = s')"
    8.49 +  "iter (Suc n) b S s s' =
    8.50 +    (s : b & (EX s''. S s s'' & iter n b S s'' s'))"
    8.51 +
    8.52 +consts
    8.53 +  Sem :: "'a com => 'a sem"
    8.54 +primrec
    8.55 +  "Sem (Basic f) s s' = (s' = f s)"
    8.56 +  "Sem (c1; c2) s s' = (EX s''. Sem c1 s s'' & Sem c2 s'' s')"
    8.57 +  "Sem (Cond b c1 c2) s s' =
    8.58 +    (if s : b then Sem c1 s s' else Sem c2 s s')"
    8.59 +  "Sem (While b x c) s s' = (EX n. iter n b (Sem c) s s')"
    8.60 +
    8.61 +constdefs
    8.62 +  Valid :: "'a bexp => 'a com => 'a bexp => bool"
    8.63 +    ("(3|- _/ (2_)/ _)" [100, 55, 100] 50)
    8.64 +  "|- P c Q == ALL s s'. Sem c s s' --> s : P --> s' : Q"
    8.65 +
    8.66 +syntax (xsymbols)
    8.67 +  Valid :: "'a bexp => 'a com => 'a bexp => bool"
    8.68 +    ("(3\<turnstile> _/ (2_)/ _)" [100, 55, 100] 50)
    8.69 +
    8.70 +lemma ValidI [intro?]:
    8.71 +    "(!!s s'. Sem c s s' ==> s : P ==> s' : Q) ==> |- P c Q"
    8.72 +  by (simp add: Valid_def)
    8.73 +
    8.74 +lemma ValidD [dest?]:
    8.75 +    "|- P c Q ==> Sem c s s' ==> s : P ==> s' : Q"
    8.76 +  by (simp add: Valid_def)
    8.77 +
    8.78 +
    8.79 +subsection {* Primitive Hoare rules *}
    8.80 +
    8.81 +text {*
    8.82 + From the semantics defined above, we derive the standard set of
    8.83 + primitive Hoare rules; e.g.\ see \cite[\S6]{Winskel:1993}.  Usually,
    8.84 + variant forms of these rules are applied in actual proof, see also
    8.85 + \S\ref{sec:hoare-isar} and \S\ref{sec:hoare-vcg}.
    8.86 +
    8.87 + \medskip The \name{basic} rule represents any kind of atomic access
    8.88 + to the state space.  This subsumes the common rules of \name{skip}
    8.89 + and \name{assign}, as formulated in \S\ref{sec:hoare-isar}.
    8.90 +*}
    8.91 +
    8.92 +theorem basic: "|- {s. f s : P} (Basic f) P"
    8.93 +proof
    8.94 +  fix s s' assume s: "s : {s. f s : P}"
    8.95 +  assume "Sem (Basic f) s s'"
    8.96 +  hence "s' = f s" by simp
    8.97 +  with s show "s' : P" by simp
    8.98 +qed
    8.99 +
   8.100 +text {*
   8.101 + The rules for sequential commands and semantic consequences are
    8.102 + established in a straightforward manner as follows.
   8.103 +*}
   8.104 +
   8.105 +theorem seq: "|- P c1 Q ==> |- Q c2 R ==> |- P (c1; c2) R"
   8.106 +proof
   8.107 +  assume cmd1: "|- P c1 Q" and cmd2: "|- Q c2 R"
   8.108 +  fix s s' assume s: "s : P"
   8.109 +  assume "Sem (c1; c2) s s'"
   8.110 +  then obtain s'' where sem1: "Sem c1 s s''" and sem2: "Sem c2 s'' s'"
   8.111 +    by auto
   8.112 +  from cmd1 sem1 s have "s'' : Q" ..
   8.113 +  with cmd2 sem2 show "s' : R" ..
   8.114 +qed
   8.115 +
   8.116 +theorem conseq: "P' <= P ==> |- P c Q ==> Q <= Q' ==> |- P' c Q'"
   8.117 +proof
   8.118 +  assume P'P: "P' <= P" and QQ': "Q <= Q'"
   8.119 +  assume cmd: "|- P c Q"
   8.120 +  fix s s' :: 'a
   8.121 +  assume sem: "Sem c s s'"
   8.122 +  assume "s : P'" with P'P have "s : P" ..
   8.123 +  with cmd sem have "s' : Q" ..
   8.124 +  with QQ' show "s' : Q'" ..
   8.125 +qed
   8.126 +
   8.127 +text {*
   8.128 + The rule for conditional commands is directly reflected by the
   8.129 + corresponding semantics; in the proof we just have to look closely
   8.130 + which cases apply.
   8.131 +*}
   8.132 +
   8.133 +theorem cond:
   8.134 +  "|- (P Int b) c1 Q ==> |- (P Int -b) c2 Q ==> |- P (Cond b c1 c2) Q"
   8.135 +proof
   8.136 +  assume case_b: "|- (P Int b) c1 Q" and case_nb: "|- (P Int -b) c2 Q"
   8.137 +  fix s s' assume s: "s : P"
   8.138 +  assume sem: "Sem (Cond b c1 c2) s s'"
   8.139 +  show "s' : Q"
   8.140 +  proof cases
   8.141 +    assume b: "s : b"
   8.142 +    from case_b show ?thesis
   8.143 +    proof
   8.144 +      from sem b show "Sem c1 s s'" by simp
   8.145 +      from s b show "s : P Int b" by simp
   8.146 +    qed
   8.147 +  next
   8.148 +    assume nb: "s ~: b"
   8.149 +    from case_nb show ?thesis
   8.150 +    proof
   8.151 +      from sem nb show "Sem c2 s s'" by simp
   8.152 +      from s nb show "s : P Int -b" by simp
   8.153 +    qed
   8.154 +  qed
   8.155 +qed
   8.156 +
   8.157 +text {*
   8.158 + The \name{while} rule is slightly less trivial --- it is the only one
   8.159 + based on recursion, which is expressed in the semantics by a
   8.160 + Kleene-style least fixed-point construction.  The auxiliary statement
    8.161 + below, which is by induction on the number of iterations, is the main
   8.162 + point to be proven; the rest is by routine application of the
   8.163 + semantics of \texttt{WHILE}.
   8.164 +*}
   8.165 +
   8.166 +theorem while:
   8.167 +  assumes body: "|- (P Int b) c P"
   8.168 +  shows "|- P (While b X c) (P Int -b)"
   8.169 +proof
   8.170 +  fix s s' assume s: "s : P"
   8.171 +  assume "Sem (While b X c) s s'"
   8.172 +  then obtain n where "iter n b (Sem c) s s'" by auto
   8.173 +  from this and s show "s' : P Int -b"
   8.174 +  proof (induct n arbitrary: s)
   8.175 +    case 0
   8.176 +    thus ?case by auto
   8.177 +  next
   8.178 +    case (Suc n)
   8.179 +    then obtain s'' where b: "s : b" and sem: "Sem c s s''"
   8.180 +      and iter: "iter n b (Sem c) s'' s'"
   8.181 +      by auto
   8.182 +    from Suc and b have "s : P Int b" by simp
   8.183 +    with body sem have "s'' : P" ..
   8.184 +    with iter show ?case by (rule Suc)
   8.185 +  qed
   8.186 +qed
   8.187 +
   8.188 +
   8.189 +subsection {* Concrete syntax for assertions *}
   8.190 +
   8.191 +text {*
   8.192 + We now introduce concrete syntax for describing commands (with
   8.193 + embedded expressions) and assertions. The basic technique is that of
   8.194 + semantic ``quote-antiquote''.  A \emph{quotation} is a syntactic
   8.195 + entity delimited by an implicit abstraction, say over the state
   8.196 + space.  An \emph{antiquotation} is a marked expression within a
    8.197 + quotation that refers to the implicit argument; a typical antiquotation
   8.198 + would select (or even update) components from the state.
   8.199 +
   8.200 + We will see some examples later in the concrete rules and
   8.201 + applications.
   8.202 +*}
   8.203 +
   8.204 +text {*
   8.205 + The following specification of syntax and translations is for
   8.206 + Isabelle experts only; feel free to ignore it.
   8.207 +
   8.208 + While the first part is still a somewhat intelligible specification
   8.209 + of the concrete syntactic representation of our Hoare language, the
    8.210 + actual ``ML drivers'' are quite involved.  Just note that we
   8.211 + re-use the basic quote/antiquote translations as already defined in
   8.212 + Isabelle/Pure (see \verb,Syntax.quote_tr, and
   8.213 + \verb,Syntax.quote_tr',).
   8.214 +*}
   8.215 +
   8.216 +syntax
   8.217 +  "_quote"       :: "'b => ('a => 'b)"       ("(.'(_').)" [0] 1000)
   8.218 +  "_antiquote"   :: "('a => 'b) => 'b"       ("\<acute>_" [1000] 1000)
   8.219 +  "_Subst"       :: "'a bexp \<Rightarrow> 'b \<Rightarrow> idt \<Rightarrow> 'a bexp"
   8.220 +        ("_[_'/\<acute>_]" [1000] 999)
   8.221 +  "_Assert"      :: "'a => 'a set"           ("(.{_}.)" [0] 1000)
   8.222 +  "_Assign"      :: "idt => 'b => 'a com"    ("(\<acute>_ :=/ _)" [70, 65] 61)
   8.223 +  "_Cond"        :: "'a bexp => 'a com => 'a com => 'a com"
   8.224 +        ("(0IF _/ THEN _/ ELSE _/ FI)" [0, 0, 0] 61)
   8.225 +  "_While_inv"   :: "'a bexp => 'a assn => 'a com => 'a com"
   8.226 +        ("(0WHILE _/ INV _ //DO _ /OD)"  [0, 0, 0] 61)
   8.227 +  "_While"       :: "'a bexp => 'a com => 'a com"
   8.228 +        ("(0WHILE _ //DO _ /OD)"  [0, 0] 61)
   8.229 +
   8.230 +syntax (xsymbols)
   8.231 +  "_Assert"      :: "'a => 'a set"            ("(\<lbrace>_\<rbrace>)" [0] 1000)
   8.232 +
   8.233 +translations
   8.234 +  ".{b}."                   => "Collect .(b)."
   8.235 +  "B [a/\<acute>x]"                => ".{\<acute>(_update_name x (\<lambda>_. a)) \<in> B}."
   8.236 +  "\<acute>x := a"                 => "Basic .(\<acute>(_update_name x (\<lambda>_. a)))."
   8.237 +  "IF b THEN c1 ELSE c2 FI" => "Cond .{b}. c1 c2"
   8.238 +  "WHILE b INV i DO c OD"   => "While .{b}. i c"
   8.239 +  "WHILE b DO c OD"         == "WHILE b INV CONST undefined DO c OD"
   8.240 +
   8.241 +parse_translation {*
   8.242 +  let
   8.243 +    fun quote_tr [t] = Syntax.quote_tr "_antiquote" t
   8.244 +      | quote_tr ts = raise TERM ("quote_tr", ts);
   8.245 +  in [("_quote", quote_tr)] end
   8.246 +*}
   8.247 +
   8.248 +text {*
   8.249 + As usual in Isabelle syntax translations, the part for printing is
   8.250 + more complicated --- we cannot express parts as macro rules as above.
   8.251 + Don't look here, unless you have to do similar things for yourself.
   8.252 +*}
   8.253 +
   8.254 +print_translation {*
   8.255 +  let
   8.256 +    fun quote_tr' f (t :: ts) =
   8.257 +          Term.list_comb (f $ Syntax.quote_tr' "_antiquote" t, ts)
   8.258 +      | quote_tr' _ _ = raise Match;
   8.259 +
   8.260 +    val assert_tr' = quote_tr' (Syntax.const "_Assert");
   8.261 +
   8.262 +    fun bexp_tr' name ((Const ("Collect", _) $ t) :: ts) =
   8.263 +          quote_tr' (Syntax.const name) (t :: ts)
   8.264 +      | bexp_tr' _ _ = raise Match;
   8.265 +
   8.266 +    fun upd_tr' (x_upd, T) =
   8.267 +      (case try (unsuffix Record.updateN) x_upd of
   8.268 +        SOME x => (x, if T = dummyT then T else Term.domain_type T)
   8.269 +      | NONE => raise Match);
   8.270 +
   8.271 +    fun update_name_tr' (Free x) = Free (upd_tr' x)
   8.272 +      | update_name_tr' ((c as Const ("_free", _)) $ Free x) =
   8.273 +          c $ Free (upd_tr' x)
   8.274 +      | update_name_tr' (Const x) = Const (upd_tr' x)
   8.275 +      | update_name_tr' _ = raise Match;
   8.276 +
   8.277 +    fun K_tr' (Abs (_,_,t)) = if null (loose_bnos t) then t else raise Match
   8.278 +      | K_tr' (Abs (_,_,Abs (_,_,t)$Bound 0)) = if null (loose_bnos t) then t else raise Match
   8.279 +      | K_tr' _ = raise Match;
   8.280 +
   8.281 +    fun assign_tr' (Abs (x, _, f $ k $ Bound 0) :: ts) =
   8.282 +          quote_tr' (Syntax.const "_Assign" $ update_name_tr' f)
   8.283 +            (Abs (x, dummyT, K_tr' k) :: ts)
   8.284 +      | assign_tr' _ = raise Match;
   8.285 +  in
   8.286 +    [("Collect", assert_tr'), ("Basic", assign_tr'),
   8.287 +      ("Cond", bexp_tr' "_Cond"), ("While", bexp_tr' "_While_inv")]
   8.288 +  end
   8.289 +*}
   8.290 +
   8.291 +
   8.292 +subsection {* Rules for single-step proof \label{sec:hoare-isar} *}
   8.293 +
   8.294 +text {*
   8.295 + We are now ready to introduce a set of Hoare rules to be used in
   8.296 + single-step structured proofs in Isabelle/Isar.  We refer to the
    8.297 + concrete syntax introduced above.
   8.298 +
   8.299 + \medskip Assertions of Hoare Logic may be manipulated in
   8.300 + calculational proofs, with the inclusion expressed in terms of sets
   8.301 + or predicates.  Reversed order is supported as well.
   8.302 +*}
   8.303 +
   8.304 +lemma [trans]: "|- P c Q ==> P' <= P ==> |- P' c Q"
   8.305 +  by (unfold Valid_def) blast
   8.306 +lemma [trans] : "P' <= P ==> |- P c Q ==> |- P' c Q"
   8.307 +  by (unfold Valid_def) blast
   8.308 +
   8.309 +lemma [trans]: "Q <= Q' ==> |- P c Q ==> |- P c Q'"
   8.310 +  by (unfold Valid_def) blast
   8.311 +lemma [trans]: "|- P c Q ==> Q <= Q' ==> |- P c Q'"
   8.312 +  by (unfold Valid_def) blast
   8.313 +
   8.314 +lemma [trans]:
   8.315 +    "|- .{\<acute>P}. c Q ==> (!!s. P' s --> P s) ==> |- .{\<acute>P'}. c Q"
   8.316 +  by (simp add: Valid_def)
   8.317 +lemma [trans]:
   8.318 +    "(!!s. P' s --> P s) ==> |- .{\<acute>P}. c Q ==> |- .{\<acute>P'}. c Q"
   8.319 +  by (simp add: Valid_def)
   8.320 +
   8.321 +lemma [trans]:
   8.322 +    "|- P c .{\<acute>Q}. ==> (!!s. Q s --> Q' s) ==> |- P c .{\<acute>Q'}."
   8.323 +  by (simp add: Valid_def)
   8.324 +lemma [trans]:
   8.325 +    "(!!s. Q s --> Q' s) ==> |- P c .{\<acute>Q}. ==> |- P c .{\<acute>Q'}."
   8.326 +  by (simp add: Valid_def)
   8.327 +
   8.328 +
   8.329 +text {*
   8.330 + Identity and basic assignments.\footnote{The $\idt{hoare}$ method
   8.331 + introduced in \S\ref{sec:hoare-vcg} is able to provide proper
   8.332 + instances for any number of basic assignments, without producing
   8.333 + additional verification conditions.}
   8.334 +*}
   8.335 +
   8.336 +lemma skip [intro?]: "|- P SKIP P"
   8.337 +proof -
   8.338 +  have "|- {s. id s : P} SKIP P" by (rule basic)
   8.339 +  thus ?thesis by simp
   8.340 +qed
   8.341 +
   8.342 +lemma assign: "|- P [\<acute>a/\<acute>x] \<acute>x := \<acute>a P"
   8.343 +  by (rule basic)
   8.344 +
   8.345 +text {*
   8.346 + Note that above formulation of assignment corresponds to our
   8.347 + preferred way to model state spaces, using (extensible) record types
   8.348 + in HOL \cite{Naraschewski-Wenzel:1998:HOOL}.  For any record field
    8.349 + $x$, Isabelle/HOL provides functions $x$ (selector) and
   8.350 + $\idt{x{\dsh}update}$ (update).  Above, there is only a place-holder
    8.351 + appearing for the latter kind of function: the concrete syntax
   8.352 + \isa{\'x := \'a} also contains \isa{x\_update}.\footnote{Note that due
   8.353 + to the external nature of HOL record fields, we could not even state
   8.354 + a general theorem relating selector and update functions (if this
   8.355 + were required here); this would only work for any particular instance
   8.356 + of record fields introduced so far.}
   8.357 +*}
   8.358 +
   8.359 +text {*
   8.360 + Sequential composition --- normalizing with associativity achieves
    8.361 + proper composition of chunks of code verified separately.
   8.362 +*}
   8.363 +
   8.364 +lemmas [trans, intro?] = seq
   8.365 +
   8.366 +lemma seq_assoc [simp]: "( |- P c1;(c2;c3) Q) = ( |- P (c1;c2);c3 Q)"
   8.367 +  by (auto simp add: Valid_def)
   8.368 +
   8.369 +text {*
   8.370 + Conditional statements.
   8.371 +*}
   8.372 +
   8.373 +lemmas [trans, intro?] = cond
   8.374 +
   8.375 +lemma [trans, intro?]:
   8.376 +  "|- .{\<acute>P & \<acute>b}. c1 Q
   8.377 +      ==> |- .{\<acute>P & ~ \<acute>b}. c2 Q
   8.378 +      ==> |- .{\<acute>P}. IF \<acute>b THEN c1 ELSE c2 FI Q"
   8.379 +    by (rule cond) (simp_all add: Valid_def)
   8.380 +
   8.381 +text {*
   8.382 + While statements --- with optional invariant.
   8.383 +*}
   8.384 +
   8.385 +lemma [intro?]:
   8.386 +    "|- (P Int b) c P ==> |- P (While b P c) (P Int -b)"
   8.387 +  by (rule while)
   8.388 +
   8.389 +lemma [intro?]:
   8.390 +    "|- (P Int b) c P ==> |- P (While b undefined c) (P Int -b)"
   8.391 +  by (rule while)
   8.392 +
   8.393 +
   8.394 +lemma [intro?]:
   8.395 +  "|- .{\<acute>P & \<acute>b}. c .{\<acute>P}.
   8.396 +    ==> |- .{\<acute>P}. WHILE \<acute>b INV .{\<acute>P}. DO c OD .{\<acute>P & ~ \<acute>b}."
   8.397 +  by (simp add: while Collect_conj_eq Collect_neg_eq)
   8.398 +
   8.399 +lemma [intro?]:
   8.400 +  "|- .{\<acute>P & \<acute>b}. c .{\<acute>P}.
   8.401 +    ==> |- .{\<acute>P}. WHILE \<acute>b DO c OD .{\<acute>P & ~ \<acute>b}."
   8.402 +  by (simp add: while Collect_conj_eq Collect_neg_eq)
   8.403 +
   8.404 +
   8.405 +subsection {* Verification conditions \label{sec:hoare-vcg} *}
   8.406 +
   8.407 +text {*
   8.408 + We now load the \emph{original} ML file for proof scripts and tactic
   8.409 + definition for the Hoare Verification Condition Generator (see
   8.410 + \url{http://isabelle.in.tum.de/library/Hoare/}).  As far as we are
   8.411 + concerned here, the result is a proof method \name{hoare}, which may
   8.412 + be applied to a Hoare Logic assertion to extract purely logical
   8.413 + verification conditions.  It is important to note that the method
   8.414 + requires \texttt{WHILE} loops to be fully annotated with invariants
   8.415 + beforehand.  Furthermore, only \emph{concrete} pieces of code are
   8.416 + handled --- the underlying tactic fails ungracefully if supplied with
   8.417 + meta-variables or parameters, for example.
   8.418 +*}
   8.419 +
   8.420 +lemma SkipRule: "p \<subseteq> q \<Longrightarrow> Valid p (Basic id) q"
   8.421 +  by (auto simp add: Valid_def)
   8.422 +
   8.423 +lemma BasicRule: "p \<subseteq> {s. f s \<in> q} \<Longrightarrow> Valid p (Basic f) q"
   8.424 +  by (auto simp: Valid_def)
   8.425 +
   8.426 +lemma SeqRule: "Valid P c1 Q \<Longrightarrow> Valid Q c2 R \<Longrightarrow> Valid P (c1;c2) R"
   8.427 +  by (auto simp: Valid_def)
   8.428 +
   8.429 +lemma CondRule:
   8.430 +  "p \<subseteq> {s. (s \<in> b \<longrightarrow> s \<in> w) \<and> (s \<notin> b \<longrightarrow> s \<in> w')}
   8.431 +    \<Longrightarrow> Valid w c1 q \<Longrightarrow> Valid w' c2 q \<Longrightarrow> Valid p (Cond b c1 c2) q"
   8.432 +  by (auto simp: Valid_def)
   8.433 +
   8.434 +lemma iter_aux:
   8.435 +  "\<forall>s s'. Sem c s s' --> s : I & s : b --> s' : I ==>
   8.436 +       (\<And>s s'. s : I \<Longrightarrow> iter n b (Sem c) s s' \<Longrightarrow> s' : I & s' ~: b)"
   8.437 +  apply(induct n)
   8.438 +   apply clarsimp
   8.439 +   apply (simp (no_asm_use))
   8.440 +   apply blast
   8.441 +  done
   8.442 +
   8.443 +lemma WhileRule:
   8.444 +    "p \<subseteq> i \<Longrightarrow> Valid (i \<inter> b) c i \<Longrightarrow> i \<inter> (-b) \<subseteq> q \<Longrightarrow> Valid p (While b i c) q"
   8.445 +  apply (clarsimp simp: Valid_def)
   8.446 +  apply (drule iter_aux)
   8.447 +    prefer 2
   8.448 +    apply assumption
   8.449 +   apply blast
   8.450 +  apply blast
   8.451 +  done
   8.452 +
   8.453 +lemma Compl_Collect: "- Collect b = {x. \<not> b x}"
   8.454 +  by blast
   8.455 +
   8.456 +lemmas AbortRule = SkipRule  -- "dummy version"
   8.457 +
   8.458 +use "~~/src/HOL/Hoare/hoare_tac.ML"
   8.459 +
   8.460 +method_setup hoare = {*
   8.461 +  Scan.succeed (fn ctxt =>
   8.462 +    (SIMPLE_METHOD'
   8.463 +       (hoare_tac ctxt (simp_tac (HOL_basic_ss addsimps [@{thm "Record.K_record_comp"}] ))))) *}
   8.464 +  "verification condition generator for Hoare logic"
   8.465 +
   8.466 +end
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/src/HOL/Isar_Examples/Hoare_Ex.thy	Tue Oct 20 19:37:09 2009 +0200
     9.3 @@ -0,0 +1,329 @@
     9.4 +header {* Using Hoare Logic *}
     9.5 +
     9.6 +theory Hoare_Ex
     9.7 +imports Hoare
     9.8 +begin
     9.9 +
    9.10 +subsection {* State spaces *}
    9.11 +
    9.12 +text {*
    9.13 + First of all we provide a store of program variables that
    9.14 + occur in any of the programs considered later.  Slightly unexpected
    9.15 + things may happen when attempting to work with undeclared variables.
    9.16 +*}
    9.17 +
    9.18 +record vars =
    9.19 +  I :: nat
    9.20 +  M :: nat
    9.21 +  N :: nat
    9.22 +  S :: nat
    9.23 +
    9.24 +text {*
    9.25 + While all of our variables happen to have the same type, nothing
    9.26 + would prevent us from working with many-sorted programs as well, or
    9.27 + even polymorphic ones.  Also note that Isabelle/HOL's extensible
     9.28 + record types even provide simple means to extend the state space
    9.29 + later.
    9.30 +*}
    9.31 +
    9.32 +
    9.33 +subsection {* Basic examples *}
    9.34 +
    9.35 +text {*
     9.36 + We look at a few trivialities involving assignment and sequential
    9.37 + composition, in order to get an idea of how to work with our
    9.38 + formulation of Hoare Logic.
    9.39 +*}
    9.40 +
    9.41 +text {*
    9.42 + Using the basic \name{assign} rule directly is a bit cumbersome.
    9.43 +*}
    9.44 +
    9.45 +lemma
    9.46 +  "|- .{\<acute>(N_update (\<lambda>_. (2 * \<acute>N))) : .{\<acute>N = 10}.}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
    9.47 +  by (rule assign)
    9.48 +
    9.49 +text {*
    9.50 + Certainly we want the state modification already done, e.g.\ by
    9.51 + simplification.  The \name{hoare} method performs the basic state
    9.52 + update for us; we may apply the Simplifier afterwards to achieve
    9.53 + ``obvious'' consequences as well.
    9.54 +*}
    9.55 +
    9.56 +lemma "|- .{True}. \<acute>N := 10 .{\<acute>N = 10}."
    9.57 +  by hoare
    9.58 +
    9.59 +lemma "|- .{2 * \<acute>N = 10}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
    9.60 +  by hoare
    9.61 +
    9.62 +lemma "|- .{\<acute>N = 5}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
    9.63 +  by hoare simp
    9.64 +
    9.65 +lemma "|- .{\<acute>N + 1 = a + 1}. \<acute>N := \<acute>N + 1 .{\<acute>N = a + 1}."
    9.66 +  by hoare
    9.67 +
    9.68 +lemma "|- .{\<acute>N = a}. \<acute>N := \<acute>N + 1 .{\<acute>N = a + 1}."
    9.69 +  by hoare simp
    9.70 +
    9.71 +lemma "|- .{a = a & b = b}. \<acute>M := a; \<acute>N := b .{\<acute>M = a & \<acute>N = b}."
    9.72 +  by hoare
    9.73 +
    9.74 +lemma "|- .{True}. \<acute>M := a; \<acute>N := b .{\<acute>M = a & \<acute>N = b}."
    9.75 +  by hoare simp
    9.76 +
    9.77 +lemma
    9.78 +"|- .{\<acute>M = a & \<acute>N = b}.
    9.79 +    \<acute>I := \<acute>M; \<acute>M := \<acute>N; \<acute>N := \<acute>I
    9.80 +    .{\<acute>M = b & \<acute>N = a}."
    9.81 +  by hoare simp
    9.82 +
    9.83 +text {*
    9.84 + It is important to note that statements like the following one can
    9.85 + only be proven for each individual program variable.  Due to the
    9.86 + extra-logical nature of record fields, we cannot formulate a theorem
    9.87 + relating record selectors and updates schematically.
    9.88 +*}
    9.89 +
    9.90 +lemma "|- .{\<acute>N = a}. \<acute>N := \<acute>N .{\<acute>N = a}."
    9.91 +  by hoare
    9.92 +
    9.93 +lemma "|- .{\<acute>x = a}. \<acute>x := \<acute>x .{\<acute>x = a}."
    9.94 +  oops
    9.95 +
    9.96 +lemma
    9.97 +  "Valid {s. x s = a} (Basic (\<lambda>s. x_update (x s) s)) {s. x s = n}"
    9.98 +  -- {* same statement without concrete syntax *}
    9.99 +  oops
   9.100 +
   9.101 +
   9.102 +text {*
   9.103 + In the following assignments we make use of the consequence rule in
   9.104 + order to achieve the intended precondition.  Certainly, the
   9.105 + \name{hoare} method is able to handle this case, too.
   9.106 +*}
   9.107 +
   9.108 +lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
   9.109 +proof -
   9.110 +  have ".{\<acute>M = \<acute>N}. <= .{\<acute>M + 1 ~= \<acute>N}."
   9.111 +    by auto
   9.112 +  also have "|- ... \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
   9.113 +    by hoare
   9.114 +  finally show ?thesis .
   9.115 +qed
   9.116 +
   9.117 +lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
   9.118 +proof -
   9.119 +  have "!!m n::nat. m = n --> m + 1 ~= n"
   9.120 +      -- {* inclusion of assertions expressed in ``pure'' logic, *}
   9.121 +      -- {* without mentioning the state space *}
   9.122 +    by simp
   9.123 +  also have "|- .{\<acute>M + 1 ~= \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
   9.124 +    by hoare
   9.125 +  finally show ?thesis .
   9.126 +qed
   9.127 +
   9.128 +lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
   9.129 +  by hoare simp
   9.130 +
   9.131 +
   9.132 +subsection {* Multiplication by addition *}
   9.133 +
   9.134 +text {*
   9.135 + We now do some basic examples of actual \texttt{WHILE} programs.
   9.136 + This one is a loop for calculating the product of two natural
    9.137 + numbers, by iterated addition.  We first give a detailed structured
   9.138 + proof based on single-step Hoare rules.
   9.139 +*}
   9.140 +
   9.141 +lemma
   9.142 +  "|- .{\<acute>M = 0 & \<acute>S = 0}.
   9.143 +      WHILE \<acute>M ~= a
   9.144 +      DO \<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1 OD
   9.145 +      .{\<acute>S = a * b}."
   9.146 +proof -
   9.147 +  let "|- _ ?while _" = ?thesis
   9.148 +  let ".{\<acute>?inv}." = ".{\<acute>S = \<acute>M * b}."
   9.149 +
   9.150 +  have ".{\<acute>M = 0 & \<acute>S = 0}. <= .{\<acute>?inv}." by auto
   9.151 +  also have "|- ... ?while .{\<acute>?inv & ~ (\<acute>M ~= a)}."
   9.152 +  proof
   9.153 +    let ?c = "\<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1"
   9.154 +    have ".{\<acute>?inv & \<acute>M ~= a}. <= .{\<acute>S + b = (\<acute>M + 1) * b}."
   9.155 +      by auto
   9.156 +    also have "|- ... ?c .{\<acute>?inv}." by hoare
   9.157 +    finally show "|- .{\<acute>?inv & \<acute>M ~= a}. ?c .{\<acute>?inv}." .
   9.158 +  qed
   9.159 +  also have "... <= .{\<acute>S = a * b}." by auto
   9.160 +  finally show ?thesis .
   9.161 +qed
   9.162 +
   9.163 +text {*
   9.164 + The subsequent version of the proof applies the \name{hoare} method
   9.165 + to reduce the Hoare statement to a purely logical problem that can be
   9.166 + solved fully automatically.  Note that we have to specify the
   9.167 + \texttt{WHILE} loop invariant in the original statement.
   9.168 +*}
   9.169 +
   9.170 +lemma
   9.171 +  "|- .{\<acute>M = 0 & \<acute>S = 0}.
   9.172 +      WHILE \<acute>M ~= a
   9.173 +      INV .{\<acute>S = \<acute>M * b}.
   9.174 +      DO \<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1 OD
   9.175 +      .{\<acute>S = a * b}."
   9.176 +  by hoare auto
   9.177 +
   9.178 +
   9.179 +subsection {* Summing natural numbers *}
   9.180 +
   9.181 +text {*
   9.182 + We verify an imperative program to sum natural numbers up to a given
   9.183 + limit.  First some functional definition for proper specification of
   9.184 + the problem.
   9.185 +*}
   9.186 +
   9.187 +text {*
   9.188 + The following proof is quite explicit in the individual steps taken,
   9.189 + with the \name{hoare} method only applied locally to take care of
   9.190 + assignment and sequential composition.  Note that we express
    9.191 + intermediate proof obligations in pure logic, without referring to the
   9.192 + state space.
   9.193 +*}
   9.194 +
   9.195 +declare atLeast0LessThan[symmetric,simp]
   9.196 +
   9.197 +theorem
   9.198 +  "|- .{True}.
   9.199 +      \<acute>S := 0; \<acute>I := 1;
   9.200 +      WHILE \<acute>I ~= n
   9.201 +      DO
   9.202 +        \<acute>S := \<acute>S + \<acute>I;
   9.203 +        \<acute>I := \<acute>I + 1
   9.204 +      OD
   9.205 +      .{\<acute>S = (SUM j<n. j)}."
   9.206 +  (is "|- _ (_; ?while) _")
   9.207 +proof -
   9.208 +  let ?sum = "\<lambda>k::nat. SUM j<k. j"
   9.209 +  let ?inv = "\<lambda>s i::nat. s = ?sum i"
   9.210 +
   9.211 +  have "|- .{True}. \<acute>S := 0; \<acute>I := 1 .{?inv \<acute>S \<acute>I}."
   9.212 +  proof -
   9.213 +    have "True --> 0 = ?sum 1"
   9.214 +      by simp
   9.215 +    also have "|- .{...}. \<acute>S := 0; \<acute>I := 1 .{?inv \<acute>S \<acute>I}."
   9.216 +      by hoare
   9.217 +    finally show ?thesis .
   9.218 +  qed
   9.219 +  also have "|- ... ?while .{?inv \<acute>S \<acute>I & ~ \<acute>I ~= n}."
   9.220 +  proof
   9.221 +    let ?body = "\<acute>S := \<acute>S + \<acute>I; \<acute>I := \<acute>I + 1"
   9.222 +    have "!!s i. ?inv s i & i ~= n -->  ?inv (s + i) (i + 1)"
   9.223 +      by simp
   9.224 +    also have "|- .{\<acute>S + \<acute>I = ?sum (\<acute>I + 1)}. ?body .{?inv \<acute>S \<acute>I}."
   9.225 +      by hoare
   9.226 +    finally show "|- .{?inv \<acute>S \<acute>I & \<acute>I ~= n}. ?body .{?inv \<acute>S \<acute>I}." .
   9.227 +  qed
   9.228 +  also have "!!s i. s = ?sum i & ~ i ~= n --> s = ?sum n"
   9.229 +    by simp
   9.230 +  finally show ?thesis .
   9.231 +qed
   9.232 +
   9.233 +text {*
   9.234 + The next version uses the \name{hoare} method, while still explaining
   9.235 + the resulting proof obligations in an abstract, structured manner.
   9.236 +*}
   9.237 +
   9.238 +theorem
   9.239 +  "|- .{True}.
   9.240 +      \<acute>S := 0; \<acute>I := 1;
   9.241 +      WHILE \<acute>I ~= n
   9.242 +      INV .{\<acute>S = (SUM j<\<acute>I. j)}.
   9.243 +      DO
   9.244 +        \<acute>S := \<acute>S + \<acute>I;
   9.245 +        \<acute>I := \<acute>I + 1
   9.246 +      OD
   9.247 +      .{\<acute>S = (SUM j<n. j)}."
   9.248 +proof -
   9.249 +  let ?sum = "\<lambda>k::nat. SUM j<k. j"
   9.250 +  let ?inv = "\<lambda>s i::nat. s = ?sum i"
   9.251 +
   9.252 +  show ?thesis
   9.253 +  proof hoare
   9.254 +    show "?inv 0 1" by simp
   9.255 +  next
   9.256 +    fix s i assume "?inv s i & i ~= n"
   9.257 +    thus "?inv (s + i) (i + 1)" by simp
   9.258 +  next
   9.259 +    fix s i assume "?inv s i & ~ i ~= n"
   9.260 +    thus "s = ?sum n" by simp
   9.261 +  qed
   9.262 +qed
   9.263 +
   9.264 +text {*
    9.265 + Certainly, this proof may be done fully automatically as well, provided
   9.266 + that the invariant is given beforehand.
   9.267 +*}
   9.268 +
   9.269 +theorem
   9.270 +  "|- .{True}.
   9.271 +      \<acute>S := 0; \<acute>I := 1;
   9.272 +      WHILE \<acute>I ~= n
   9.273 +      INV .{\<acute>S = (SUM j<\<acute>I. j)}.
   9.274 +      DO
   9.275 +        \<acute>S := \<acute>S + \<acute>I;
   9.276 +        \<acute>I := \<acute>I + 1
   9.277 +      OD
   9.278 +      .{\<acute>S = (SUM j<n. j)}."
   9.279 +  by hoare auto
   9.280 +
   9.281 +
   9.282 +subsection{* Time *}
   9.283 +
   9.284 +text{*
   9.285 +  A simple embedding of time in Hoare logic: function @{text timeit}
   9.286 +  inserts an extra variable to keep track of the elapsed time.
   9.287 +*}
   9.288 +
   9.289 +record tstate = time :: nat
   9.290 +
   9.291 +types 'a time = "\<lparr>time :: nat, \<dots> :: 'a\<rparr>"
   9.292 +
   9.293 +consts timeit :: "'a time com \<Rightarrow> 'a time com"
   9.294 +primrec
   9.295 +  "timeit (Basic f) = (Basic f; Basic(\<lambda>s. s\<lparr>time := Suc (time s)\<rparr>))"
   9.296 +  "timeit (c1; c2) = (timeit c1; timeit c2)"
   9.297 +  "timeit (Cond b c1 c2) = Cond b (timeit c1) (timeit c2)"
   9.298 +  "timeit (While b iv c) = While b iv (timeit c)"
   9.299 +
   9.300 +record tvars = tstate +
   9.301 +  I :: nat
   9.302 +  J :: nat
   9.303 +
   9.304 +lemma lem: "(0::nat) < n \<Longrightarrow> n + n \<le> Suc (n * n)"
   9.305 +  by (induct n) simp_all
   9.306 +
   9.307 +lemma "|- .{i = \<acute>I & \<acute>time = 0}.
   9.308 + timeit(
   9.309 + WHILE \<acute>I \<noteq> 0
   9.310 + INV .{2*\<acute>time + \<acute>I*\<acute>I + 5*\<acute>I = i*i + 5*i}.
   9.311 + DO
   9.312 +   \<acute>J := \<acute>I;
   9.313 +   WHILE \<acute>J \<noteq> 0
   9.314 +   INV .{0 < \<acute>I & 2*\<acute>time + \<acute>I*\<acute>I + 3*\<acute>I + 2*\<acute>J - 2 = i*i + 5*i}.
   9.315 +   DO \<acute>J := \<acute>J - 1 OD;
   9.316 +   \<acute>I := \<acute>I - 1
   9.317 + OD
   9.318 + ) .{2*\<acute>time = i*i + 5*i}."
   9.319 +  apply simp
   9.320 +  apply hoare
   9.321 +      apply simp
   9.322 +     apply clarsimp
   9.323 +    apply clarsimp
   9.324 +   apply arith
   9.325 +   prefer 2
   9.326 +   apply clarsimp
   9.327 +  apply (clarsimp simp: nat_distrib)
   9.328 +  apply (frule lem)
   9.329 +  apply arith
   9.330 +  done
   9.331 +
   9.332 +end
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/src/HOL/Isar_Examples/Knaster_Tarski.thy	Tue Oct 20 19:37:09 2009 +0200
    10.3 @@ -0,0 +1,111 @@
    10.4 +(*  Title:      HOL/Isar_Examples/Knaster_Tarski.thy
    10.5 +    Author:     Markus Wenzel, TU Muenchen
    10.6 +
    10.7 +Typical textbook proof example.
    10.8 +*)
    10.9 +
   10.10 +header {* Textbook-style reasoning: the Knaster-Tarski Theorem *}
   10.11 +
   10.12 +theory Knaster_Tarski
   10.13 +imports Main Lattice_Syntax
   10.14 +begin
   10.15 +
   10.16 +
   10.17 +subsection {* Prose version *}
   10.18 +
   10.19 +text {*
   10.20 +  According to the textbook \cite[pages 93--94]{davey-priestley}, the
   10.21 +  Knaster-Tarski fixpoint theorem is as follows.\footnote{We have
   10.22 +  dualized the argument, and tuned the notation a little bit.}
   10.23 +
   10.24 +  \textbf{The Knaster-Tarski Fixpoint Theorem.}  Let @{text L} be a
   10.25 +  complete lattice and @{text "f: L \<rightarrow> L"} an order-preserving map.
   10.26 +  Then @{text "\<Sqinter>{x \<in> L | f(x) \<le> x}"} is a fixpoint of @{text f}.
   10.27 +
   10.28 +  \textbf{Proof.} Let @{text "H = {x \<in> L | f(x) \<le> x}"} and @{text "a =
   10.29 +  \<Sqinter>H"}.  For all @{text "x \<in> H"} we have @{text "a \<le> x"}, so @{text
   10.30 +  "f(a) \<le> f(x) \<le> x"}.  Thus @{text "f(a)"} is a lower bound of @{text
   10.31 +  H}, whence @{text "f(a) \<le> a"}.  We now use this inequality to prove
   10.32 +  the reverse one (!) and thereby complete the proof that @{text a} is
   10.33 +  a fixpoint.  Since @{text f} is order-preserving, @{text "f(f(a)) \<le>
   10.34 +  f(a)"}.  This says @{text "f(a) \<in> H"}, so @{text "a \<le> f(a)"}.
   10.35 +*}
   10.36 +
   10.37 +
   10.38 +subsection {* Formal versions *}
   10.39 +
   10.40 +text {*
   10.41 +  The Isar proof below closely follows the original presentation.
   10.42 +  Virtually all of the prose narration has been rephrased in terms of
   10.43 + formal Isar language elements.  As in many textbook-style proofs,
   10.44 +  there is a strong bias towards forward proof, and several bends in
   10.45 +  the course of reasoning.
   10.46 +*}
   10.47 +
   10.48 +theorem Knaster_Tarski:
   10.49 +  fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
   10.50 +  assumes "mono f"
   10.51 +  shows "\<exists>a. f a = a"
   10.52 +proof
   10.53 +  let ?H = "{u. f u \<le> u}"
   10.54 +  let ?a = "\<Sqinter>?H"
   10.55 +  show "f ?a = ?a"
   10.56 +  proof -
   10.57 +    {
   10.58 +      fix x
   10.59 +      assume "x \<in> ?H"
   10.60 +      then have "?a \<le> x" by (rule Inf_lower)
   10.61 +      with `mono f` have "f ?a \<le> f x" ..
   10.62 +      also from `x \<in> ?H` have "\<dots> \<le> x" ..
   10.63 +      finally have "f ?a \<le> x" .
   10.64 +    }
   10.65 +    then have "f ?a \<le> ?a" by (rule Inf_greatest)
   10.66 +    {
   10.67 +      also presume "\<dots> \<le> f ?a"
   10.68 +      finally (order_antisym) show ?thesis .
   10.69 +    }
   10.70 +    from `mono f` and `f ?a \<le> ?a` have "f (f ?a) \<le> f ?a" ..
   10.71 +    then have "f ?a \<in> ?H" ..
   10.72 +    then show "?a \<le> f ?a" by (rule Inf_lower)
   10.73 +  qed
   10.74 +qed
   10.75 +
   10.76 +text {*
   10.77 +  Above we have used several advanced Isar language elements, such as
   10.78 +  explicit block structure and weak assumptions.  Thus we have
   10.79 +  mimicked the particular way of reasoning of the original text.
   10.80 +
   10.81 +  In the subsequent version the order of reasoning is changed to
   10.82 +  achieve structured top-down decomposition of the problem at the
   10.83 +  outer level, while only the inner steps of reasoning are done in a
   10.84 +  forward manner.  We are certainly more at ease here, requiring only
   10.85 +  the most basic features of the Isar language.
   10.86 +*}
   10.87 +
   10.88 +theorem Knaster_Tarski':
   10.89 +  fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
   10.90 +  assumes "mono f"
   10.91 +  shows "\<exists>a. f a = a"
   10.92 +proof
   10.93 +  let ?H = "{u. f u \<le> u}"
   10.94 +  let ?a = "\<Sqinter>?H"
   10.95 +  show "f ?a = ?a"
   10.96 +  proof (rule order_antisym)
   10.97 +    show "f ?a \<le> ?a"
   10.98 +    proof (rule Inf_greatest)
   10.99 +      fix x
  10.100 +      assume "x \<in> ?H"
  10.101 +      then have "?a \<le> x" by (rule Inf_lower)
  10.102 +      with `mono f` have "f ?a \<le> f x" ..
  10.103 +      also from `x \<in> ?H` have "\<dots> \<le> x" ..
  10.104 +      finally show "f ?a \<le> x" .
  10.105 +    qed
  10.106 +    show "?a \<le> f ?a"
  10.107 +    proof (rule Inf_lower)
  10.108 +      from `mono f` and `f ?a \<le> ?a` have "f (f ?a) \<le> f ?a" ..
  10.109 +      then show "f ?a \<in> ?H" ..
  10.110 +    qed
  10.111 +  qed
  10.112 +qed
  10.113 +
  10.114 +end
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/src/HOL/Isar_Examples/Mutilated_Checkerboard.thy	Tue Oct 20 19:37:09 2009 +0200
    11.3 @@ -0,0 +1,300 @@
    11.4 +(*  Title:      HOL/Isar_Examples/Mutilated_Checkerboard.thy
    11.5 +    Author:     Markus Wenzel, TU Muenchen (Isar document)
    11.6 +    Author:     Lawrence C Paulson, Cambridge University Computer Laboratory (original scripts)
    11.7 +*)
    11.8 +
    11.9 +header {* The Mutilated Checker Board Problem *}
   11.10 +
   11.11 +theory Mutilated_Checkerboard
   11.12 +imports Main
   11.13 +begin
   11.14 +
   11.15 +text {*
   11.16 + The Mutilated Checker Board Problem, formalized inductively.  See
   11.17 + \cite{paulson-mutilated-board} and
   11.18 + \url{http://isabelle.in.tum.de/library/HOL/Induct/Mutil.html} for the
   11.19 + original tactic script version.
   11.20 +*}
   11.21 +
   11.22 +subsection {* Tilings *}
   11.23 +
   11.24 +inductive_set
   11.25 +  tiling :: "'a set set => 'a set set"
   11.26 +  for A :: "'a set set"
   11.27 +  where
   11.28 +    empty: "{} : tiling A"
   11.29 +  | Un: "a : A ==> t : tiling A ==> a <= - t ==> a Un t : tiling A"
   11.30 +
   11.31 +
   11.32 +text "The union of two disjoint tilings is a tiling."
   11.33 +
   11.34 +lemma tiling_Un:
   11.35 +  assumes "t : tiling A" and "u : tiling A" and "t Int u = {}"
   11.36 +  shows "t Un u : tiling A"
   11.37 +proof -
   11.38 +  let ?T = "tiling A"
   11.39 +  from `t : ?T` and `t Int u = {}`
   11.40 +  show "t Un u : ?T"
   11.41 +  proof (induct t)
   11.42 +    case empty
   11.43 +    with `u : ?T` show "{} Un u : ?T" by simp
   11.44 +  next
   11.45 +    case (Un a t)
   11.46 +    show "(a Un t) Un u : ?T"
   11.47 +    proof -
   11.48 +      have "a Un (t Un u) : ?T"
   11.49 +        using `a : A`
   11.50 +      proof (rule tiling.Un)
   11.51 +        from `(a Un t) Int u = {}` have "t Int u = {}" by blast
   11.52 +        then show "t Un u: ?T" by (rule Un)
   11.53 +        from `a <= - t` and `(a Un t) Int u = {}`
   11.54 +        show "a <= - (t Un u)" by blast
   11.55 +      qed
   11.56 +      also have "a Un (t Un u) = (a Un t) Un u"
   11.57 +        by (simp only: Un_assoc)
   11.58 +      finally show ?thesis .
   11.59 +    qed
   11.60 +  qed
   11.61 +qed
   11.62 +
   11.63 +
   11.64 +subsection {* Basic properties of ``below'' *}
   11.65 +
   11.66 +constdefs
   11.67 +  below :: "nat => nat set"
   11.68 +  "below n == {i. i < n}"
   11.69 +
   11.70 +lemma below_less_iff [iff]: "(i: below k) = (i < k)"
   11.71 +  by (simp add: below_def)
   11.72 +
   11.73 +lemma below_0: "below 0 = {}"
   11.74 +  by (simp add: below_def)
   11.75 +
   11.76 +lemma Sigma_Suc1:
   11.77 +    "m = n + 1 ==> below m <*> B = ({n} <*> B) Un (below n <*> B)"
   11.78 +  by (simp add: below_def less_Suc_eq) blast
   11.79 +
   11.80 +lemma Sigma_Suc2:
   11.81 +    "m = n + 2 ==> A <*> below m =
   11.82 +      (A <*> {n}) Un (A <*> {n + 1}) Un (A <*> below n)"
   11.83 +  by (auto simp add: below_def)
   11.84 +
   11.85 +lemmas Sigma_Suc = Sigma_Suc1 Sigma_Suc2
   11.86 +
   11.87 +
   11.88 +subsection {* Basic properties of ``evnodd'' *}
   11.89 +
   11.90 +constdefs
   11.91 +  evnodd :: "(nat * nat) set => nat => (nat * nat) set"
   11.92 +  "evnodd A b == A Int {(i, j). (i + j) mod 2 = b}"
   11.93 +
   11.94 +lemma evnodd_iff:
   11.95 +    "(i, j): evnodd A b = ((i, j): A  & (i + j) mod 2 = b)"
   11.96 +  by (simp add: evnodd_def)
   11.97 +
   11.98 +lemma evnodd_subset: "evnodd A b <= A"
   11.99 +  by (unfold evnodd_def, rule Int_lower1)
  11.100 +
  11.101 +lemma evnoddD: "x : evnodd A b ==> x : A"
  11.102 +  by (rule subsetD, rule evnodd_subset)
  11.103 +
  11.104 +lemma evnodd_finite: "finite A ==> finite (evnodd A b)"
  11.105 +  by (rule finite_subset, rule evnodd_subset)
  11.106 +
  11.107 +lemma evnodd_Un: "evnodd (A Un B) b = evnodd A b Un evnodd B b"
  11.108 +  by (unfold evnodd_def) blast
  11.109 +
  11.110 +lemma evnodd_Diff: "evnodd (A - B) b = evnodd A b - evnodd B b"
  11.111 +  by (unfold evnodd_def) blast
  11.112 +
  11.113 +lemma evnodd_empty: "evnodd {} b = {}"
  11.114 +  by (simp add: evnodd_def)
  11.115 +
  11.116 +lemma evnodd_insert: "evnodd (insert (i, j) C) b =
  11.117 +    (if (i + j) mod 2 = b
  11.118 +      then insert (i, j) (evnodd C b) else evnodd C b)"
  11.119 +  by (simp add: evnodd_def)
  11.120 +
  11.121 +
  11.122 +subsection {* Dominoes *}
  11.123 +
  11.124 +inductive_set
  11.125 +  domino :: "(nat * nat) set set"
  11.126 +  where
  11.127 +    horiz: "{(i, j), (i, j + 1)} : domino"
  11.128 +  | vertl: "{(i, j), (i + 1, j)} : domino"
  11.129 +
  11.130 +lemma dominoes_tile_row:
  11.131 +  "{i} <*> below (2 * n) : tiling domino"
  11.132 +  (is "?B n : ?T")
  11.133 +proof (induct n)
  11.134 +  case 0
  11.135 +  show ?case by (simp add: below_0 tiling.empty)
  11.136 +next
  11.137 +  case (Suc n)
  11.138 +  let ?a = "{i} <*> {2 * n + 1} Un {i} <*> {2 * n}"
  11.139 +  have "?B (Suc n) = ?a Un ?B n"
  11.140 +    by (auto simp add: Sigma_Suc Un_assoc)
  11.141 +  moreover have "... : ?T"
  11.142 +  proof (rule tiling.Un)
  11.143 +    have "{(i, 2 * n), (i, 2 * n + 1)} : domino"
  11.144 +      by (rule domino.horiz)
  11.145 +    also have "{(i, 2 * n), (i, 2 * n + 1)} = ?a" by blast
  11.146 +    finally show "... : domino" .
  11.147 +    show "?B n : ?T" by (rule Suc)
  11.148 +    show "?a <= - ?B n" by blast
  11.149 +  qed
  11.150 +  ultimately show ?case by simp
  11.151 +qed
  11.152 +
  11.153 +lemma dominoes_tile_matrix:
  11.154 +  "below m <*> below (2 * n) : tiling domino"
  11.155 +  (is "?B m : ?T")
  11.156 +proof (induct m)
  11.157 +  case 0
  11.158 +  show ?case by (simp add: below_0 tiling.empty)
  11.159 +next
  11.160 +  case (Suc m)
  11.161 +  let ?t = "{m} <*> below (2 * n)"
  11.162 +  have "?B (Suc m) = ?t Un ?B m" by (simp add: Sigma_Suc)
  11.163 +  moreover have "... : ?T"
  11.164 +  proof (rule tiling_Un)
  11.165 +    show "?t : ?T" by (rule dominoes_tile_row)
  11.166 +    show "?B m : ?T" by (rule Suc)
  11.167 +    show "?t Int ?B m = {}" by blast
  11.168 +  qed
  11.169 +  ultimately show ?case by simp
  11.170 +qed
  11.171 +
  11.172 +lemma domino_singleton:
  11.173 +  assumes d: "d : domino" and "b < 2"
  11.174 +  shows "EX i j. evnodd d b = {(i, j)}"  (is "?P d")
  11.175 +  using d
  11.176 +proof induct
  11.177 +  from `b < 2` have b_cases: "b = 0 | b = 1" by arith
  11.178 +  fix i j
  11.179 +  note [simp] = evnodd_empty evnodd_insert mod_Suc
  11.180 +  from b_cases show "?P {(i, j), (i, j + 1)}" by rule auto
  11.181 +  from b_cases show "?P {(i, j), (i + 1, j)}" by rule auto
  11.182 +qed
  11.183 +
  11.184 +lemma domino_finite:
  11.185 +  assumes d: "d: domino"
  11.186 +  shows "finite d"
  11.187 +  using d
  11.188 +proof induct
  11.189 +  fix i j :: nat
  11.190 +  show "finite {(i, j), (i, j + 1)}" by (intro finite.intros)
  11.191 +  show "finite {(i, j), (i + 1, j)}" by (intro finite.intros)
  11.192 +qed
  11.193 +
  11.194 +
  11.195 +subsection {* Tilings of dominoes *}
  11.196 +
  11.197 +lemma tiling_domino_finite:
  11.198 +  assumes t: "t : tiling domino"  (is "t : ?T")
  11.199 +  shows "finite t"  (is "?F t")
  11.200 +  using t
  11.201 +proof induct
  11.202 +  show "?F {}" by (rule finite.emptyI)
  11.203 +  fix a t assume "?F t"
  11.204 +  assume "a : domino" then have "?F a" by (rule domino_finite)
  11.205 +  from this and `?F t` show "?F (a Un t)" by (rule finite_UnI)
  11.206 +qed
  11.207 +
  11.208 +lemma tiling_domino_01:
  11.209 +  assumes t: "t : tiling domino"  (is "t : ?T")
  11.210 +  shows "card (evnodd t 0) = card (evnodd t 1)"
  11.211 +  using t
  11.212 +proof induct
  11.213 +  case empty
  11.214 +  show ?case by (simp add: evnodd_def)
  11.215 +next
  11.216 +  case (Un a t)
  11.217 +  let ?e = evnodd
  11.218 +  note hyp = `card (?e t 0) = card (?e t 1)`
  11.219 +    and at = `a <= - t`
  11.220 +  have card_suc:
  11.221 +    "!!b. b < 2 ==> card (?e (a Un t) b) = Suc (card (?e t b))"
  11.222 +  proof -
  11.223 +    fix b :: nat assume "b < 2"
  11.224 +    have "?e (a Un t) b = ?e a b Un ?e t b" by (rule evnodd_Un)
  11.225 +    also obtain i j where e: "?e a b = {(i, j)}"
  11.226 +    proof -
  11.227 +      from `a \<in> domino` and `b < 2`
  11.228 +      have "EX i j. ?e a b = {(i, j)}" by (rule domino_singleton)
  11.229 +      then show ?thesis by (blast intro: that)
  11.230 +    qed
  11.231 +    moreover have "... Un ?e t b = insert (i, j) (?e t b)" by simp
  11.232 +    moreover have "card ... = Suc (card (?e t b))"
  11.233 +    proof (rule card_insert_disjoint)
  11.234 +      from `t \<in> tiling domino` have "finite t"
  11.235 +        by (rule tiling_domino_finite)
  11.236 +      then show "finite (?e t b)"
  11.237 +        by (rule evnodd_finite)
  11.238 +      from e have "(i, j) : ?e a b" by simp
  11.239 +      with at show "(i, j) ~: ?e t b" by (blast dest: evnoddD)
  11.240 +    qed
  11.241 +    ultimately show "?thesis b" by simp
  11.242 +  qed
  11.243 +  then have "card (?e (a Un t) 0) = Suc (card (?e t 0))" by simp
  11.244 +  also from hyp have "card (?e t 0) = card (?e t 1)" .
  11.245 +  also from card_suc have "Suc ... = card (?e (a Un t) 1)"
  11.246 +    by simp
  11.247 +  finally show ?case .
  11.248 +qed
  11.249 +
  11.250 +
  11.251 +subsection {* Main theorem *}
  11.252 +
  11.253 +constdefs
  11.254 +  mutilated_board :: "nat => nat => (nat * nat) set"
  11.255 +  "mutilated_board m n ==
  11.256 +    below (2 * (m + 1)) <*> below (2 * (n + 1))
  11.257 +      - {(0, 0)} - {(2 * m + 1, 2 * n + 1)}"
  11.258 +
  11.259 +theorem mutil_not_tiling: "mutilated_board m n ~: tiling domino"
  11.260 +proof (unfold mutilated_board_def)
  11.261 +  let ?T = "tiling domino"
  11.262 +  let ?t = "below (2 * (m + 1)) <*> below (2 * (n + 1))"
  11.263 +  let ?t' = "?t - {(0, 0)}"
  11.264 +  let ?t'' = "?t' - {(2 * m + 1, 2 * n + 1)}"
  11.265 +
  11.266 +  show "?t'' ~: ?T"
  11.267 +  proof
  11.268 +    have t: "?t : ?T" by (rule dominoes_tile_matrix)
  11.269 +    assume t'': "?t'' : ?T"
  11.270 +
  11.271 +    let ?e = evnodd
  11.272 +    have fin: "finite (?e ?t 0)"
  11.273 +      by (rule evnodd_finite, rule tiling_domino_finite, rule t)
  11.274 +
  11.275 +    note [simp] = evnodd_iff evnodd_empty evnodd_insert evnodd_Diff
  11.276 +    have "card (?e ?t'' 0) < card (?e ?t' 0)"
  11.277 +    proof -
  11.278 +      have "card (?e ?t' 0 - {(2 * m + 1, 2 * n + 1)})
  11.279 +        < card (?e ?t' 0)"
  11.280 +      proof (rule card_Diff1_less)
  11.281 +        from _ fin show "finite (?e ?t' 0)"
  11.282 +          by (rule finite_subset) auto
  11.283 +        show "(2 * m + 1, 2 * n + 1) : ?e ?t' 0" by simp
  11.284 +      qed
  11.285 +      then show ?thesis by simp
  11.286 +    qed
  11.287 +    also have "... < card (?e ?t 0)"
  11.288 +    proof -
  11.289 +      have "(0, 0) : ?e ?t 0" by simp
  11.290 +      with fin have "card (?e ?t 0 - {(0, 0)}) < card (?e ?t 0)"
  11.291 +        by (rule card_Diff1_less)
  11.292 +      then show ?thesis by simp
  11.293 +    qed
  11.294 +    also from t have "... = card (?e ?t 1)"
  11.295 +      by (rule tiling_domino_01)
  11.296 +    also have "?e ?t 1 = ?e ?t'' 1" by simp
  11.297 +    also from t'' have "card ... = card (?e ?t'' 0)"
  11.298 +      by (rule tiling_domino_01 [symmetric])
  11.299 +    finally have "... < ..." . then show False ..
  11.300 +  qed
  11.301 +qed
  11.302 +
  11.303 +end
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/src/HOL/Isar_Examples/Nested_Datatype.thy	Tue Oct 20 19:37:09 2009 +0200
    12.3 @@ -0,0 +1,86 @@
    12.4 +header {* Nested datatypes *}
    12.5 +
    12.6 +theory Nested_Datatype
    12.7 +imports Main
    12.8 +begin
    12.9 +
   12.10 +subsection {* Terms and substitution *}
   12.11 +
   12.12 +datatype ('a, 'b) "term" =
   12.13 +    Var 'a
   12.14 +  | App 'b "('a, 'b) term list"
   12.15 +
   12.16 +consts
   12.17 +  subst_term :: "('a => ('a, 'b) term) => ('a, 'b) term => ('a, 'b) term"
   12.18 +  subst_term_list ::
   12.19 +    "('a => ('a, 'b) term) => ('a, 'b) term list => ('a, 'b) term list"
   12.20 +
   12.21 +primrec (subst)
   12.22 +  "subst_term f (Var a) = f a"
   12.23 +  "subst_term f (App b ts) = App b (subst_term_list f ts)"
   12.24 +  "subst_term_list f [] = []"
   12.25 +  "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
   12.26 +
   12.27 +
   12.28 +text {*
   12.29 + \medskip A simple lemma about composition of substitutions.
   12.30 +*}
   12.31 +
   12.32 +lemma "subst_term (subst_term f1 o f2) t =
   12.33 +      subst_term f1 (subst_term f2 t)"
   12.34 +  and "subst_term_list (subst_term f1 o f2) ts =
   12.35 +      subst_term_list f1 (subst_term_list f2 ts)"
   12.36 +  by (induct t and ts) simp_all
   12.37 +
   12.38 +lemma "subst_term (subst_term f1 o f2) t =
   12.39 +  subst_term f1 (subst_term f2 t)"
   12.40 +proof -
   12.41 +  let "?P t" = ?thesis
   12.42 +  let ?Q = "\<lambda>ts. subst_term_list (subst_term f1 o f2) ts =
   12.43 +    subst_term_list f1 (subst_term_list f2 ts)"
   12.44 +  show ?thesis
   12.45 +  proof (induct t)
   12.46 +    fix a show "?P (Var a)" by simp
   12.47 +  next
   12.48 +    fix b ts assume "?Q ts"
   12.49 +    then show "?P (App b ts)"
   12.50 +      by (simp only: subst.simps)
   12.51 +  next
   12.52 +    show "?Q []" by simp
   12.53 +  next
   12.54 +    fix t ts
   12.55 +    assume "?P t" "?Q ts" then show "?Q (t # ts)"
   12.56 +      by (simp only: subst.simps)
   12.57 +  qed
   12.58 +qed
   12.59 +
   12.60 +
   12.61 +subsection {* Alternative induction *}
   12.62 +
   12.63 +theorem term_induct' [case_names Var App]:
   12.64 +  assumes var: "!!a. P (Var a)"
   12.65 +    and app: "!!b ts. list_all P ts ==> P (App b ts)"
   12.66 +  shows "P t"
   12.67 +proof (induct t)
   12.68 +  fix a show "P (Var a)" by (rule var)
   12.69 +next
   12.70 +  fix b t ts assume "list_all P ts"
   12.71 +  then show "P (App b ts)" by (rule app)
   12.72 +next
   12.73 +  show "list_all P []" by simp
   12.74 +next
   12.75 +  fix t ts assume "P t" "list_all P ts"
   12.76 +  then show "list_all P (t # ts)" by simp
   12.77 +qed
   12.78 +
   12.79 +lemma
   12.80 +  "subst_term (subst_term f1 o f2) t = subst_term f1 (subst_term f2 t)"
   12.81 +proof (induct t rule: term_induct')
   12.82 +  case (Var a)
   12.83 +  show ?case by (simp add: o_def)
   12.84 +next
   12.85 +  case (App b ts)
   12.86 +  then show ?case by (induct ts) simp_all
   12.87 +qed
   12.88 +
   12.89 +end
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/src/HOL/Isar_Examples/Peirce.thy	Tue Oct 20 19:37:09 2009 +0200
    13.3 @@ -0,0 +1,90 @@
    13.4 +(*  Title:      HOL/Isar_Examples/Peirce.thy
    13.5 +    Author:     Markus Wenzel, TU Muenchen
    13.6 +*)
    13.7 +
    13.8 +header {* Peirce's Law *}
    13.9 +
   13.10 +theory Peirce
   13.11 +imports Main
   13.12 +begin
   13.13 +
   13.14 +text {*
   13.15 + We consider Peirce's Law: $((A \impl B) \impl A) \impl A$.  This is
   13.16 + an inherently non-intuitionistic statement, so its proof will
   13.17 + certainly involve some form of classical contradiction.
   13.18 +
   13.19 + The first proof is again a well-balanced combination of plain
   13.20 + backward and forward reasoning.  The actual classical step is where
   13.21 + the negated goal may be introduced as additional assumption.  This
   13.22 + eventually leads to a contradiction.\footnote{The rule involved there
   13.23 + is negation elimination; it holds in intuitionistic logic as well.}
   13.24 +*}
   13.25 +
   13.26 +theorem "((A --> B) --> A) --> A"
   13.27 +proof
   13.28 +  assume "(A --> B) --> A"
   13.29 +  show A
   13.30 +  proof (rule classical)
   13.31 +    assume "~ A"
   13.32 +    have "A --> B"
   13.33 +    proof
   13.34 +      assume A
   13.35 +      with `~ A` show B by contradiction
   13.36 +    qed
   13.37 +    with `(A --> B) --> A` show A ..
   13.38 +  qed
   13.39 +qed
   13.40 +
   13.41 +text {*
   13.42 + In the subsequent version the reasoning is rearranged by means of
   13.43 + ``weak assumptions'' (as introduced by \isacommand{presume}).  Before
   13.44 + assuming the negated goal $\neg A$, its intended consequence $A \impl
   13.45 + B$ is put into place in order to solve the main problem.
   13.46 + Nevertheless, we do not get anything for free, but have to establish
   13.47 + $A \impl B$ later on.  The overall effect is that of a logical
   13.48 + \emph{cut}.
   13.49 +
   13.50 + Technically speaking, whenever some goal is solved by
   13.51 + \isacommand{show} in the context of weak assumptions then the latter
   13.52 + give rise to new subgoals, which may be established separately.  In
   13.53 + contrast, strong assumptions (as introduced by \isacommand{assume})
   13.54 + are solved immediately.
   13.55 +*}
   13.56 +
   13.57 +theorem "((A --> B) --> A) --> A"
   13.58 +proof
   13.59 +  assume "(A --> B) --> A"
   13.60 +  show A
   13.61 +  proof (rule classical)
   13.62 +    presume "A --> B"
   13.63 +    with `(A --> B) --> A` show A ..
   13.64 +  next
   13.65 +    assume "~ A"
   13.66 +    show "A --> B"
   13.67 +    proof
   13.68 +      assume A
   13.69 +      with `~ A` show B by contradiction
   13.70 +    qed
   13.71 +  qed
   13.72 +qed
   13.73 +
   13.74 +text {*
   13.75 + Note that the goals stemming from weak assumptions may be even left
   13.76 + until qed time, where they get eventually solved ``by assumption'' as
   13.77 + well.  In that case there is really no fundamental difference between
   13.78 + the two kinds of assumptions, apart from the order of reducing the
   13.79 + individual parts of the proof configuration.
   13.80 +
   13.81 + Nevertheless, the ``strong'' mode of plain assumptions is quite
   13.82 + important in practice to achieve robustness of proof text
   13.83 + interpretation.  By forcing both the conclusion \emph{and} the
   13.84 + assumptions to unify with the pending goal to be solved, goal
   13.85 + selection becomes quite deterministic.  For example, decomposition
   13.86 + with rules of the ``case-analysis'' type usually gives rise to
   13.87 + several goals that only differ in their local contexts.  With strong
   13.88 + assumptions these may be still solved in any order in a predictable
   13.89 + way, while weak ones would quickly lead to great confusion,
   13.90 + eventually demanding even some backtracking.
   13.91 +*}
   13.92 +
   13.93 +end
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/src/HOL/Isar_Examples/Puzzle.thy	Tue Oct 20 19:37:09 2009 +0200
    14.3 @@ -0,0 +1,85 @@
    14.4 +header {* An old chestnut *}
    14.5 +
    14.6 +theory Puzzle
    14.7 +imports Main
    14.8 +begin
    14.9 +
   14.10 +text_raw {*
   14.11 +  \footnote{A question from ``Bundeswettbewerb Mathematik''.  Original
   14.12 +  pen-and-paper proof due to Herbert Ehler; Isabelle tactic script by
   14.13 +  Tobias Nipkow.}
   14.14 +*}
   14.15 +
   14.16 +text {*
   14.17 +  \textbf{Problem.}  Given some function $f\colon \Nat \to \Nat$ such
   14.18 +  that $f \ap (f \ap n) < f \ap (\idt{Suc} \ap n)$ for all $n$.
   14.19 +  Demonstrate that $f$ is the identity.
   14.20 +*}
   14.21 +
   14.22 +theorem
   14.23 +  assumes f_ax: "\<And>n. f (f n) < f (Suc n)"
   14.24 +  shows "f n = n"
   14.25 +proof (rule order_antisym)
   14.26 +  {
   14.27 +    fix n show "n \<le> f n"
   14.28 +    proof (induct k \<equiv> "f n" arbitrary: n rule: less_induct)
   14.29 +      case (less k n)
   14.30 +      then have hyp: "\<And>m. f m < f n \<Longrightarrow> m \<le> f m" by (simp only:)
   14.31 +      show "n \<le> f n"
   14.32 +      proof (cases n)
   14.33 +        case (Suc m)
   14.34 +        from f_ax have "f (f m) < f n" by (simp only: Suc)
   14.35 +        with hyp have "f m \<le> f (f m)" .
   14.36 +        also from f_ax have "\<dots> < f n" by (simp only: Suc)
   14.37 +        finally have "f m < f n" .
   14.38 +        with hyp have "m \<le> f m" .
   14.39 +        also note `\<dots> < f n`
   14.40 +        finally have "m < f n" .
   14.41 +        then have "n \<le> f n" by (simp only: Suc)
   14.42 +        then show ?thesis .
   14.43 +      next
   14.44 +        case 0
   14.45 +        then show ?thesis by simp
   14.46 +      qed
   14.47 +    qed
   14.48 +  } note ge = this
   14.49 +
   14.50 +  {
   14.51 +    fix m n :: nat
   14.52 +    assume "m \<le> n"
   14.53 +    then have "f m \<le> f n"
   14.54 +    proof (induct n)
   14.55 +      case 0
   14.56 +      then have "m = 0" by simp
   14.57 +      then show ?case by simp
   14.58 +    next
   14.59 +      case (Suc n)
   14.60 +      from Suc.prems show "f m \<le> f (Suc n)"
   14.61 +      proof (rule le_SucE)
   14.62 +        assume "m \<le> n"
   14.63 +        with Suc.hyps have "f m \<le> f n" .
   14.64 +        also from ge f_ax have "\<dots> < f (Suc n)"
   14.65 +          by (rule le_less_trans)
   14.66 +        finally show ?thesis by simp
   14.67 +      next
   14.68 +        assume "m = Suc n"
   14.69 +        then show ?thesis by simp
   14.70 +      qed
   14.71 +    qed
   14.72 +  } note mono = this
   14.73 +
   14.74 +  show "f n \<le> n"
   14.75 +  proof -
   14.76 +    have "\<not> n < f n"
   14.77 +    proof
   14.78 +      assume "n < f n"
   14.79 +      then have "Suc n \<le> f n" by simp
   14.80 +      then have "f (Suc n) \<le> f (f n)" by (rule mono)
   14.81 +      also have "\<dots> < f (Suc n)" by (rule f_ax)
   14.82 +      finally have "\<dots> < \<dots>" . then show False ..
   14.83 +    qed
   14.84 +    then show ?thesis by simp
   14.85 +  qed
   14.86 +qed
   14.87 +
   14.88 +end
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/src/HOL/Isar_Examples/README.html	Tue Oct 20 19:37:09 2009 +0200
    15.3 @@ -0,0 +1,21 @@
    15.4 +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
    15.5 +
    15.6 +<!-- $Id$ -->
    15.7 +
    15.8 +<html>
    15.9 +
   15.10 +<head>
   15.11 +  <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
   15.12 +  <title>HOL/Isar_Examples</title>
   15.13 +</head>
   15.14 +
   15.15 +<body>
   15.16 +<h1>HOL/Isar_Examples</h1>
   15.17 +
   15.18 +Isar offers a new high-level proof (and theory) language interface to
   15.19 +Isabelle.  This directory contains some example Isar documents.  See
   15.20 +also the included document, or the <a
   15.21 +href="http://isabelle.in.tum.de/Isar/">Isabelle/Isar page</a> for more
   15.22 +information.
   15.23 +</body>
   15.24 +</html>
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/src/HOL/Isar_Examples/ROOT.ML	Tue Oct 20 19:37:09 2009 +0200
    16.3 @@ -0,0 +1,18 @@
    16.4 +(* Miscellaneous Isabelle/Isar examples for Higher-Order Logic. *)
    16.5 +
    16.6 +no_document use_thys ["../Old_Number_Theory/Primes", "../Old_Number_Theory/Fibonacci"];
    16.7 +
    16.8 +use_thys [
    16.9 +  "Basic_Logic",
   16.10 +  "Cantor",
   16.11 +  "Peirce",
   16.12 +  "Drinker",
   16.13 +  "Expr_Compiler",
   16.14 +  "Group",
   16.15 +  "Summation",
   16.16 +  "Knaster_Tarski",
   16.17 +  "Mutilated_Checkerboard",
   16.18 +  "Puzzle",
   16.19 +  "Nested_Datatype",
   16.20 +  "Hoare_Ex"
   16.21 +];
    17.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.2 +++ b/src/HOL/Isar_Examples/Summation.thy	Tue Oct 20 19:37:09 2009 +0200
    17.3 @@ -0,0 +1,158 @@
    17.4 +(*  Title:      HOL/Isar_Examples/Summation.thy
    17.5 +    Author:     Markus Wenzel
    17.6 +*)
    17.7 +
    17.8 +header {* Summing natural numbers *}
    17.9 +
   17.10 +theory Summation
   17.11 +imports Main
   17.12 +begin
   17.13 +
   17.14 +text_raw {*
   17.15 + \footnote{This example is somewhat reminiscent of the
   17.16 + \url{http://isabelle.in.tum.de/library/HOL/ex/NatSum.html}, which is
   17.17 + discussed in \cite{isabelle-ref} in the context of permutative
   17.18 + rewrite rules and ordered rewriting.}
   17.19 +*}
   17.20 +
   17.21 +text {*
   17.22 + Subsequently, we prove some summation laws of natural numbers
   17.23 + (including odds, squares, and cubes).  These examples demonstrate how
   17.24 + plain natural deduction (including induction) may be combined with
   17.25 + calculational proof.
   17.26 +*}
   17.27 +
   17.28 +
   17.29 +subsection {* Summation laws *}
   17.30 +
   17.31 +text {*
   17.32 + The sum of natural numbers $0 + \cdots + n$ equals $n \times (n +
   17.33 + 1)/2$.  Avoiding formal reasoning about division we prove this
   17.34 + equation multiplied by $2$.
   17.35 +*}
   17.36 +
   17.37 +theorem sum_of_naturals:
   17.38 +  "2 * (\<Sum>i::nat=0..n. i) = n * (n + 1)"
   17.39 +  (is "?P n" is "?S n = _")
   17.40 +proof (induct n)
   17.41 +  show "?P 0" by simp
   17.42 +next
   17.43 +  fix n have "?S (n + 1) = ?S n + 2 * (n + 1)" by simp
   17.44 +  also assume "?S n = n * (n + 1)"
   17.45 +  also have "... + 2 * (n + 1) = (n + 1) * (n + 2)" by simp
   17.46 +  finally show "?P (Suc n)" by simp
   17.47 +qed
   17.48 +
   17.49 +text {*
   17.50 + The above proof is a typical instance of mathematical induction.  The
   17.51 + main statement is viewed as some $\var{P} \ap n$ that is split by the
   17.52 + induction method into base case $\var{P} \ap 0$, and step case
   17.53 + $\var{P} \ap n \Impl \var{P} \ap (\idt{Suc} \ap n)$ for arbitrary $n$.
   17.54 +
   17.55 + The step case is established by a short calculation in forward
   17.56 + manner.  Starting from the left-hand side $\var{S} \ap (n + 1)$ of
   17.57 + the thesis, the final result is achieved by transformations involving
   17.58 + basic arithmetic reasoning (using the Simplifier).  The main point is
   17.59 + where the induction hypothesis $\var{S} \ap n = n \times (n + 1)$ is
   17.60 + introduced in order to replace a certain subterm.  So the
   17.61 + ``transitivity'' rule involved here is actual \emph{substitution}.
   17.62 + Also note how the occurrence of ``\dots'' in the subsequent step
   17.63 + documents the position where the right-hand side of the hypothesis
   17.64 + got filled in.
   17.65 +
   17.66 + \medskip A further notable point here is integration of calculations
   17.67 + with plain natural deduction.  This works so well in Isar for two
   17.68 + reasons.
   17.69 + \begin{enumerate}
   17.70 +
   17.71 + \item Facts involved in \isakeyword{also}~/ \isakeyword{finally}
   17.72 + calculational chains may be just anything.  There is nothing special
   17.73 + about \isakeyword{have}, so the natural deduction element
   17.74 + \isakeyword{assume} works just as well.
   17.75 +
   17.76 + \item There are two \emph{separate} primitives for building natural
   17.77 + deduction contexts: \isakeyword{fix}~$x$ and \isakeyword{assume}~$A$.
   17.78 + Thus it is possible to start reasoning with some new ``arbitrary, but
   17.79 + fixed'' elements before bringing in the actual assumption.  In
   17.80 + contrast, natural deduction is occasionally formalized with basic
   17.81 + context elements of the form $x:A$ instead.
   17.82 +
   17.83 + \end{enumerate}
   17.84 +*}
   17.85 +
   17.86 +text {*
   17.87 + \medskip We derive further summation laws for odds, squares, and
   17.88 + cubes as follows.  The basic technique of induction plus calculation
   17.89 + is the same as before.
   17.90 +*}
   17.91 +
   17.92 +theorem sum_of_odds:
   17.93 +  "(\<Sum>i::nat=0..<n. 2 * i + 1) = n^Suc (Suc 0)"
   17.94 +  (is "?P n" is "?S n = _")
   17.95 +proof (induct n)
   17.96 +  show "?P 0" by simp
   17.97 +next
   17.98 +  fix n have "?S (n + 1) = ?S n + 2 * n + 1" by simp
   17.99 +  also assume "?S n = n^Suc (Suc 0)"
  17.100 +  also have "... + 2 * n + 1 = (n + 1)^Suc (Suc 0)" by simp
  17.101 +  finally show "?P (Suc n)" by simp
  17.102 +qed
  17.103 +
  17.104 +text {*
  17.105 + Subsequently we require some additional tweaking of Isabelle built-in
  17.106 + arithmetic simplifications, such as bringing in distributivity by
  17.107 + hand.
  17.108 +*}
  17.109 +
  17.110 +lemmas distrib = add_mult_distrib add_mult_distrib2
  17.111 +
  17.112 +theorem sum_of_squares:
  17.113 +  "6 * (\<Sum>i::nat=0..n. i^Suc (Suc 0)) = n * (n + 1) * (2 * n + 1)"
  17.114 +  (is "?P n" is "?S n = _")
  17.115 +proof (induct n)
  17.116 +  show "?P 0" by simp
  17.117 +next
  17.118 +  fix n have "?S (n + 1) = ?S n + 6 * (n + 1)^Suc (Suc 0)"
  17.119 +    by (simp add: distrib)
  17.120 +  also assume "?S n = n * (n + 1) * (2 * n + 1)"
  17.121 +  also have "... + 6 * (n + 1)^Suc (Suc 0) =
  17.122 +    (n + 1) * (n + 2) * (2 * (n + 1) + 1)" by (simp add: distrib)
  17.123 +  finally show "?P (Suc n)" by simp
  17.124 +qed
  17.125 +
  17.126 +theorem sum_of_cubes:
  17.127 +  "4 * (\<Sum>i::nat=0..n. i^3) = (n * (n + 1))^Suc (Suc 0)"
  17.128 +  (is "?P n" is "?S n = _")
  17.129 +proof (induct n)
  17.130 +  show "?P 0" by (simp add: power_eq_if)
  17.131 +next
  17.132 +  fix n have "?S (n + 1) = ?S n + 4 * (n + 1)^3"
  17.133 +    by (simp add: power_eq_if distrib)
  17.134 +  also assume "?S n = (n * (n + 1))^Suc (Suc 0)"
  17.135 +  also have "... + 4 * (n + 1)^3 = ((n + 1) * ((n + 1) + 1))^Suc (Suc 0)"
  17.136 +    by (simp add: power_eq_if distrib)
  17.137 +  finally show "?P (Suc n)" by simp
  17.138 +qed
  17.139 +
  17.140 +text {*
  17.141 + Comparing these examples with the tactic script version
  17.142 + \url{http://isabelle.in.tum.de/library/HOL/ex/NatSum.html}, we note
  17.143 + an important difference of how induction vs.\ simplification is
  17.144 + applied.  While \cite[\S10]{isabelle-ref} advises for these examples
  17.145 + that ``induction should not be applied until the goal is in the
  17.146 + simplest form'' this would be a very bad idea in our setting.
  17.147 +
  17.148 + Simplification normalizes all arithmetic expressions involved,
  17.149 + producing huge intermediate goals.  With applying induction
  17.150 + afterwards, the Isar proof text would have to reflect the emerging
  17.151 + configuration by appropriate sub-proofs.  This would result in badly
  17.152 + structured, low-level technical reasoning, without any good idea of
  17.153 + the actual point.
  17.154 +
  17.155 + \medskip As a general rule of good proof style, automatic methods
   17.156 + such as $\idt{simp}$ or $\idt{auto}$ should normally never be used as
  17.157 + initial proof methods, but only as terminal ones, solving certain
  17.158 + goals completely.
  17.159 +*}
  17.160 +
  17.161 +end
    18.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.2 +++ b/src/HOL/Isar_Examples/document/proof.sty	Tue Oct 20 19:37:09 2009 +0200
    18.3 @@ -0,0 +1,254 @@
    18.4 +%       proof.sty       (Proof Figure Macros)
    18.5 +%
    18.6 +%       version 1.0
    18.7 +%       October 13, 1990
    18.8 +%       Copyright (C) 1990 Makoto Tatsuta (tatsuta@riec.tohoku.ac.jp)
    18.9 +%
   18.10 +% This program is free software; you can redistribute it or modify
   18.11 +% it under the terms of the GNU General Public License as published by
   18.12 +% the Free Software Foundation; either versions 1, or (at your option)
   18.13 +% any later version.
   18.14 +%
   18.15 +% This program is distributed in the hope that it will be useful
   18.16 +% but WITHOUT ANY WARRANTY; without even the implied warranty of
   18.17 +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   18.18 +% GNU General Public License for more details.
   18.19 +%
   18.20 +%       Usage:
   18.21 +%               In \documentstyle, specify an optional style `proof', say,
   18.22 +%                       \documentstyle[proof]{article}.
   18.23 +%
   18.24 +%       The following macros are available:
   18.25 +%
   18.26 +%       In all the following macros, all the arguments such as
   18.27 +%       <Lowers> and <Uppers> are processed in math mode.
   18.28 +%
   18.29 +%       \infer<Lower><Uppers>
   18.30 +%               draws an inference.
   18.31 +%
   18.32 +%               Use & in <Uppers> to delimit upper formulae.
   18.33 +%               <Uppers> consists more than 0 formulae.
   18.34 +%
   18.35 +%               \infer returns \hbox{ ... } or \vbox{ ... } and
   18.36 +%               sets \@LeftOffset and \@RightOffset globally.
   18.37 +%
   18.38 +%       \infer[<Label>]<Lower><Uppers>
   18.39 +%               draws an inference labeled with <Label>.
   18.40 +%
   18.41 +%       \infer*<Lower><Uppers>
   18.42 +%               draws a many step deduction.
   18.43 +%
   18.44 +%       \infer*[<Label>]<Lower><Uppers>
   18.45 +%               draws a many step deduction labeled with <Label>.
   18.46 +%
   18.47 +%       \deduce<Lower><Uppers>
   18.48 +%               draws an inference without a rule.
   18.49 +%
   18.50 +%       \deduce[<Proof>]<Lower><Uppers>
   18.51 +%               draws a many step deduction with a proof name.
   18.52 +%
   18.53 +%       Example:
   18.54 +%               If you want to write
   18.55 +%                           B C
   18.56 +%                          -----
   18.57 +%                      A     D
   18.58 +%                     ----------
   18.59 +%                         E
   18.60 +%       use
   18.61 +%               \infer{E}{
   18.62 +%                       A
   18.63 +%                       &
   18.64 +%                       \infer{D}{B & C}
   18.65 +%               }
   18.66 +%
   18.67 +
   18.68 +%       Style Parameters
   18.69 +
   18.70 +\newdimen\inferLineSkip         \inferLineSkip=2pt
   18.71 +\newdimen\inferLabelSkip        \inferLabelSkip=5pt
   18.72 +\def\inferTabSkip{\quad}
   18.73 +
   18.74 +%       Variables
   18.75 +
   18.76 +\newdimen\@LeftOffset   % global
   18.77 +\newdimen\@RightOffset  % global
   18.78 +\newdimen\@SavedLeftOffset      % safe from users
   18.79 +
   18.80 +\newdimen\UpperWidth
   18.81 +\newdimen\LowerWidth
   18.82 +\newdimen\LowerHeight
   18.83 +\newdimen\UpperLeftOffset
   18.84 +\newdimen\UpperRightOffset
   18.85 +\newdimen\UpperCenter
   18.86 +\newdimen\LowerCenter
   18.87 +\newdimen\UpperAdjust
   18.88 +\newdimen\RuleAdjust
   18.89 +\newdimen\LowerAdjust
   18.90 +\newdimen\RuleWidth
   18.91 +\newdimen\HLabelAdjust
   18.92 +\newdimen\VLabelAdjust
   18.93 +\newdimen\WidthAdjust
   18.94 +
   18.95 +\newbox\@UpperPart
   18.96 +\newbox\@LowerPart
   18.97 +\newbox\@LabelPart
   18.98 +\newbox\ResultBox
   18.99 +
  18.100 +%       Flags
  18.101 +
  18.102 +\newif\if@inferRule     % whether \@infer draws a rule.
  18.103 +\newif\if@ReturnLeftOffset      % whether \@infer returns \@LeftOffset.
  18.104 +\newif\if@MathSaved     % whether inner math mode where \infer or
  18.105 +                        % \deduce appears.
  18.106 +
  18.107 +%       Special Fonts
  18.108 +
  18.109 +\def\DeduceSym{\vtop{\baselineskip4\p@ \lineskiplimit\z@
  18.110 +    \vbox{\hbox{.}\hbox{.}\hbox{.}}\hbox{.}}}
  18.111 +
  18.112 +%       Math Save Macros
  18.113 +%
   18.114 +%       \@SaveMath is called in the very beginning of toplevel macros
  18.115 +%       which are \infer and \deduce.
  18.116 +%       \@RestoreMath is called in the very last before toplevel macros end.
  18.117 +%       Remark \infer and \deduce ends calling \@infer.
  18.118 +
  18.119 +%\def\@SaveMath{\@MathSavedfalse \ifmmode \ifinner
  18.120 +%        \relax $\relax \@MathSavedtrue \fi\fi }
  18.121 +%
  18.122 +%\def\@RestoreMath{\if@MathSaved \relax $\relax\fi }
  18.123 +
  18.124 +\def\@SaveMath{\relax}
  18.125 +\def\@RestoreMath{\relax}
  18.126 +
  18.127 +
  18.128 +%       Macros
  18.129 +
  18.130 +\def\@ifEmpty#1#2#3{\def\@tempa{\@empty}\def\@tempb{#1}\relax
  18.131 +        \ifx \@tempa \@tempb #2\else #3\fi }
  18.132 +
  18.133 +\def\infer{\@SaveMath \@ifnextchar *{\@inferSteps}{\@inferOneStep}}
  18.134 +
  18.135 +\def\@inferOneStep{\@inferRuletrue
  18.136 +        \@ifnextchar [{\@infer}{\@infer[\@empty]}}
  18.137 +
  18.138 +\def\@inferSteps*{\@ifnextchar [{\@@inferSteps}{\@@inferSteps[\@empty]}}
  18.139 +
  18.140 +\def\@@inferSteps[#1]{\@deduce{#1}[\DeduceSym]}
  18.141 +
  18.142 +\def\deduce{\@SaveMath \@ifnextchar [{\@deduce{\@empty}}
  18.143 +        {\@inferRulefalse \@infer[\@empty]}}
  18.144 +
  18.145 +%       \@deduce<Proof Label>[<Proof>]<Lower><Uppers>
  18.146 +
  18.147 +\def\@deduce#1[#2]#3#4{\@inferRulefalse
  18.148 +        \@infer[\@empty]{#3}{\@SaveMath \@infer[{#1}]{#2}{#4}}}
  18.149 +
  18.150 +%       \@infer[<Label>]<Lower><Uppers>
  18.151 +%               If \@inferRuletrue, draws a rule and <Label> is right to
  18.152 +%               a rule.
  18.153 +%               Otherwise, draws no rule and <Label> is right to <Lower>.
  18.154 +
  18.155 +\def\@infer[#1]#2#3{\relax
  18.156 +% Get parameters
  18.157 +        \if@ReturnLeftOffset \else \@SavedLeftOffset=\@LeftOffset \fi
  18.158 +        \setbox\@LabelPart=\hbox{$#1$}\relax
  18.159 +        \setbox\@LowerPart=\hbox{$#2$}\relax
  18.160 +%
  18.161 +        \global\@LeftOffset=0pt
  18.162 +        \setbox\@UpperPart=\vbox{\tabskip=0pt \halign{\relax
  18.163 +                \global\@RightOffset=0pt \@ReturnLeftOffsettrue $##$&&
  18.164 +                \inferTabSkip
  18.165 +                \global\@RightOffset=0pt \@ReturnLeftOffsetfalse $##$\cr
  18.166 +                #3\cr}}\relax
  18.167 +%                       Here is a little trick.
  18.168 +%                       \@ReturnLeftOffsettrue(false) influences on \infer or
  18.169 +%                       \deduce placed in ## locally
  18.170 +%                       because of \@SaveMath and \@RestoreMath.
  18.171 +        \UpperLeftOffset=\@LeftOffset
  18.172 +        \UpperRightOffset=\@RightOffset
  18.173 +% Calculate Adjustments
  18.174 +        \LowerWidth=\wd\@LowerPart
  18.175 +        \LowerHeight=\ht\@LowerPart
  18.176 +        \LowerCenter=0.5\LowerWidth
  18.177 +%
  18.178 +        \UpperWidth=\wd\@UpperPart \advance\UpperWidth by -\UpperLeftOffset
  18.179 +        \advance\UpperWidth by -\UpperRightOffset
  18.180 +        \UpperCenter=\UpperLeftOffset
  18.181 +        \advance\UpperCenter by 0.5\UpperWidth
  18.182 +%
  18.183 +        \ifdim \UpperWidth > \LowerWidth
  18.184 +                % \UpperCenter > \LowerCenter
  18.185 +        \UpperAdjust=0pt
  18.186 +        \RuleAdjust=\UpperLeftOffset
  18.187 +        \LowerAdjust=\UpperCenter \advance\LowerAdjust by -\LowerCenter
  18.188 +        \RuleWidth=\UpperWidth
  18.189 +        \global\@LeftOffset=\LowerAdjust
  18.190 +%
  18.191 +        \else   % \UpperWidth <= \LowerWidth
  18.192 +        \ifdim \UpperCenter > \LowerCenter
  18.193 +%
  18.194 +        \UpperAdjust=0pt
  18.195 +        \RuleAdjust=\UpperCenter \advance\RuleAdjust by -\LowerCenter
  18.196 +        \LowerAdjust=\RuleAdjust
  18.197 +        \RuleWidth=\LowerWidth
  18.198 +        \global\@LeftOffset=\LowerAdjust
  18.199 +%
  18.200 +        \else   % \UpperWidth <= \LowerWidth
  18.201 +                % \UpperCenter <= \LowerCenter
  18.202 +%
  18.203 +        \UpperAdjust=\LowerCenter \advance\UpperAdjust by -\UpperCenter
  18.204 +        \RuleAdjust=0pt
  18.205 +        \LowerAdjust=0pt
  18.206 +        \RuleWidth=\LowerWidth
  18.207 +        \global\@LeftOffset=0pt
  18.208 +%
  18.209 +        \fi\fi
  18.210 +% Make a box
  18.211 +        \if@inferRule
  18.212 +%
  18.213 +        \setbox\ResultBox=\vbox{
  18.214 +                \moveright \UpperAdjust \box\@UpperPart
  18.215 +                \nointerlineskip \kern\inferLineSkip
  18.216 +                \moveright \RuleAdjust \vbox{\hrule width\RuleWidth}\relax
  18.217 +                \nointerlineskip \kern\inferLineSkip
  18.218 +                \moveright \LowerAdjust \box\@LowerPart }\relax
  18.219 +%
  18.220 +        \@ifEmpty{#1}{}{\relax
  18.221 +%
  18.222 +        \HLabelAdjust=\wd\ResultBox     \advance\HLabelAdjust by -\RuleAdjust
  18.223 +        \advance\HLabelAdjust by -\RuleWidth
  18.224 +        \WidthAdjust=\HLabelAdjust
  18.225 +        \advance\WidthAdjust by -\inferLabelSkip
  18.226 +        \advance\WidthAdjust by -\wd\@LabelPart
  18.227 +        \ifdim \WidthAdjust < 0pt \WidthAdjust=0pt \fi
  18.228 +%
  18.229 +        \VLabelAdjust=\dp\@LabelPart
  18.230 +        \advance\VLabelAdjust by -\ht\@LabelPart
  18.231 +        \VLabelAdjust=0.5\VLabelAdjust  \advance\VLabelAdjust by \LowerHeight
  18.232 +        \advance\VLabelAdjust by \inferLineSkip
  18.233 +%
  18.234 +        \setbox\ResultBox=\hbox{\box\ResultBox
  18.235 +                \kern -\HLabelAdjust \kern\inferLabelSkip
  18.236 +                \raise\VLabelAdjust \box\@LabelPart \kern\WidthAdjust}\relax
  18.237 +%
  18.238 +        }\relax % end @ifEmpty
  18.239 +%
  18.240 +        \else % \@inferRulefalse
  18.241 +%
  18.242 +        \setbox\ResultBox=\vbox{
  18.243 +                \moveright \UpperAdjust \box\@UpperPart
  18.244 +                \nointerlineskip \kern\inferLineSkip
  18.245 +                \moveright \LowerAdjust \hbox{\unhbox\@LowerPart
  18.246 +                        \@ifEmpty{#1}{}{\relax
  18.247 +                        \kern\inferLabelSkip \unhbox\@LabelPart}}}\relax
  18.248 +        \fi
  18.249 +%
  18.250 +        \global\@RightOffset=\wd\ResultBox
  18.251 +        \global\advance\@RightOffset by -\@LeftOffset
  18.252 +        \global\advance\@RightOffset by -\LowerWidth
  18.253 +        \if@ReturnLeftOffset \else \global\@LeftOffset=\@SavedLeftOffset \fi
  18.254 +%
  18.255 +        \box\ResultBox
  18.256 +        \@RestoreMath
  18.257 +}
    19.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.2 +++ b/src/HOL/Isar_Examples/document/root.bib	Tue Oct 20 19:37:09 2009 +0200
    19.3 @@ -0,0 +1,91 @@
    19.4 +
    19.5 +@string{CUCL="Comp. Lab., Univ. Camb."}
    19.6 +@string{CUP="Cambridge University Press"}
    19.7 +@string{Springer="Springer-Verlag"}
    19.8 +@string{TUM="TU Munich"}
    19.9 +
   19.10 +@Book{Concrete-Math,
   19.11 +  author = 	 {R. L. Graham and D. E. Knuth and O. Patashnik},
   19.12 +  title = 	 {Concrete Mathematics},
   19.13 +  publisher = 	 {Addison-Wesley},
   19.14 +  year = 	 1989
   19.15 +}
   19.16 +
   19.17 +@InProceedings{Naraschewski-Wenzel:1998:HOOL,
   19.18 +  author	= {Wolfgang Naraschewski and Markus Wenzel},
   19.19 +  title		= {Object-Oriented Verification based on Record Subtyping in
   19.20 +                  {H}igher-{O}rder {L}ogic},
   19.21 +  crossref      = {tphols98}}
   19.22 +
   19.23 +@Article{Nipkow:1998:Winskel,
   19.24 +  author = 	 {Tobias Nipkow},
   19.25 +  title = 	 {Winskel is (almost) Right: Towards a Mechanized Semantics Textbook},
   19.26 +  journal = 	 {Formal Aspects of Computing},
   19.27 +  year = 	 1998,
   19.28 +  volume =	 10,
   19.29 +  pages =	 {171--186}
   19.30 +}
   19.31 +
   19.32 +@InProceedings{Wenzel:1999:TPHOL,
   19.33 +  author = 	 {Markus Wenzel},
   19.34 +  title = 	 {{Isar} --- a Generic Interpretative Approach to Readable Formal Proof Documents},
   19.35 +  crossref =     {tphols99}}
   19.36 +
   19.37 +@Book{Winskel:1993,
   19.38 +  author = 	 {G. Winskel},
   19.39 +  title = 	 {The Formal Semantics of Programming Languages},
   19.40 +  publisher = 	 {MIT Press},
   19.41 +  year = 	 1993
   19.42 +}
   19.43 +
   19.44 +@Book{davey-priestley,
   19.45 +  author	= {B. A. Davey and H. A. Priestley},
   19.46 +  title		= {Introduction to Lattices and Order},
   19.47 +  publisher	= CUP,
   19.48 +  year		= 1990}
   19.49 +
   19.50 +@manual{isabelle-HOL,
   19.51 +  author	= {Tobias Nipkow and Lawrence C. Paulson and Markus Wenzel},
   19.52 +  title		= {{Isabelle}'s Logics: {HOL}},
   19.53 +  institution	= {Institut f\"ur Informatik, Technische Universi\"at
   19.54 +                  M\"unchen and Computer Laboratory, University of Cambridge}}
   19.55 +
   19.56 +@manual{isabelle-intro,
   19.57 +  author	= {Lawrence C. Paulson},
   19.58 +  title		= {Introduction to {Isabelle}},
   19.59 +  institution	= CUCL}
   19.60 +
   19.61 +@manual{isabelle-isar-ref,
   19.62 +  author	= {Markus Wenzel},
   19.63 +  title		= {The {Isabelle/Isar} Reference Manual},
   19.64 +  institution	= TUM}
   19.65 +
   19.66 +@manual{isabelle-ref,
   19.67 +  author	= {Lawrence C. Paulson},
   19.68 +  title		= {The {Isabelle} Reference Manual},
   19.69 +  institution	= CUCL}
   19.70 +
   19.71 +@TechReport{paulson-mutilated-board,
   19.72 +  author = 	 {Lawrence C. Paulson},
   19.73 +  title = 	 {A Simple Formalization and Proof for the Mutilated Chess Board},
   19.74 +  institution =  CUCL,
   19.75 +  year = 	 1996,
   19.76 +  number =	 394,
   19.77 +  note = {\url{http://www.cl.cam.ac.uk/users/lcp/papers/Reports/mutil.pdf}}
   19.78 +}
   19.79 +
   19.80 +@Proceedings{tphols98,
   19.81 +  title		= {Theorem Proving in Higher Order Logics: {TPHOLs} '98},
   19.82 +  booktitle	= {Theorem Proving in Higher Order Logics: {TPHOLs} '98},
   19.83 +  editor	= {Jim Grundy and Malcom Newey},
   19.84 +  series	= {LNCS},
   19.85 +  volume        = 1479,
   19.86 +  year		= 1998}
   19.87 +
   19.88 +@Proceedings{tphols99,
   19.89 +  title		= {Theorem Proving in Higher Order Logics: {TPHOLs} '99},
   19.90 +  booktitle	= {Theorem Proving in Higher Order Logics: {TPHOLs} '99},
   19.91 +  editor	= {Bertot, Y. and Dowek, G. and Hirschowitz, A. and
   19.92 +                  Paulin, C. and Thery, L.},
   19.93 +  series	= {LNCS 1690},
   19.94 +  year		= 1999}
    20.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.2 +++ b/src/HOL/Isar_Examples/document/root.tex	Tue Oct 20 19:37:09 2009 +0200
    20.3 @@ -0,0 +1,30 @@
    20.4 +\input{style}
    20.5 +
    20.6 +\hyphenation{Isabelle}
    20.7 +
    20.8 +\begin{document}
    20.9 +
   20.10 +\title{Miscellaneous Isabelle/Isar examples for Higher-Order Logic}
   20.11 +\author{Markus Wenzel \\ \url{http://www.in.tum.de/~wenzelm/} \\[2ex]
   20.12 +  With contributions by Gertrud Bauer and Tobias Nipkow}
   20.13 +\maketitle
   20.14 +
   20.15 +\begin{abstract}
   20.16 +  Isar offers a high-level proof (and theory) language for Isabelle.
   20.17 +  We give various examples of Isabelle/Isar proof developments,
   20.18 +  ranging from simple demonstrations of certain language features to a
   20.19 +  bit more advanced applications.  The ``real'' applications of
   20.20 +  Isabelle/Isar are found elsewhere.
   20.21 +\end{abstract}
   20.22 +
   20.23 +\tableofcontents
   20.24 +
   20.25 +\parindent 0pt \parskip 0.5ex
   20.26 +
   20.27 +\input{session}
   20.28 +
   20.29 +\nocite{isabelle-isar-ref,Wenzel:1999:TPHOL}
   20.30 +\bibliographystyle{abbrv}
   20.31 +\bibliography{root}
   20.32 +
   20.33 +\end{document}
    21.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.2 +++ b/src/HOL/Isar_Examples/document/style.tex	Tue Oct 20 19:37:09 2009 +0200
    21.3 @@ -0,0 +1,40 @@
    21.4 +\documentclass[11pt,a4paper]{article}
    21.5 +\usepackage[only,bigsqcap]{stmaryrd}
    21.6 +\usepackage{ifthen,proof,amssymb,isabelle,isabellesym}
    21.7 +\isabellestyle{it}
    21.8 +\usepackage{pdfsetup}\urlstyle{rm}
    21.9 +
   21.10 +\renewcommand{\isamarkupheader}[1]{\section{#1}}
   21.11 +
   21.12 +\renewcommand{\isacommand}[1]
   21.13 +{\ifthenelse{\equal{sorry}{#1}}{$\;$\dummyproof}
   21.14 +  {\ifthenelse{\equal{oops}{#1}}{$\vdots$}{\isakeyword{#1}}}}
   21.15 +
   21.16 +\newcommand{\DUMMYPROOF}{{\langle\idt{proof}\rangle}}
   21.17 +\newcommand{\dummyproof}{$\DUMMYPROOF$}
   21.18 +
   21.19 +\newcommand{\name}[1]{\textsl{#1}}
   21.20 +
   21.21 +\newcommand{\idt}[1]{{\mathord{\mathit{#1}}}}
   21.22 +\newcommand{\var}[1]{{?\!\idt{#1}}}
   21.23 +\DeclareMathSymbol{\dshsym}{\mathalpha}{letters}{"2D}
   21.24 +\newcommand{\dsh}{\dshsym}
   21.25 +
   21.26 +\newcommand{\To}{\to}
   21.27 +\newcommand{\dt}{{\mathpunct.}}
   21.28 +\newcommand{\ap}{\mathbin{\!}}
   21.29 +\newcommand{\lam}[1]{\mathop{\lambda} #1\dt\;}
   21.30 +\newcommand{\all}[1]{\forall #1\dt\;}
   21.31 +\newcommand{\ex}[1]{\exists #1\dt\;}
   21.32 +\newcommand{\impl}{\to}
   21.33 +\newcommand{\conj}{\land}
   21.34 +\newcommand{\disj}{\lor}
   21.35 +\newcommand{\Impl}{\Longrightarrow}
   21.36 +
   21.37 +\newcommand{\Nat}{\mathord{\mathrm{I}\mkern-3.8mu\mathrm{N}}}
   21.38 +
   21.39 +
   21.40 +%%% Local Variables: 
   21.41 +%%% mode: latex
   21.42 +%%% TeX-master: "root"
   21.43 +%%% End: 
    22.1 --- a/src/HOL/Isar_examples/Basic_Logic.thy	Tue Oct 20 19:36:52 2009 +0200
    22.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.3 @@ -1,448 +0,0 @@
    22.4 -(*  Title:      HOL/Isar_examples/Basic_Logic.thy
    22.5 -    Author:     Markus Wenzel, TU Muenchen
    22.6 -
    22.7 -Basic propositional and quantifier reasoning.
    22.8 -*)
    22.9 -
   22.10 -header {* Basic logical reasoning *}
   22.11 -
   22.12 -theory Basic_Logic
   22.13 -imports Main
   22.14 -begin
   22.15 -
   22.16 -
   22.17 -subsection {* Pure backward reasoning *}
   22.18 -
   22.19 -text {*
   22.20 -  In order to get a first idea of how Isabelle/Isar proof documents
   22.21 -  may look like, we consider the propositions @{text I}, @{text K},
   22.22 -  and @{text S}.  The following (rather explicit) proofs should
   22.23 -  require little extra explanations.
   22.24 -*}
   22.25 -
   22.26 -lemma I: "A --> A"
   22.27 -proof
   22.28 -  assume A
   22.29 -  show A by fact
   22.30 -qed
   22.31 -
   22.32 -lemma K: "A --> B --> A"
   22.33 -proof
   22.34 -  assume A
   22.35 -  show "B --> A"
   22.36 -  proof
   22.37 -    show A by fact
   22.38 -  qed
   22.39 -qed
   22.40 -
   22.41 -lemma S: "(A --> B --> C) --> (A --> B) --> A --> C"
   22.42 -proof
   22.43 -  assume "A --> B --> C"
   22.44 -  show "(A --> B) --> A --> C"
   22.45 -  proof
   22.46 -    assume "A --> B"
   22.47 -    show "A --> C"
   22.48 -    proof
   22.49 -      assume A
   22.50 -      show C
   22.51 -      proof (rule mp)
   22.52 -        show "B --> C" by (rule mp) fact+
   22.53 -        show B by (rule mp) fact+
   22.54 -      qed
   22.55 -    qed
   22.56 -  qed
   22.57 -qed
   22.58 -
   22.59 -text {*
   22.60 -  Isar provides several ways to fine-tune the reasoning, avoiding
   22.61 -  excessive detail.  Several abbreviated language elements are
   22.62 -  available, enabling the writer to express proofs in a more concise
   22.63 -  way, even without referring to any automated proof tools yet.
   22.64 -
   22.65 -  First of all, proof by assumption may be abbreviated as a single
   22.66 -  dot.
   22.67 -*}
   22.68 -
   22.69 -lemma "A --> A"
   22.70 -proof
   22.71 -  assume A
   22.72 -  show A by fact+
   22.73 -qed
   22.74 -
   22.75 -text {*
   22.76 -  In fact, concluding any (sub-)proof already involves solving any
   22.77 -  remaining goals by assumption\footnote{This is not a completely
   22.78 -  trivial operation, as proof by assumption may involve full
   22.79 -  higher-order unification.}.  Thus we may skip the rather vacuous
   22.80 -  body of the above proof as well.
   22.81 -*}
   22.82 -
   22.83 -lemma "A --> A"
   22.84 -proof
   22.85 -qed
   22.86 -
   22.87 -text {*
   22.88 -  Note that the \isacommand{proof} command refers to the @{text rule}
   22.89 -  method (without arguments) by default.  Thus it implicitly applies a
   22.90 -  single rule, as determined from the syntactic form of the statements
   22.91 -  involved.  The \isacommand{by} command abbreviates any proof with
   22.92 -  empty body, so the proof may be further pruned.
   22.93 -*}
   22.94 -
   22.95 -lemma "A --> A"
   22.96 -  by rule
   22.97 -
   22.98 -text {*
   22.99 -  Proof by a single rule may be abbreviated as double-dot.
  22.100 -*}
  22.101 -
  22.102 -lemma "A --> A" ..
  22.103 -
  22.104 -text {*
  22.105 -  Thus we have arrived at an adequate representation of the proof of a
  22.106 -  tautology that holds by a single standard rule.\footnote{Apparently,
  22.107 -  the rule here is implication introduction.}
  22.108 -*}
  22.109 -
  22.110 -text {*
  22.111 -  Let us also reconsider @{text K}.  Its statement is composed of
  22.112 -  iterated connectives.  Basic decomposition is by a single rule at a
  22.113 -  time, which is why our first version above was by nesting two
  22.114 -  proofs.
  22.115 -
  22.116 -  The @{text intro} proof method repeatedly decomposes a goal's
  22.117 -  conclusion.\footnote{The dual method is @{text elim}, acting on a
  22.118 -  goal's premises.}
  22.119 -*}
  22.120 -
  22.121 -lemma "A --> B --> A"
  22.122 -proof (intro impI)
  22.123 -  assume A
  22.124 -  show A by fact
  22.125 -qed
  22.126 -
  22.127 -text {*
  22.128 -  Again, the body may be collapsed.
  22.129 -*}
  22.130 -
  22.131 -lemma "A --> B --> A"
  22.132 -  by (intro impI)
  22.133 -
  22.134 -text {*
  22.135 -  Just like @{text rule}, the @{text intro} and @{text elim} proof
  22.136 -  methods pick standard structural rules, in case no explicit
  22.137 -  arguments are given.  While implicit rules are usually just fine for
  22.138 -  single rule application, this may go too far with iteration.  Thus
  22.139 -  in practice, @{text intro} and @{text elim} would be typically
  22.140 -  restricted to certain structures by giving a few rules only, e.g.\
  22.141 -  \isacommand{proof}~@{text "(intro impI allI)"} to strip implications
  22.142 -  and universal quantifiers.
  22.143 -
  22.144 -  Such well-tuned iterated decomposition of certain structures is the
  22.145 -  prime application of @{text intro} and @{text elim}.  In contrast,
  22.146 -  terminal steps that solve a goal completely are usually performed by
  22.147 -  actual automated proof methods (such as \isacommand{by}~@{text
  22.148 -  blast}.
  22.149 -*}
  22.150 -
  22.151 -
  22.152 -subsection {* Variations of backward vs.\ forward reasoning *}
  22.153 -
  22.154 -text {*
  22.155 -  Certainly, any proof may be performed in backward-style only.  On
  22.156 -  the other hand, small steps of reasoning are often more naturally
  22.157 -  expressed in forward-style.  Isar supports both backward and forward
  22.158 -  reasoning as a first-class concept.  In order to demonstrate the
  22.159 -  difference, we consider several proofs of @{text "A \<and> B \<longrightarrow> B \<and> A"}.
  22.160 -
  22.161 -  The first version is purely backward.
  22.162 -*}
  22.163 -
  22.164 -lemma "A & B --> B & A"
  22.165 -proof
  22.166 -  assume "A & B"
  22.167 -  show "B & A"
  22.168 -  proof
  22.169 -    show B by (rule conjunct2) fact
  22.170 -    show A by (rule conjunct1) fact
  22.171 -  qed
  22.172 -qed
  22.173 -
  22.174 -text {*
  22.175 -  Above, the @{text "conjunct_1/2"} projection rules had to be named
  22.176 -  explicitly, since the goals @{text B} and @{text A} did not provide
  22.177 -  any structural clue.  This may be avoided using \isacommand{from} to
  22.178 -  focus on the @{text "A \<and> B"} assumption as the current facts,
  22.179 -  enabling the use of double-dot proofs.  Note that \isacommand{from}
  22.180 -  already does forward-chaining, involving the \name{conjE} rule here.
  22.181 -*}
  22.182 -
  22.183 -lemma "A & B --> B & A"
  22.184 -proof
  22.185 -  assume "A & B"
  22.186 -  show "B & A"
  22.187 -  proof
  22.188 -    from `A & B` show B ..
  22.189 -    from `A & B` show A ..
  22.190 -  qed
  22.191 -qed
  22.192 -
  22.193 -text {*
  22.194 -  In the next version, we move the forward step one level upwards.
  22.195 -  Forward-chaining from the most recent facts is indicated by the
  22.196 -  \isacommand{then} command.  Thus the proof of @{text "B \<and> A"} from
  22.197 -  @{text "A \<and> B"} actually becomes an elimination, rather than an
  22.198 -  introduction.  The resulting proof structure directly corresponds to
  22.199 -  that of the @{text conjE} rule, including the repeated goal
  22.200 -  proposition that is abbreviated as @{text ?thesis} below.
  22.201 -*}
  22.202 -
  22.203 -lemma "A & B --> B & A"
  22.204 -proof
  22.205 -  assume "A & B"
  22.206 -  then show "B & A"
  22.207 -  proof                    -- {* rule @{text conjE} of @{text "A \<and> B"} *}
  22.208 -    assume B A
  22.209 -    then show ?thesis ..   -- {* rule @{text conjI} of @{text "B \<and> A"} *}
  22.210 -  qed
  22.211 -qed
  22.212 -
  22.213 -text {*
  22.214 -  In the subsequent version we flatten the structure of the main body
  22.215 -  by doing forward reasoning all the time.  Only the outermost
  22.216 -  decomposition step is left as backward.
  22.217 -*}
  22.218 -
  22.219 -lemma "A & B --> B & A"
  22.220 -proof
  22.221 -  assume "A & B"
  22.222 -  from `A & B` have A ..
  22.223 -  from `A & B` have B ..
  22.224 -  from `B` `A` show "B & A" ..
  22.225 -qed
  22.226 -
  22.227 -text {*
  22.228 -  We can still push forward-reasoning a bit further, even at the risk
  22.229 -  of getting ridiculous.  Note that we force the initial proof step to
  22.230 -  do nothing here, by referring to the ``-'' proof method.
  22.231 -*}
  22.232 -
  22.233 -lemma "A & B --> B & A"
  22.234 -proof -
  22.235 -  {
  22.236 -    assume "A & B"
  22.237 -    from `A & B` have A ..
  22.238 -    from `A & B` have B ..
  22.239 -    from `B` `A` have "B & A" ..
  22.240 -  }
  22.241 -  then show ?thesis ..         -- {* rule \name{impI} *}
  22.242 -qed
  22.243 -
  22.244 -text {*
  22.245 -  \medskip With these examples we have shifted through a whole range
  22.246 -  from purely backward to purely forward reasoning.  Apparently, in
  22.247 -  the extreme ends we get slightly ill-structured proofs, which also
  22.248 -  require much explicit naming of either rules (backward) or local
  22.249 -  facts (forward).
  22.250 -
  22.251 -  The general lesson learned here is that good proof style would
  22.252 -  achieve just the \emph{right} balance of top-down backward
  22.253 -  decomposition, and bottom-up forward composition.  In general, there
  22.254 -  is no single best way to arrange some pieces of formal reasoning, of
  22.255 -  course.  Depending on the actual applications, the intended audience
  22.256 -  etc., rules (and methods) on the one hand vs.\ facts on the other
  22.257 -  hand have to be emphasized in an appropriate way.  This requires the
  22.258 -  proof writer to develop good taste, and some practice, of course.
  22.259 -*}
  22.260 -
  22.261 -text {*
  22.262 -  For our example the most appropriate way of reasoning is probably
  22.263 -  the middle one, with conjunction introduction done after
  22.264 -  elimination.
  22.265 -*}
  22.266 -
  22.267 -lemma "A & B --> B & A"
  22.268 -proof
  22.269 -  assume "A & B"
  22.270 -  then show "B & A"
  22.271 -  proof
  22.272 -    assume B A
  22.273 -    then show ?thesis ..
  22.274 -  qed
  22.275 -qed
  22.276 -
  22.277 -
  22.278 -
  22.279 -subsection {* A few examples from ``Introduction to Isabelle'' *}
  22.280 -
  22.281 -text {*
  22.282 -  We rephrase some of the basic reasoning examples of
  22.283 -  \cite{isabelle-intro}, using HOL rather than FOL.
  22.284 -*}
  22.285 -
  22.286 -subsubsection {* A propositional proof *}
  22.287 -
  22.288 -text {*
  22.289 -  We consider the proposition @{text "P \<or> P \<longrightarrow> P"}.  The proof below
  22.290 -  involves forward-chaining from @{text "P \<or> P"}, followed by an
  22.291 -  explicit case-analysis on the two \emph{identical} cases.
  22.292 -*}
  22.293 -
  22.294 -lemma "P | P --> P"
  22.295 -proof
  22.296 -  assume "P | P"
  22.297 -  then show P
  22.298 -  proof                    -- {*
  22.299 -    rule @{text disjE}: \smash{$\infer{C}{A \disj B & \infer*{C}{[A]} & \infer*{C}{[B]}}$}
  22.300 -  *}
  22.301 -    assume P show P by fact
  22.302 -  next
  22.303 -    assume P show P by fact
  22.304 -  qed
  22.305 -qed
  22.306 -
  22.307 -text {*
  22.308 -  Case splits are \emph{not} hardwired into the Isar language as a
  22.309 -  special feature.  The \isacommand{next} command used to separate the
  22.310 -  cases above is just a short form of managing block structure.
  22.311 -
  22.312 -  \medskip In general, applying proof methods may split up a goal into
  22.313 -  separate ``cases'', i.e.\ new subgoals with individual local
  22.314 -  assumptions.  The corresponding proof text typically mimics this by
  22.315 -  establishing results in appropriate contexts, separated by blocks.
  22.316 -
  22.317 -  In order to avoid too much explicit parentheses, the Isar system
  22.318 -  implicitly opens an additional block for any new goal, the
  22.319 -  \isacommand{next} statement then closes one block level, opening a
  22.320 -  new one.  The resulting behavior is what one would expect from
  22.321 -  separating cases, only that it is more flexible.  E.g.\ an induction
  22.322 -  base case (which does not introduce local assumptions) would
  22.323 -  \emph{not} require \isacommand{next} to separate the subsequent step
  22.324 -  case.
  22.325 -
  22.326 -  \medskip In our example the situation is even simpler, since the two
  22.327 -  cases actually coincide.  Consequently the proof may be rephrased as
  22.328 -  follows.
  22.329 -*}
  22.330 -
  22.331 -lemma "P | P --> P"
  22.332 -proof
  22.333 -  assume "P | P"
  22.334 -  then show P
  22.335 -  proof
  22.336 -    assume P
  22.337 -    show P by fact
  22.338 -    show P by fact
  22.339 -  qed
  22.340 -qed
  22.341 -
  22.342 -text {*
  22.343 -  Again, the rather vacuous body of the proof may be collapsed.  Thus
  22.344 -  the case analysis degenerates into two assumption steps, which are
  22.345 -  implicitly performed when concluding the single rule step of the
  22.346 -  double-dot proof as follows.
  22.347 -*}
  22.348 -
  22.349 -lemma "P | P --> P"
  22.350 -proof
  22.351 -  assume "P | P"
  22.352 -  then show P ..
  22.353 -qed
  22.354 -
  22.355 -
  22.356 -subsubsection {* A quantifier proof *}
  22.357 -
  22.358 -text {*
  22.359 -  To illustrate quantifier reasoning, let us prove @{text "(\<exists>x. P (f
  22.360 -  x)) \<longrightarrow> (\<exists>y. P y)"}.  Informally, this holds because any @{text a}
  22.361 -  with @{text "P (f a)"} may be taken as a witness for the second
  22.362 -  existential statement.
  22.363 -
  22.364 -  The first proof is rather verbose, exhibiting quite a lot of
  22.365 -  (redundant) detail.  It gives explicit rules, even with some
  22.366 -  instantiation.  Furthermore, we encounter two new language elements:
  22.367 -  the \isacommand{fix} command augments the context by some new
  22.368 -  ``arbitrary, but fixed'' element; the \isacommand{is} annotation
  22.369 -  binds term abbreviations by higher-order pattern matching.
  22.370 -*}
  22.371 -
  22.372 -lemma "(EX x. P (f x)) --> (EX y. P y)"
  22.373 -proof
  22.374 -  assume "EX x. P (f x)"
  22.375 -  then show "EX y. P y"
  22.376 -  proof (rule exE)             -- {*
  22.377 -    rule \name{exE}: \smash{$\infer{B}{\ex x A(x) & \infer*{B}{[A(x)]_x}}$}
  22.378 -  *}
  22.379 -    fix a
  22.380 -    assume "P (f a)" (is "P ?witness")
  22.381 -    then show ?thesis by (rule exI [of P ?witness])
  22.382 -  qed
  22.383 -qed
  22.384 -
  22.385 -text {*
  22.386 -  While explicit rule instantiation may occasionally improve
  22.387 -  readability of certain aspects of reasoning, it is usually quite
  22.388 -  redundant.  Above, the basic proof outline gives already enough
  22.389 -  structural clues for the system to infer both the rules and their
  22.390 -  instances (by higher-order unification).  Thus we may as well prune
  22.391 -  the text as follows.
  22.392 -*}
  22.393 -
  22.394 -lemma "(EX x. P (f x)) --> (EX y. P y)"
  22.395 -proof
  22.396 -  assume "EX x. P (f x)"
  22.397 -  then show "EX y. P y"
  22.398 -  proof
  22.399 -    fix a
  22.400 -    assume "P (f a)"
  22.401 -    then show ?thesis ..
  22.402 -  qed
  22.403 -qed
  22.404 -
  22.405 -text {*
  22.406 -  Explicit @{text \<exists>}-elimination as seen above can become quite
  22.407 -  cumbersome in practice.  The derived Isar language element
  22.408 -  ``\isakeyword{obtain}'' provides a more handsome way to do
  22.409 -  generalized existence reasoning.
  22.410 -*}
  22.411 -
  22.412 -lemma "(EX x. P (f x)) --> (EX y. P y)"
  22.413 -proof
  22.414 -  assume "EX x. P (f x)"
  22.415 -  then obtain a where "P (f a)" ..
  22.416 -  then show "EX y. P y" ..
  22.417 -qed
  22.418 -
  22.419 -text {*
  22.420 -  Technically, \isakeyword{obtain} is similar to \isakeyword{fix} and
  22.421 -  \isakeyword{assume} together with a soundness proof of the
  22.422 -  elimination involved.  Thus it behaves similar to any other forward
  22.423 -  proof element.  Also note that due to the nature of general
  22.424 -  existence reasoning involved here, any result exported from the
  22.425 -  context of an \isakeyword{obtain} statement may \emph{not} refer to
  22.426 -  the parameters introduced there.
  22.427 -*}
  22.428 -
  22.429 -
  22.430 -
  22.431 -subsubsection {* Deriving rules in Isabelle *}
  22.432 -
  22.433 -text {*
  22.434 -  We derive the conjunction elimination rule from the corresponding
  22.435 -  projections.  The proof is quite straight-forward, since
  22.436 -  Isabelle/Isar supports non-atomic goals and assumptions fully
  22.437 -  transparently.
  22.438 -*}
  22.439 -
  22.440 -theorem conjE: "A & B ==> (A ==> B ==> C) ==> C"
  22.441 -proof -
  22.442 -  assume "A & B"
  22.443 -  assume r: "A ==> B ==> C"
  22.444 -  show C
  22.445 -  proof (rule r)
  22.446 -    show A by (rule conjunct1) fact
  22.447 -    show B by (rule conjunct2) fact
  22.448 -  qed
  22.449 -qed
  22.450 -
  22.451 -end
    23.1 --- a/src/HOL/Isar_examples/Cantor.thy	Tue Oct 20 19:36:52 2009 +0200
    23.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.3 @@ -1,71 +0,0 @@
    23.4 -(*  Title:      HOL/Isar_examples/Cantor.thy
    23.5 -    Author:     Markus Wenzel, TU Muenchen
    23.6 -*)
    23.7 -
    23.8 -header {* Cantor's Theorem *}
    23.9 -
   23.10 -theory Cantor
   23.11 -imports Main
   23.12 -begin
   23.13 -
   23.14 -text_raw {*
   23.15 -  \footnote{This is an Isar version of the final example of the
   23.16 -  Isabelle/HOL manual \cite{isabelle-HOL}.}
   23.17 -*}
   23.18 -
   23.19 -text {*
   23.20 -  Cantor's Theorem states that every set has more subsets than it has
   23.21 -  elements.  It has become a favorite basic example in pure
   23.22 -  higher-order logic since it is so easily expressed: \[\all{f::\alpha
   23.23 -  \To \alpha \To \idt{bool}} \ex{S::\alpha \To \idt{bool}}
   23.24 -  \all{x::\alpha} f \ap x \not= S\]
   23.25 -
   23.26 -  Viewing types as sets, $\alpha \To \idt{bool}$ represents the
   23.27 -  powerset of $\alpha$.  This version of the theorem states that for
   23.28 -  every function from $\alpha$ to its powerset, some subset is outside
   23.29 -  its range.  The Isabelle/Isar proofs below uses HOL's set theory,
   23.30 -  with the type $\alpha \ap \idt{set}$ and the operator
   23.31 -  $\idt{range}::(\alpha \To \beta) \To \beta \ap \idt{set}$.
   23.32 -*}
   23.33 -
   23.34 -theorem "EX S. S ~: range (f :: 'a => 'a set)"
   23.35 -proof
   23.36 -  let ?S = "{x. x ~: f x}"
   23.37 -  show "?S ~: range f"
   23.38 -  proof
   23.39 -    assume "?S : range f"
   23.40 -    then obtain y where "?S = f y" ..
   23.41 -    then show False
   23.42 -    proof (rule equalityCE)
   23.43 -      assume "y : f y"
   23.44 -      assume "y : ?S" then have "y ~: f y" ..
   23.45 -      with `y : f y` show ?thesis by contradiction
   23.46 -    next
   23.47 -      assume "y ~: ?S"
   23.48 -      assume "y ~: f y" then have "y : ?S" ..
   23.49 -      with `y ~: ?S` show ?thesis by contradiction
   23.50 -    qed
   23.51 -  qed
   23.52 -qed
   23.53 -
   23.54 -text {*
   23.55 -  How much creativity is required?  As it happens, Isabelle can prove
   23.56 -  this theorem automatically using best-first search.  Depth-first
   23.57 -  search would diverge, but best-first search successfully navigates
   23.58 -  through the large search space.  The context of Isabelle's classical
   23.59 -  prover contains rules for the relevant constructs of HOL's set
   23.60 -  theory.
   23.61 -*}
   23.62 -
   23.63 -theorem "EX S. S ~: range (f :: 'a => 'a set)"
   23.64 -  by best
   23.65 -
   23.66 -text {*
   23.67 -  While this establishes the same theorem internally, we do not get
   23.68 -  any idea of how the proof actually works.  There is currently no way
   23.69 -  to transform internal system-level representations of Isabelle
   23.70 -  proofs back into Isar text.  Writing intelligible proof documents
   23.71 -  really is a creative process, after all.
   23.72 -*}
   23.73 -
   23.74 -end
    24.1 --- a/src/HOL/Isar_examples/Drinker.thy	Tue Oct 20 19:36:52 2009 +0200
    24.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.3 @@ -1,54 +0,0 @@
    24.4 -(*  Title:      HOL/Isar_examples/Drinker.thy
    24.5 -    Author:     Makarius
    24.6 -*)
    24.7 -
    24.8 -header {* The Drinker's Principle *}
    24.9 -
   24.10 -theory Drinker
   24.11 -imports Main
   24.12 -begin
   24.13 -
   24.14 -text {*
   24.15 -  Here is another example of classical reasoning: the Drinker's
   24.16 -  Principle says that for some person, if he is drunk, everybody else
   24.17 -  is drunk!
   24.18 -
   24.19 -  We first prove a classical part of de-Morgan's law.
   24.20 -*}
   24.21 -
   24.22 -lemma deMorgan:
   24.23 -  assumes "\<not> (\<forall>x. P x)"
   24.24 -  shows "\<exists>x. \<not> P x"
   24.25 -  using prems
   24.26 -proof (rule contrapos_np)
   24.27 -  assume a: "\<not> (\<exists>x. \<not> P x)"
   24.28 -  show "\<forall>x. P x"
   24.29 -  proof
   24.30 -    fix x
   24.31 -    show "P x"
   24.32 -    proof (rule classical)
   24.33 -      assume "\<not> P x"
   24.34 -      then have "\<exists>x. \<not> P x" ..
   24.35 -      with a show ?thesis by contradiction
   24.36 -    qed
   24.37 -  qed
   24.38 -qed
   24.39 -
   24.40 -theorem Drinker's_Principle: "\<exists>x. drunk x \<longrightarrow> (\<forall>x. drunk x)"
   24.41 -proof cases
   24.42 -  fix a assume "\<forall>x. drunk x"
   24.43 -  then have "drunk a \<longrightarrow> (\<forall>x. drunk x)" ..
   24.44 -  then show ?thesis ..
   24.45 -next
   24.46 -  assume "\<not> (\<forall>x. drunk x)"
   24.47 -  then have "\<exists>x. \<not> drunk x" by (rule deMorgan)
   24.48 -  then obtain a where a: "\<not> drunk a" ..
   24.49 -  have "drunk a \<longrightarrow> (\<forall>x. drunk x)"
   24.50 -  proof
   24.51 -    assume "drunk a"
   24.52 -    with a show "\<forall>x. drunk x" by (contradiction)
   24.53 -  qed
   24.54 -  then show ?thesis ..
   24.55 -qed
   24.56 -
   24.57 -end
    25.1 --- a/src/HOL/Isar_examples/Expr_Compiler.thy	Tue Oct 20 19:36:52 2009 +0200
    25.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.3 @@ -1,231 +0,0 @@
    25.4 -(*  Title:      HOL/Isar_examples/Expr_Compiler.thy
    25.5 -    Author:     Markus Wenzel, TU Muenchen
    25.6 -
    25.7 -Correctness of a simple expression/stack-machine compiler.
    25.8 -*)
    25.9 -
   25.10 -header {* Correctness of a simple expression compiler *}
   25.11 -
   25.12 -theory Expr_Compiler
   25.13 -imports Main
   25.14 -begin
   25.15 -
   25.16 -text {*
   25.17 - This is a (rather trivial) example of program verification.  We model
   25.18 - a compiler for translating expressions to stack machine instructions,
   25.19 - and prove its correctness wrt.\ some evaluation semantics.
   25.20 -*}
   25.21 -
   25.22 -
   25.23 -subsection {* Binary operations *}
   25.24 -
   25.25 -text {*
   25.26 - Binary operations are just functions over some type of values.  This
   25.27 - is both for abstract syntax and semantics, i.e.\ we use a ``shallow
   25.28 - embedding'' here.
   25.29 -*}
   25.30 -
   25.31 -types
   25.32 -  'val binop = "'val => 'val => 'val"
   25.33 -
   25.34 -
   25.35 -subsection {* Expressions *}
   25.36 -
   25.37 -text {*
   25.38 - The language of expressions is defined as an inductive type,
   25.39 - consisting of variables, constants, and binary operations on
   25.40 - expressions.
   25.41 -*}
   25.42 -
   25.43 -datatype ('adr, 'val) expr =
   25.44 -  Variable 'adr |
   25.45 -  Constant 'val |
   25.46 -  Binop "'val binop" "('adr, 'val) expr" "('adr, 'val) expr"
   25.47 -
   25.48 -text {*
   25.49 - Evaluation (wrt.\ some environment of variable assignments) is
   25.50 - defined by primitive recursion over the structure of expressions.
   25.51 -*}
   25.52 -
   25.53 -consts
   25.54 -  eval :: "('adr, 'val) expr => ('adr => 'val) => 'val"
   25.55 -
   25.56 -primrec
   25.57 -  "eval (Variable x) env = env x"
   25.58 -  "eval (Constant c) env = c"
   25.59 -  "eval (Binop f e1 e2) env = f (eval e1 env) (eval e2 env)"
   25.60 -
   25.61 -
   25.62 -subsection {* Machine *}
   25.63 -
   25.64 -text {*
   25.65 - Next we model a simple stack machine, with three instructions.
   25.66 -*}
   25.67 -
   25.68 -datatype ('adr, 'val) instr =
   25.69 -  Const 'val |
   25.70 -  Load 'adr |
   25.71 -  Apply "'val binop"
   25.72 -
   25.73 -text {*
   25.74 - Execution of a list of stack machine instructions is easily defined
   25.75 - as follows.
   25.76 -*}
   25.77 -
   25.78 -consts
   25.79 -  exec :: "(('adr, 'val) instr) list
   25.80 -    => 'val list => ('adr => 'val) => 'val list"
   25.81 -
   25.82 -primrec
   25.83 -  "exec [] stack env = stack"
   25.84 -  "exec (instr # instrs) stack env =
   25.85 -    (case instr of
   25.86 -      Const c => exec instrs (c # stack) env
   25.87 -    | Load x => exec instrs (env x # stack) env
   25.88 -    | Apply f => exec instrs (f (hd stack) (hd (tl stack))
   25.89 -                   # (tl (tl stack))) env)"
   25.90 -
   25.91 -constdefs
   25.92 -  execute :: "(('adr, 'val) instr) list => ('adr => 'val) => 'val"
   25.93 -  "execute instrs env == hd (exec instrs [] env)"
   25.94 -
   25.95 -
   25.96 -subsection {* Compiler *}
   25.97 -
   25.98 -text {*
   25.99 - We are ready to define the compilation function of expressions to
  25.100 - lists of stack machine instructions.
  25.101 -*}
  25.102 -
  25.103 -consts
  25.104 -  compile :: "('adr, 'val) expr => (('adr, 'val) instr) list"
  25.105 -
  25.106 -primrec
  25.107 -  "compile (Variable x) = [Load x]"
  25.108 -  "compile (Constant c) = [Const c]"
  25.109 -  "compile (Binop f e1 e2) = compile e2 @ compile e1 @ [Apply f]"
  25.110 -
  25.111 -
  25.112 -text {*
  25.113 - The main result of this development is the correctness theorem for
  25.114 - $\idt{compile}$.  We first establish a lemma about $\idt{exec}$ and
  25.115 - list append.
  25.116 -*}
  25.117 -
  25.118 -lemma exec_append:
  25.119 -  "exec (xs @ ys) stack env =
  25.120 -    exec ys (exec xs stack env) env"
  25.121 -proof (induct xs arbitrary: stack)
  25.122 -  case Nil
  25.123 -  show ?case by simp
  25.124 -next
  25.125 -  case (Cons x xs)
  25.126 -  show ?case
  25.127 -  proof (induct x)
  25.128 -    case Const
  25.129 -    from Cons show ?case by simp
  25.130 -  next
  25.131 -    case Load
  25.132 -    from Cons show ?case by simp
  25.133 -  next
  25.134 -    case Apply
  25.135 -    from Cons show ?case by simp
  25.136 -  qed
  25.137 -qed
  25.138 -
  25.139 -theorem correctness: "execute (compile e) env = eval e env"
  25.140 -proof -
  25.141 -  have "\<And>stack. exec (compile e) stack env = eval e env # stack"
  25.142 -  proof (induct e)
  25.143 -    case Variable show ?case by simp
  25.144 -  next
  25.145 -    case Constant show ?case by simp
  25.146 -  next
  25.147 -    case Binop then show ?case by (simp add: exec_append)
  25.148 -  qed
  25.149 -  then show ?thesis by (simp add: execute_def)
  25.150 -qed
  25.151 -
  25.152 -
  25.153 -text {*
  25.154 - \bigskip In the proofs above, the \name{simp} method does quite a lot
  25.155 - of work behind the scenes (mostly ``functional program execution'').
  25.156 - Subsequently, the same reasoning is elaborated in detail --- at most
  25.157 - one recursive function definition is used at a time.  Thus we get a
  25.158 - better idea of what is actually going on.
  25.159 -*}
  25.160 -
  25.161 -lemma exec_append':
  25.162 -  "exec (xs @ ys) stack env = exec ys (exec xs stack env) env"
  25.163 -proof (induct xs arbitrary: stack)
  25.164 -  case (Nil s)
  25.165 -  have "exec ([] @ ys) s env = exec ys s env" by simp
  25.166 -  also have "... = exec ys (exec [] s env) env" by simp
  25.167 -  finally show ?case .
  25.168 -next
  25.169 -  case (Cons x xs s)
  25.170 -  show ?case
  25.171 -  proof (induct x)
  25.172 -    case (Const val)
  25.173 -    have "exec ((Const val # xs) @ ys) s env = exec (Const val # xs @ ys) s env"
  25.174 -      by simp
  25.175 -    also have "... = exec (xs @ ys) (val # s) env" by simp
  25.176 -    also from Cons have "... = exec ys (exec xs (val # s) env) env" .
  25.177 -    also have "... = exec ys (exec (Const val # xs) s env) env" by simp
  25.178 -    finally show ?case .
  25.179 -  next
  25.180 -    case (Load adr)
  25.181 -    from Cons show ?case by simp -- {* same as above *}
  25.182 -  next
  25.183 -    case (Apply fn)
  25.184 -    have "exec ((Apply fn # xs) @ ys) s env =
  25.185 -        exec (Apply fn # xs @ ys) s env" by simp
  25.186 -    also have "... =
  25.187 -        exec (xs @ ys) (fn (hd s) (hd (tl s)) # (tl (tl s))) env" by simp
  25.188 -    also from Cons have "... =
  25.189 -        exec ys (exec xs (fn (hd s) (hd (tl s)) # tl (tl s)) env) env" .
  25.190 -    also have "... = exec ys (exec (Apply fn # xs) s env) env" by simp
  25.191 -    finally show ?case .
  25.192 -  qed
  25.193 -qed
  25.194 -
  25.195 -theorem correctness': "execute (compile e) env = eval e env"
  25.196 -proof -
  25.197 -  have exec_compile: "\<And>stack. exec (compile e) stack env = eval e env # stack"
  25.198 -  proof (induct e)
  25.199 -    case (Variable adr s)
  25.200 -    have "exec (compile (Variable adr)) s env = exec [Load adr] s env"
  25.201 -      by simp
  25.202 -    also have "... = env adr # s" by simp
  25.203 -    also have "env adr = eval (Variable adr) env" by simp
  25.204 -    finally show ?case .
  25.205 -  next
  25.206 -    case (Constant val s)
  25.207 -    show ?case by simp -- {* same as above *}
  25.208 -  next
  25.209 -    case (Binop fn e1 e2 s)
  25.210 -    have "exec (compile (Binop fn e1 e2)) s env =
  25.211 -        exec (compile e2 @ compile e1 @ [Apply fn]) s env" by simp
  25.212 -    also have "... = exec [Apply fn]
  25.213 -        (exec (compile e1) (exec (compile e2) s env) env) env"
  25.214 -      by (simp only: exec_append)
  25.215 -    also have "exec (compile e2) s env = eval e2 env # s" by fact
  25.216 -    also have "exec (compile e1) ... env = eval e1 env # ..." by fact
  25.217 -    also have "exec [Apply fn] ... env =
  25.218 -        fn (hd ...) (hd (tl ...)) # (tl (tl ...))" by simp
  25.219 -    also have "... = fn (eval e1 env) (eval e2 env) # s" by simp
  25.220 -    also have "fn (eval e1 env) (eval e2 env) =
  25.221 -        eval (Binop fn e1 e2) env"
  25.222 -      by simp
  25.223 -    finally show ?case .
  25.224 -  qed
  25.225 -
  25.226 -  have "execute (compile e) env = hd (exec (compile e) [] env)"
  25.227 -    by (simp add: execute_def)
  25.228 -  also from exec_compile
  25.229 -    have "exec (compile e) [] env = [eval e env]" .
  25.230 -  also have "hd ... = eval e env" by simp
  25.231 -  finally show ?thesis .
  25.232 -qed
  25.233 -
  25.234 -end
    26.1 --- a/src/HOL/Isar_examples/Fibonacci.thy	Tue Oct 20 19:36:52 2009 +0200
    26.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.3 @@ -1,165 +0,0 @@
    26.4 -(*  Title:      HOL/Isar_examples/Fibonacci.thy
    26.5 -    Author:     Gertrud Bauer
    26.6 -    Copyright   1999 Technische Universitaet Muenchen
    26.7 -
    26.8 -The Fibonacci function.  Demonstrates the use of recdef.  Original
    26.9 -tactic script by Lawrence C Paulson.
   26.10 -
   26.11 -Fibonacci numbers: proofs of laws taken from
   26.12 -
   26.13 -  R. L. Graham, D. E. Knuth, O. Patashnik.
   26.14 -  Concrete Mathematics.
   26.15 -  (Addison-Wesley, 1989)
   26.16 -*)
   26.17 -
   26.18 -header {* Fib and Gcd commute *}
   26.19 -
   26.20 -theory Fibonacci
   26.21 -imports Primes
   26.22 -begin
   26.23 -
   26.24 -text_raw {*
   26.25 - \footnote{Isar version by Gertrud Bauer.  Original tactic script by
   26.26 - Larry Paulson.  A few proofs of laws taken from
   26.27 - \cite{Concrete-Math}.}
   26.28 -*}
   26.29 -
   26.30 -
   26.31 -subsection {* Fibonacci numbers *}
   26.32 -
   26.33 -fun fib :: "nat \<Rightarrow> nat" where
   26.34 -  "fib 0 = 0"
   26.35 -  | "fib (Suc 0) = 1"
   26.36 -  | "fib (Suc (Suc x)) = fib x + fib (Suc x)"
   26.37 -
   26.38 -lemma [simp]: "0 < fib (Suc n)"
   26.39 -  by (induct n rule: fib.induct) simp_all
   26.40 -
   26.41 -
   26.42 -text {* Alternative induction rule. *}
   26.43 -
   26.44 -theorem fib_induct:
   26.45 -    "P 0 ==> P 1 ==> (!!n. P (n + 1) ==> P n ==> P (n + 2)) ==> P (n::nat)"
   26.46 -  by (induct rule: fib.induct) simp_all
   26.47 -
   26.48 -
   26.49 -subsection {* Fib and gcd commute *}
   26.50 -
   26.51 -text {* A few laws taken from \cite{Concrete-Math}. *}
   26.52 -
   26.53 -lemma fib_add:
   26.54 -  "fib (n + k + 1) = fib (k + 1) * fib (n + 1) + fib k * fib n"
   26.55 -  (is "?P n")
   26.56 -  -- {* see \cite[page 280]{Concrete-Math} *}
   26.57 -proof (induct n rule: fib_induct)
   26.58 -  show "?P 0" by simp
   26.59 -  show "?P 1" by simp
   26.60 -  fix n
   26.61 -  have "fib (n + 2 + k + 1)
   26.62 -    = fib (n + k + 1) + fib (n + 1 + k + 1)" by simp
   26.63 -  also assume "fib (n + k + 1)
   26.64 -    = fib (k + 1) * fib (n + 1) + fib k * fib n"
   26.65 -      (is " _ = ?R1")
   26.66 -  also assume "fib (n + 1 + k + 1)
   26.67 -    = fib (k + 1) * fib (n + 1 + 1) + fib k * fib (n + 1)"
   26.68 -      (is " _ = ?R2")
   26.69 -  also have "?R1 + ?R2
   26.70 -    = fib (k + 1) * fib (n + 2 + 1) + fib k * fib (n + 2)"
   26.71 -    by (simp add: add_mult_distrib2)
   26.72 -  finally show "?P (n + 2)" .
   26.73 -qed
   26.74 -
   26.75 -lemma gcd_fib_Suc_eq_1: "gcd (fib n) (fib (n + 1)) = 1" (is "?P n")
   26.76 -proof (induct n rule: fib_induct)
   26.77 -  show "?P 0" by simp
   26.78 -  show "?P 1" by simp
   26.79 -  fix n
   26.80 -  have "fib (n + 2 + 1) = fib (n + 1) + fib (n + 2)"
   26.81 -    by simp
   26.82 -  also have "gcd (fib (n + 2)) ... = gcd (fib (n + 2)) (fib (n + 1))"
   26.83 -    by (simp only: gcd_add2')
   26.84 -  also have "... = gcd (fib (n + 1)) (fib (n + 1 + 1))"
   26.85 -    by (simp add: gcd_commute)
   26.86 -  also assume "... = 1"
   26.87 -  finally show "?P (n + 2)" .
   26.88 -qed
   26.89 -
   26.90 -lemma gcd_mult_add: "0 < n ==> gcd (n * k + m) n = gcd m n"
   26.91 -proof -
   26.92 -  assume "0 < n"
   26.93 -  then have "gcd (n * k + m) n = gcd n (m mod n)"
   26.94 -    by (simp add: gcd_non_0 add_commute)
   26.95 -  also from `0 < n` have "... = gcd m n" by (simp add: gcd_non_0)
   26.96 -  finally show ?thesis .
   26.97 -qed
   26.98 -
   26.99 -lemma gcd_fib_add: "gcd (fib m) (fib (n + m)) = gcd (fib m) (fib n)"
  26.100 -proof (cases m)
  26.101 -  case 0
  26.102 -  then show ?thesis by simp
  26.103 -next
  26.104 -  case (Suc k)
  26.105 -  then have "gcd (fib m) (fib (n + m)) = gcd (fib (n + k + 1)) (fib (k + 1))"
  26.106 -    by (simp add: gcd_commute)
  26.107 -  also have "fib (n + k + 1)
  26.108 -    = fib (k + 1) * fib (n + 1) + fib k * fib n"
  26.109 -    by (rule fib_add)
  26.110 -  also have "gcd ... (fib (k + 1)) = gcd (fib k * fib n) (fib (k + 1))"
  26.111 -    by (simp add: gcd_mult_add)
  26.112 -  also have "... = gcd (fib n) (fib (k + 1))"
  26.113 -    by (simp only: gcd_fib_Suc_eq_1 gcd_mult_cancel)
  26.114 -  also have "... = gcd (fib m) (fib n)"
  26.115 -    using Suc by (simp add: gcd_commute)
  26.116 -  finally show ?thesis .
  26.117 -qed
  26.118 -
  26.119 -lemma gcd_fib_diff:
  26.120 -  assumes "m <= n"
  26.121 -  shows "gcd (fib m) (fib (n - m)) = gcd (fib m) (fib n)"
  26.122 -proof -
  26.123 -  have "gcd (fib m) (fib (n - m)) = gcd (fib m) (fib (n - m + m))"
  26.124 -    by (simp add: gcd_fib_add)
  26.125 -  also from `m <= n` have "n - m + m = n" by simp
  26.126 -  finally show ?thesis .
  26.127 -qed
  26.128 -
  26.129 -lemma gcd_fib_mod:
  26.130 -  assumes "0 < m"
  26.131 -  shows "gcd (fib m) (fib (n mod m)) = gcd (fib m) (fib n)"
  26.132 -proof (induct n rule: nat_less_induct)
  26.133 -  case (1 n) note hyp = this
  26.134 -  show ?case
  26.135 -  proof -
  26.136 -    have "n mod m = (if n < m then n else (n - m) mod m)"
  26.137 -      by (rule mod_if)
  26.138 -    also have "gcd (fib m) (fib ...) = gcd (fib m) (fib n)"
  26.139 -    proof (cases "n < m")
  26.140 -      case True then show ?thesis by simp
  26.141 -    next
  26.142 -      case False then have "m <= n" by simp
  26.143 -      from `0 < m` and False have "n - m < n" by simp
  26.144 -      with hyp have "gcd (fib m) (fib ((n - m) mod m))
  26.145 -        = gcd (fib m) (fib (n - m))" by simp
  26.146 -      also have "... = gcd (fib m) (fib n)"
  26.147 -        using `m <= n` by (rule gcd_fib_diff)
  26.148 -      finally have "gcd (fib m) (fib ((n - m) mod m)) =
  26.149 -        gcd (fib m) (fib n)" .
  26.150 -      with False show ?thesis by simp
  26.151 -    qed
  26.152 -    finally show ?thesis .
  26.153 -  qed
  26.154 -qed
  26.155 -
  26.156 -
  26.157 -theorem fib_gcd: "fib (gcd m n) = gcd (fib m) (fib n)" (is "?P m n")
  26.158 -proof (induct m n rule: gcd_induct)
  26.159 -  fix m show "fib (gcd m 0) = gcd (fib m) (fib 0)" by simp
  26.160 -  fix n :: nat assume n: "0 < n"
  26.161 -  then have "gcd m n = gcd n (m mod n)" by (rule gcd_non_0)
  26.162 -  also assume hyp: "fib ... = gcd (fib n) (fib (m mod n))"
  26.163 -  also from n have "... = gcd (fib n) (fib m)" by (rule gcd_fib_mod)
  26.164 -  also have "... = gcd (fib m) (fib n)" by (rule gcd_commute)
  26.165 -  finally show "fib (gcd m n) = gcd (fib m) (fib n)" .
  26.166 -qed
  26.167 -
  26.168 -end
    27.1 --- a/src/HOL/Isar_examples/Group.thy	Tue Oct 20 19:36:52 2009 +0200
    27.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.3 @@ -1,267 +0,0 @@
    27.4 -(*  Title:      HOL/Isar_examples/Group.thy
    27.5 -    Author:     Markus Wenzel, TU Muenchen
    27.6 -*)
    27.7 -
    27.8 -header {* Basic group theory *}
    27.9 -
   27.10 -theory Group
   27.11 -imports Main
   27.12 -begin
   27.13 -
   27.14 -subsection {* Groups and calculational reasoning *} 
   27.15 -
   27.16 -text {*
   27.17 - Groups over signature $({\times} :: \alpha \To \alpha \To \alpha,
   27.18 - \idt{one} :: \alpha, \idt{inverse} :: \alpha \To \alpha)$ are defined
   27.19 - as an axiomatic type class as follows.  Note that the parent class
   27.20 - $\idt{times}$ is provided by the basic HOL theory.
   27.21 -*}
   27.22 -
   27.23 -consts
   27.24 -  one :: "'a"
   27.25 -  inverse :: "'a => 'a"
   27.26 -
   27.27 -axclass
   27.28 -  group < times
   27.29 -  group_assoc:         "(x * y) * z = x * (y * z)"
   27.30 -  group_left_one:      "one * x = x"
   27.31 -  group_left_inverse:  "inverse x * x = one"
   27.32 -
   27.33 -text {*
   27.34 - The group axioms only state the properties of left one and inverse,
   27.35 - the right versions may be derived as follows.
   27.36 -*}
   27.37 -
   27.38 -theorem group_right_inverse: "x * inverse x = (one::'a::group)"
   27.39 -proof -
   27.40 -  have "x * inverse x = one * (x * inverse x)"
   27.41 -    by (simp only: group_left_one)
   27.42 -  also have "... = one * x * inverse x"
   27.43 -    by (simp only: group_assoc)
   27.44 -  also have "... = inverse (inverse x) * inverse x * x * inverse x"
   27.45 -    by (simp only: group_left_inverse)
   27.46 -  also have "... = inverse (inverse x) * (inverse x * x) * inverse x"
   27.47 -    by (simp only: group_assoc)
   27.48 -  also have "... = inverse (inverse x) * one * inverse x"
   27.49 -    by (simp only: group_left_inverse)
   27.50 -  also have "... = inverse (inverse x) * (one * inverse x)"
   27.51 -    by (simp only: group_assoc)
   27.52 -  also have "... = inverse (inverse x) * inverse x"
   27.53 -    by (simp only: group_left_one)
   27.54 -  also have "... = one"
   27.55 -    by (simp only: group_left_inverse)
   27.56 -  finally show ?thesis .
   27.57 -qed
   27.58 -
   27.59 -text {*
   27.60 - With \name{group-right-inverse} already available,
   27.61 - \name{group-right-one}\label{thm:group-right-one} is now established
   27.62 - much easier.
   27.63 -*}
   27.64 -
   27.65 -theorem group_right_one: "x * one = (x::'a::group)"
   27.66 -proof -
   27.67 -  have "x * one = x * (inverse x * x)"
   27.68 -    by (simp only: group_left_inverse)
   27.69 -  also have "... = x * inverse x * x"
   27.70 -    by (simp only: group_assoc)
   27.71 -  also have "... = one * x"
   27.72 -    by (simp only: group_right_inverse)
   27.73 -  also have "... = x"
   27.74 -    by (simp only: group_left_one)
   27.75 -  finally show ?thesis .
   27.76 -qed
   27.77 -
   27.78 -text {*
   27.79 - \medskip The calculational proof style above follows typical
   27.80 - presentations given in any introductory course on algebra.  The basic
   27.81 - technique is to form a transitive chain of equations, which in turn
   27.82 - are established by simplifying with appropriate rules.  The low-level
   27.83 - logical details of equational reasoning are left implicit.
   27.84 -
   27.85 - Note that ``$\dots$'' is just a special term variable that is bound
   27.86 - automatically to the argument\footnote{The argument of a curried
   27.87 - infix expression happens to be its right-hand side.} of the last fact
   27.88 - achieved by any local assumption or proven statement.  In contrast to
   27.89 - $\var{thesis}$, the ``$\dots$'' variable is bound \emph{after} the
   27.90 - proof is finished, though.
   27.91 -
   27.92 - There are only two separate Isar language elements for calculational
   27.93 - proofs: ``\isakeyword{also}'' for initial or intermediate
   27.94 - calculational steps, and ``\isakeyword{finally}'' for exhibiting the
   27.95 - result of a calculation.  These constructs are not hardwired into
   27.96 - Isabelle/Isar, but defined on top of the basic Isar/VM interpreter.
   27.97 - Expanding the \isakeyword{also} and \isakeyword{finally} derived
   27.98 - language elements, calculations may be simulated by hand as
   27.99 - demonstrated below.
  27.100 -*}
  27.101 -
  27.102 -theorem "x * one = (x::'a::group)"
  27.103 -proof -
  27.104 -  have "x * one = x * (inverse x * x)"
  27.105 -    by (simp only: group_left_inverse)
  27.106 -
  27.107 -  note calculation = this
  27.108 -    -- {* first calculational step: init calculation register *}
  27.109 -
  27.110 -  have "... = x * inverse x * x"
  27.111 -    by (simp only: group_assoc)
  27.112 -
  27.113 -  note calculation = trans [OF calculation this]
  27.114 -    -- {* general calculational step: compose with transitivity rule *}
  27.115 -
  27.116 -  have "... = one * x"
  27.117 -    by (simp only: group_right_inverse)
  27.118 -
  27.119 -  note calculation = trans [OF calculation this]
  27.120 -    -- {* general calculational step: compose with transitivity rule *}
  27.121 -
  27.122 -  have "... = x"
  27.123 -    by (simp only: group_left_one)
  27.124 -
  27.125 -  note calculation = trans [OF calculation this]
  27.126 -    -- {* final calculational step: compose with transitivity rule ... *}
  27.127 -  from calculation
  27.128 -    -- {* ... and pick up the final result *}
  27.129 -
  27.130 -  show ?thesis .
  27.131 -qed
  27.132 -
  27.133 -text {*
  27.134 - Note that this scheme of calculations is not restricted to plain
  27.135 - transitivity.  Rules like anti-symmetry, or even forward and backward
  27.136 - substitution work as well.  For the actual implementation of
  27.137 - \isacommand{also} and \isacommand{finally}, Isabelle/Isar maintains
  27.138 - separate context information of ``transitivity'' rules.  Rule
  27.139 - selection takes place automatically by higher-order unification.
  27.140 -*}
  27.141 -
  27.142 -
  27.143 -subsection {* Groups as monoids *}
  27.144 -
  27.145 -text {*
  27.146 - Monoids over signature $({\times} :: \alpha \To \alpha \To \alpha,
  27.147 - \idt{one} :: \alpha)$ are defined like this.
  27.148 -*}
  27.149 -
  27.150 -axclass monoid < times
  27.151 -  monoid_assoc:       "(x * y) * z = x * (y * z)"
  27.152 -  monoid_left_one:   "one * x = x"
  27.153 -  monoid_right_one:  "x * one = x"
  27.154 -
  27.155 -text {*
  27.156 - Groups are \emph{not} yet monoids directly from the definition.  For
  27.157 - monoids, \name{right-one} had to be included as an axiom, but for
  27.158 - groups both \name{right-one} and \name{right-inverse} are derivable
  27.159 - from the other axioms.  With \name{group-right-one} derived as a
  27.160 - theorem of group theory (see page~\pageref{thm:group-right-one}), we
  27.161 - may still instantiate $\idt{group} \subseteq \idt{monoid}$ properly
  27.162 - as follows.
  27.163 -*}
  27.164 -
  27.165 -instance group < monoid
  27.166 -  by (intro_classes,
  27.167 -       rule group_assoc,
  27.168 -       rule group_left_one,
  27.169 -       rule group_right_one)
  27.170 -
  27.171 -text {*
  27.172 - The \isacommand{instance} command actually is a version of
  27.173 - \isacommand{theorem}, setting up a goal that reflects the intended
  27.174 - class relation (or type constructor arity).  Thus any Isar proof
  27.175 - language element may be involved to establish this statement.  When
  27.176 - concluding the proof, the result is transformed into the intended
  27.177 - type signature extension behind the scenes.
  27.178 -*}
  27.179 -
  27.180 -subsection {* More theorems of group theory *}
  27.181 -
  27.182 -text {*
  27.183 - The one element is already uniquely determined by preserving an
  27.184 - \emph{arbitrary} group element.
  27.185 -*}
  27.186 -
  27.187 -theorem group_one_equality: "e * x = x ==> one = (e::'a::group)"
  27.188 -proof -
  27.189 -  assume eq: "e * x = x"
  27.190 -  have "one = x * inverse x"
  27.191 -    by (simp only: group_right_inverse)
  27.192 -  also have "... = (e * x) * inverse x"
  27.193 -    by (simp only: eq)
  27.194 -  also have "... = e * (x * inverse x)"
  27.195 -    by (simp only: group_assoc)
  27.196 -  also have "... = e * one"
  27.197 -    by (simp only: group_right_inverse)
  27.198 -  also have "... = e"
  27.199 -    by (simp only: group_right_one)
  27.200 -  finally show ?thesis .
  27.201 -qed
  27.202 -
  27.203 -text {*
  27.204 - Likewise, the inverse is already determined by the cancel property.
  27.205 -*}
  27.206 -
  27.207 -theorem group_inverse_equality:
  27.208 -  "x' * x = one ==> inverse x = (x'::'a::group)"
  27.209 -proof -
  27.210 -  assume eq: "x' * x = one"
  27.211 -  have "inverse x = one * inverse x"
  27.212 -    by (simp only: group_left_one)
  27.213 -  also have "... = (x' * x) * inverse x"
  27.214 -    by (simp only: eq)
  27.215 -  also have "... = x' * (x * inverse x)"
  27.216 -    by (simp only: group_assoc)
  27.217 -  also have "... = x' * one"
  27.218 -    by (simp only: group_right_inverse)
  27.219 -  also have "... = x'"
  27.220 -    by (simp only: group_right_one)
  27.221 -  finally show ?thesis .
  27.222 -qed
  27.223 -
  27.224 -text {*
  27.225 - The inverse operation has some further characteristic properties.
  27.226 -*}
  27.227 -
  27.228 -theorem group_inverse_times:
  27.229 -  "inverse (x * y) = inverse y * inverse (x::'a::group)"
  27.230 -proof (rule group_inverse_equality)
  27.231 -  show "(inverse y * inverse x) * (x * y) = one"
  27.232 -  proof -
  27.233 -    have "(inverse y * inverse x) * (x * y) =
  27.234 -        (inverse y * (inverse x * x)) * y"
  27.235 -      by (simp only: group_assoc)
  27.236 -    also have "... = (inverse y * one) * y"
  27.237 -      by (simp only: group_left_inverse)
  27.238 -    also have "... = inverse y * y"
  27.239 -      by (simp only: group_right_one)
  27.240 -    also have "... = one"
  27.241 -      by (simp only: group_left_inverse)
  27.242 -    finally show ?thesis .
  27.243 -  qed
  27.244 -qed
  27.245 -
  27.246 -theorem inverse_inverse: "inverse (inverse x) = (x::'a::group)"
  27.247 -proof (rule group_inverse_equality)
  27.248 -  show "x * inverse x = one"
  27.249 -    by (simp only: group_right_inverse)
  27.250 -qed
  27.251 -
  27.252 -theorem inverse_inject: "inverse x = inverse y ==> x = (y::'a::group)"
  27.253 -proof -
  27.254 -  assume eq: "inverse x = inverse y"
  27.255 -  have "x = x * one"
  27.256 -    by (simp only: group_right_one)
  27.257 -  also have "... = x * (inverse y * y)"
  27.258 -    by (simp only: group_left_inverse)
  27.259 -  also have "... = x * (inverse x * y)"
  27.260 -    by (simp only: eq)
  27.261 -  also have "... = (x * inverse x) * y"
  27.262 -    by (simp only: group_assoc)
  27.263 -  also have "... = one * y"
  27.264 -    by (simp only: group_right_inverse)
  27.265 -  also have "... = y"
  27.266 -    by (simp only: group_left_one)
  27.267 -  finally show ?thesis .
  27.268 -qed
  27.269 -
  27.270 -end
  27.271 \ No newline at end of file
    28.1 --- a/src/HOL/Isar_examples/Hoare.thy	Tue Oct 20 19:36:52 2009 +0200
    28.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.3 @@ -1,463 +0,0 @@
    28.4 -(*  Title:      HOL/Isar_examples/Hoare.thy
    28.5 -    Author:     Markus Wenzel, TU Muenchen
    28.6 -
    28.7 -A formulation of Hoare logic suitable for Isar.
    28.8 -*)
    28.9 -
   28.10 -header {* Hoare Logic *}
   28.11 -
   28.12 -theory Hoare
   28.13 -imports Main
   28.14 -uses ("~~/src/HOL/Hoare/hoare_tac.ML")
   28.15 -begin
   28.16 -
   28.17 -subsection {* Abstract syntax and semantics *}
   28.18 -
   28.19 -text {*
   28.20 - The following abstract syntax and semantics of Hoare Logic over
   28.21 - \texttt{WHILE} programs closely follows the existing tradition in
   28.22 - Isabelle/HOL of formalizing the presentation given in
   28.23 - \cite[\S6]{Winskel:1993}.  See also
   28.24 - \url{http://isabelle.in.tum.de/library/Hoare/} and
   28.25 - \cite{Nipkow:1998:Winskel}.
   28.26 -*}
   28.27 -
   28.28 -types
   28.29 -  'a bexp = "'a set"
   28.30 -  'a assn = "'a set"
   28.31 -
   28.32 -datatype 'a com =
   28.33 -    Basic "'a => 'a"
   28.34 -  | Seq "'a com" "'a com"    ("(_;/ _)" [60, 61] 60)
   28.35 -  | Cond "'a bexp" "'a com" "'a com"
   28.36 -  | While "'a bexp" "'a assn" "'a com"
   28.37 -
   28.38 -abbreviation
   28.39 -  Skip  ("SKIP") where
   28.40 -  "SKIP == Basic id"
   28.41 -
   28.42 -types
   28.43 -  'a sem = "'a => 'a => bool"
   28.44 -
   28.45 -consts
   28.46 -  iter :: "nat => 'a bexp => 'a sem => 'a sem"
   28.47 -primrec
   28.48 -  "iter 0 b S s s' = (s ~: b & s = s')"
   28.49 -  "iter (Suc n) b S s s' =
   28.50 -    (s : b & (EX s''. S s s'' & iter n b S s'' s'))"
   28.51 -
   28.52 -consts
   28.53 -  Sem :: "'a com => 'a sem"
   28.54 -primrec
   28.55 -  "Sem (Basic f) s s' = (s' = f s)"
   28.56 -  "Sem (c1; c2) s s' = (EX s''. Sem c1 s s'' & Sem c2 s'' s')"
   28.57 -  "Sem (Cond b c1 c2) s s' =
   28.58 -    (if s : b then Sem c1 s s' else Sem c2 s s')"
   28.59 -  "Sem (While b x c) s s' = (EX n. iter n b (Sem c) s s')"
   28.60 -
   28.61 -constdefs
   28.62 -  Valid :: "'a bexp => 'a com => 'a bexp => bool"
   28.63 -    ("(3|- _/ (2_)/ _)" [100, 55, 100] 50)
   28.64 -  "|- P c Q == ALL s s'. Sem c s s' --> s : P --> s' : Q"
   28.65 -
   28.66 -syntax (xsymbols)
   28.67 -  Valid :: "'a bexp => 'a com => 'a bexp => bool"
   28.68 -    ("(3\<turnstile> _/ (2_)/ _)" [100, 55, 100] 50)
   28.69 -
   28.70 -lemma ValidI [intro?]:
   28.71 -    "(!!s s'. Sem c s s' ==> s : P ==> s' : Q) ==> |- P c Q"
   28.72 -  by (simp add: Valid_def)
   28.73 -
   28.74 -lemma ValidD [dest?]:
   28.75 -    "|- P c Q ==> Sem c s s' ==> s : P ==> s' : Q"
   28.76 -  by (simp add: Valid_def)
   28.77 -
   28.78 -
   28.79 -subsection {* Primitive Hoare rules *}
   28.80 -
   28.81 -text {*
   28.82 - From the semantics defined above, we derive the standard set of
   28.83 - primitive Hoare rules; e.g.\ see \cite[\S6]{Winskel:1993}.  Usually,
   28.84 - variant forms of these rules are applied in actual proof, see also
   28.85 - \S\ref{sec:hoare-isar} and \S\ref{sec:hoare-vcg}.
   28.86 -
   28.87 - \medskip The \name{basic} rule represents any kind of atomic access
   28.88 - to the state space.  This subsumes the common rules of \name{skip}
   28.89 - and \name{assign}, as formulated in \S\ref{sec:hoare-isar}.
   28.90 -*}
   28.91 -
   28.92 -theorem basic: "|- {s. f s : P} (Basic f) P"
   28.93 -proof
   28.94 -  fix s s' assume s: "s : {s. f s : P}"
   28.95 -  assume "Sem (Basic f) s s'"
   28.96 -  hence "s' = f s" by simp
   28.97 -  with s show "s' : P" by simp
   28.98 -qed
   28.99 -
  28.100 -text {*
  28.101 - The rules for sequential commands and semantic consequences are
  28.102 - established in a straight forward manner as follows.
  28.103 -*}
  28.104 -
  28.105 -theorem seq: "|- P c1 Q ==> |- Q c2 R ==> |- P (c1; c2) R"
  28.106 -proof
  28.107 -  assume cmd1: "|- P c1 Q" and cmd2: "|- Q c2 R"
  28.108 -  fix s s' assume s: "s : P"
  28.109 -  assume "Sem (c1; c2) s s'"
  28.110 -  then obtain s'' where sem1: "Sem c1 s s''" and sem2: "Sem c2 s'' s'"
  28.111 -    by auto
  28.112 -  from cmd1 sem1 s have "s'' : Q" ..
  28.113 -  with cmd2 sem2 show "s' : R" ..
  28.114 -qed
  28.115 -
  28.116 -theorem conseq: "P' <= P ==> |- P c Q ==> Q <= Q' ==> |- P' c Q'"
  28.117 -proof
  28.118 -  assume P'P: "P' <= P" and QQ': "Q <= Q'"
  28.119 -  assume cmd: "|- P c Q"
  28.120 -  fix s s' :: 'a
  28.121 -  assume sem: "Sem c s s'"
  28.122 -  assume "s : P'" with P'P have "s : P" ..
  28.123 -  with cmd sem have "s' : Q" ..
  28.124 -  with QQ' show "s' : Q'" ..
  28.125 -qed
  28.126 -
  28.127 -text {*
  28.128 - The rule for conditional commands is directly reflected by the
  28.129 - corresponding semantics; in the proof we just have to look closely
  28.130 - which cases apply.
  28.131 -*}
  28.132 -
  28.133 -theorem cond:
  28.134 -  "|- (P Int b) c1 Q ==> |- (P Int -b) c2 Q ==> |- P (Cond b c1 c2) Q"
  28.135 -proof
  28.136 -  assume case_b: "|- (P Int b) c1 Q" and case_nb: "|- (P Int -b) c2 Q"
  28.137 -  fix s s' assume s: "s : P"
  28.138 -  assume sem: "Sem (Cond b c1 c2) s s'"
  28.139 -  show "s' : Q"
  28.140 -  proof cases
  28.141 -    assume b: "s : b"
  28.142 -    from case_b show ?thesis
  28.143 -    proof
  28.144 -      from sem b show "Sem c1 s s'" by simp
  28.145 -      from s b show "s : P Int b" by simp
  28.146 -    qed
  28.147 -  next
  28.148 -    assume nb: "s ~: b"
  28.149 -    from case_nb show ?thesis
  28.150 -    proof
  28.151 -      from sem nb show "Sem c2 s s'" by simp
  28.152 -      from s nb show "s : P Int -b" by simp
  28.153 -    qed
  28.154 -  qed
  28.155 -qed
  28.156 -
  28.157 -text {*
  28.158 - The \name{while} rule is slightly less trivial --- it is the only one
  28.159 - based on recursion, which is expressed in the semantics by a
  28.160 - Kleene-style least fixed-point construction.  The auxiliary statement
  28.161 - below, which is by induction on the number of iterations is the main
  28.162 - point to be proven; the rest is by routine application of the
  28.163 - semantics of \texttt{WHILE}.
  28.164 -*}
  28.165 -
  28.166 -theorem while:
  28.167 -  assumes body: "|- (P Int b) c P"
  28.168 -  shows "|- P (While b X c) (P Int -b)"
  28.169 -proof
  28.170 -  fix s s' assume s: "s : P"
  28.171 -  assume "Sem (While b X c) s s'"
  28.172 -  then obtain n where "iter n b (Sem c) s s'" by auto
  28.173 -  from this and s show "s' : P Int -b"
  28.174 -  proof (induct n arbitrary: s)
  28.175 -    case 0
  28.176 -    thus ?case by auto
  28.177 -  next
  28.178 -    case (Suc n)
  28.179 -    then obtain s'' where b: "s : b" and sem: "Sem c s s''"
  28.180 -      and iter: "iter n b (Sem c) s'' s'"
  28.181 -      by auto
  28.182 -    from Suc and b have "s : P Int b" by simp
  28.183 -    with body sem have "s'' : P" ..
  28.184 -    with iter show ?case by (rule Suc)
  28.185 -  qed
  28.186 -qed
  28.187 -
  28.188 -
  28.189 -subsection {* Concrete syntax for assertions *}
  28.190 -
  28.191 -text {*
  28.192 - We now introduce concrete syntax for describing commands (with
  28.193 - embedded expressions) and assertions. The basic technique is that of
  28.194 - semantic ``quote-antiquote''.  A \emph{quotation} is a syntactic
  28.195 - entity delimited by an implicit abstraction, say over the state
  28.196 - space.  An \emph{antiquotation} is a marked expression within a
  28.197 - quotation that refers the implicit argument; a typical antiquotation
  28.198 - would select (or even update) components from the state.
  28.199 -
  28.200 - We will see some examples later in the concrete rules and
  28.201 - applications.
  28.202 -*}
  28.203 -
  28.204 -text {*
  28.205 - The following specification of syntax and translations is for
  28.206 - Isabelle experts only; feel free to ignore it.
  28.207 -
  28.208 - While the first part is still a somewhat intelligible specification
  28.209 - of the concrete syntactic representation of our Hoare language, the
  28.210 - actual ``ML drivers'' is quite involved.  Just note that the we
  28.211 - re-use the basic quote/antiquote translations as already defined in
  28.212 - Isabelle/Pure (see \verb,Syntax.quote_tr, and
  28.213 - \verb,Syntax.quote_tr',).
  28.214 -*}
  28.215 -
  28.216 -syntax
  28.217 -  "_quote"       :: "'b => ('a => 'b)"       ("(.'(_').)" [0] 1000)
  28.218 -  "_antiquote"   :: "('a => 'b) => 'b"       ("\<acute>_" [1000] 1000)
  28.219 -  "_Subst"       :: "'a bexp \<Rightarrow> 'b \<Rightarrow> idt \<Rightarrow> 'a bexp"
  28.220 -        ("_[_'/\<acute>_]" [1000] 999)
  28.221 -  "_Assert"      :: "'a => 'a set"           ("(.{_}.)" [0] 1000)
  28.222 -  "_Assign"      :: "idt => 'b => 'a com"    ("(\<acute>_ :=/ _)" [70, 65] 61)
  28.223 -  "_Cond"        :: "'a bexp => 'a com => 'a com => 'a com"
  28.224 -        ("(0IF _/ THEN _/ ELSE _/ FI)" [0, 0, 0] 61)
  28.225 -  "_While_inv"   :: "'a bexp => 'a assn => 'a com => 'a com"
  28.226 -        ("(0WHILE _/ INV _ //DO _ /OD)"  [0, 0, 0] 61)
  28.227 -  "_While"       :: "'a bexp => 'a com => 'a com"
  28.228 -        ("(0WHILE _ //DO _ /OD)"  [0, 0] 61)
  28.229 -
  28.230 -syntax (xsymbols)
  28.231 -  "_Assert"      :: "'a => 'a set"            ("(\<lbrace>_\<rbrace>)" [0] 1000)
  28.232 -
  28.233 -translations
  28.234 -  ".{b}."                   => "Collect .(b)."
  28.235 -  "B [a/\<acute>x]"                => ".{\<acute>(_update_name x (\<lambda>_. a)) \<in> B}."
  28.236 -  "\<acute>x := a"                 => "Basic .(\<acute>(_update_name x (\<lambda>_. a)))."
  28.237 -  "IF b THEN c1 ELSE c2 FI" => "Cond .{b}. c1 c2"
  28.238 -  "WHILE b INV i DO c OD"   => "While .{b}. i c"
  28.239 -  "WHILE b DO c OD"         == "WHILE b INV CONST undefined DO c OD"
  28.240 -
  28.241 -parse_translation {*
  28.242 -  let
  28.243 -    fun quote_tr [t] = Syntax.quote_tr "_antiquote" t
  28.244 -      | quote_tr ts = raise TERM ("quote_tr", ts);
  28.245 -  in [("_quote", quote_tr)] end
  28.246 -*}
  28.247 -
  28.248 -text {*
  28.249 - As usual in Isabelle syntax translations, the part for printing is
  28.250 - more complicated --- we cannot express parts as macro rules as above.
  28.251 - Don't look here, unless you have to do similar things for yourself.
  28.252 -*}
  28.253 -
  28.254 -print_translation {*
  28.255 -  let
  28.256 -    fun quote_tr' f (t :: ts) =
  28.257 -          Term.list_comb (f $ Syntax.quote_tr' "_antiquote" t, ts)
  28.258 -      | quote_tr' _ _ = raise Match;
  28.259 -
  28.260 -    val assert_tr' = quote_tr' (Syntax.const "_Assert");
  28.261 -
  28.262 -    fun bexp_tr' name ((Const ("Collect", _) $ t) :: ts) =
  28.263 -          quote_tr' (Syntax.const name) (t :: ts)
  28.264 -      | bexp_tr' _ _ = raise Match;
  28.265 -
  28.266 -    fun upd_tr' (x_upd, T) =
  28.267 -      (case try (unsuffix Record.updateN) x_upd of
  28.268 -        SOME x => (x, if T = dummyT then T else Term.domain_type T)
  28.269 -      | NONE => raise Match);
  28.270 -
  28.271 -    fun update_name_tr' (Free x) = Free (upd_tr' x)
  28.272 -      | update_name_tr' ((c as Const ("_free", _)) $ Free x) =
  28.273 -          c $ Free (upd_tr' x)
  28.274 -      | update_name_tr' (Const x) = Const (upd_tr' x)
  28.275 -      | update_name_tr' _ = raise Match;
  28.276 -
  28.277 -    fun K_tr' (Abs (_,_,t)) = if null (loose_bnos t) then t else raise Match
  28.278 -      | K_tr' (Abs (_,_,Abs (_,_,t)$Bound 0)) = if null (loose_bnos t) then t else raise Match
  28.279 -      | K_tr' _ = raise Match;
  28.280 -
  28.281 -    fun assign_tr' (Abs (x, _, f $ k $ Bound 0) :: ts) =
  28.282 -          quote_tr' (Syntax.const "_Assign" $ update_name_tr' f)
  28.283 -            (Abs (x, dummyT, K_tr' k) :: ts)
  28.284 -      | assign_tr' _ = raise Match;
  28.285 -  in
  28.286 -    [("Collect", assert_tr'), ("Basic", assign_tr'),
  28.287 -      ("Cond", bexp_tr' "_Cond"), ("While", bexp_tr' "_While_inv")]
  28.288 -  end
  28.289 -*}
  28.290 -
  28.291 -
  28.292 -subsection {* Rules for single-step proof \label{sec:hoare-isar} *}
  28.293 -
  28.294 -text {*
  28.295 - We are now ready to introduce a set of Hoare rules to be used in
  28.296 - single-step structured proofs in Isabelle/Isar.  We refer to the
  28.297 - concrete syntax introduce above.
  28.298 -
  28.299 - \medskip Assertions of Hoare Logic may be manipulated in
  28.300 - calculational proofs, with the inclusion expressed in terms of sets
  28.301 - or predicates.  Reversed order is supported as well.
  28.302 -*}
  28.303 -
  28.304 -lemma [trans]: "|- P c Q ==> P' <= P ==> |- P' c Q"
  28.305 -  by (unfold Valid_def) blast
  28.306 -lemma [trans] : "P' <= P ==> |- P c Q ==> |- P' c Q"
  28.307 -  by (unfold Valid_def) blast
  28.308 -
  28.309 -lemma [trans]: "Q <= Q' ==> |- P c Q ==> |- P c Q'"
  28.310 -  by (unfold Valid_def) blast
  28.311 -lemma [trans]: "|- P c Q ==> Q <= Q' ==> |- P c Q'"
  28.312 -  by (unfold Valid_def) blast
  28.313 -
  28.314 -lemma [trans]:
  28.315 -    "|- .{\<acute>P}. c Q ==> (!!s. P' s --> P s) ==> |- .{\<acute>P'}. c Q"
  28.316 -  by (simp add: Valid_def)
  28.317 -lemma [trans]:
  28.318 -    "(!!s. P' s --> P s) ==> |- .{\<acute>P}. c Q ==> |- .{\<acute>P'}. c Q"
  28.319 -  by (simp add: Valid_def)
  28.320 -
  28.321 -lemma [trans]:
  28.322 -    "|- P c .{\<acute>Q}. ==> (!!s. Q s --> Q' s) ==> |- P c .{\<acute>Q'}."
  28.323 -  by (simp add: Valid_def)
  28.324 -lemma [trans]:
  28.325 -    "(!!s. Q s --> Q' s) ==> |- P c .{\<acute>Q}. ==> |- P c .{\<acute>Q'}."
  28.326 -  by (simp add: Valid_def)
  28.327 -
  28.328 -
  28.329 -text {*
  28.330 - Identity and basic assignments.\footnote{The $\idt{hoare}$ method
  28.331 - introduced in \S\ref{sec:hoare-vcg} is able to provide proper
  28.332 - instances for any number of basic assignments, without producing
  28.333 - additional verification conditions.}
  28.334 -*}
  28.335 -
  28.336 -lemma skip [intro?]: "|- P SKIP P"
  28.337 -proof -
  28.338 -  have "|- {s. id s : P} SKIP P" by (rule basic)
  28.339 -  thus ?thesis by simp
  28.340 -qed
  28.341 -
  28.342 -lemma assign: "|- P [\<acute>a/\<acute>x] \<acute>x := \<acute>a P"
  28.343 -  by (rule basic)
  28.344 -
  28.345 -text {*
  28.346 - Note that above formulation of assignment corresponds to our
  28.347 - preferred way to model state spaces, using (extensible) record types
  28.348 - in HOL \cite{Naraschewski-Wenzel:1998:HOOL}.  For any record field
  28.349 - $x$, Isabelle/HOL provides a functions $x$ (selector) and
  28.350 - $\idt{x{\dsh}update}$ (update).  Above, there is only a place-holder
  28.351 - appearing for the latter kind of function: due to concrete syntax
  28.352 - \isa{\'x := \'a} also contains \isa{x\_update}.\footnote{Note that due
  28.353 - to the external nature of HOL record fields, we could not even state
  28.354 - a general theorem relating selector and update functions (if this
  28.355 - were required here); this would only work for any particular instance
  28.356 - of record fields introduced so far.}
  28.357 -*}
  28.358 -
  28.359 -text {*
  28.360 - Sequential composition --- normalizing with associativity achieves
  28.361 - proper of chunks of code verified separately.
  28.362 -*}
  28.363 -
  28.364 -lemmas [trans, intro?] = seq
  28.365 -
  28.366 -lemma seq_assoc [simp]: "( |- P c1;(c2;c3) Q) = ( |- P (c1;c2);c3 Q)"
  28.367 -  by (auto simp add: Valid_def)
  28.368 -
  28.369 -text {*
  28.370 - Conditional statements.
  28.371 -*}
  28.372 -
  28.373 -lemmas [trans, intro?] = cond
  28.374 -
  28.375 -lemma [trans, intro?]:
  28.376 -  "|- .{\<acute>P & \<acute>b}. c1 Q
  28.377 -      ==> |- .{\<acute>P & ~ \<acute>b}. c2 Q
  28.378 -      ==> |- .{\<acute>P}. IF \<acute>b THEN c1 ELSE c2 FI Q"
  28.379 -    by (rule cond) (simp_all add: Valid_def)
  28.380 -
  28.381 -text {*
  28.382 - While statements --- with optional invariant.
  28.383 -*}
  28.384 -
  28.385 -lemma [intro?]:
  28.386 -    "|- (P Int b) c P ==> |- P (While b P c) (P Int -b)"
  28.387 -  by (rule while)
  28.388 -
  28.389 -lemma [intro?]:
  28.390 -    "|- (P Int b) c P ==> |- P (While b undefined c) (P Int -b)"
  28.391 -  by (rule while)
  28.392 -
  28.393 -
  28.394 -lemma [intro?]:
  28.395 -  "|- .{\<acute>P & \<acute>b}. c .{\<acute>P}.
  28.396 -    ==> |- .{\<acute>P}. WHILE \<acute>b INV .{\<acute>P}. DO c OD .{\<acute>P & ~ \<acute>b}."
  28.397 -  by (simp add: while Collect_conj_eq Collect_neg_eq)
  28.398 -
  28.399 -lemma [intro?]:
  28.400 -  "|- .{\<acute>P & \<acute>b}. c .{\<acute>P}.
  28.401 -    ==> |- .{\<acute>P}. WHILE \<acute>b DO c OD .{\<acute>P & ~ \<acute>b}."
  28.402 -  by (simp add: while Collect_conj_eq Collect_neg_eq)
  28.403 -
  28.404 -
  28.405 -subsection {* Verification conditions \label{sec:hoare-vcg} *}
  28.406 -
  28.407 -text {*
  28.408 - We now load the \emph{original} ML file for proof scripts and tactic
  28.409 - definition for the Hoare Verification Condition Generator (see
  28.410 - \url{http://isabelle.in.tum.de/library/Hoare/}).  As far as we are
  28.411 - concerned here, the result is a proof method \name{hoare}, which may
  28.412 - be applied to a Hoare Logic assertion to extract purely logical
  28.413 - verification conditions.  It is important to note that the method
  28.414 - requires \texttt{WHILE} loops to be fully annotated with invariants
  28.415 - beforehand.  Furthermore, only \emph{concrete} pieces of code are
  28.416 - handled --- the underlying tactic fails ungracefully if supplied with
  28.417 - meta-variables or parameters, for example.
  28.418 -*}
  28.419 -
  28.420 -lemma SkipRule: "p \<subseteq> q \<Longrightarrow> Valid p (Basic id) q"
  28.421 -  by (auto simp add: Valid_def)
  28.422 -
  28.423 -lemma BasicRule: "p \<subseteq> {s. f s \<in> q} \<Longrightarrow> Valid p (Basic f) q"
  28.424 -  by (auto simp: Valid_def)
  28.425 -
  28.426 -lemma SeqRule: "Valid P c1 Q \<Longrightarrow> Valid Q c2 R \<Longrightarrow> Valid P (c1;c2) R"
  28.427 -  by (auto simp: Valid_def)
  28.428 -
  28.429 -lemma CondRule:
  28.430 -  "p \<subseteq> {s. (s \<in> b \<longrightarrow> s \<in> w) \<and> (s \<notin> b \<longrightarrow> s \<in> w')}
  28.431 -    \<Longrightarrow> Valid w c1 q \<Longrightarrow> Valid w' c2 q \<Longrightarrow> Valid p (Cond b c1 c2) q"
  28.432 -  by (auto simp: Valid_def)
  28.433 -
  28.434 -lemma iter_aux:
  28.435 -  "\<forall>s s'. Sem c s s' --> s : I & s : b --> s' : I ==>
  28.436 -       (\<And>s s'. s : I \<Longrightarrow> iter n b (Sem c) s s' \<Longrightarrow> s' : I & s' ~: b)"
  28.437 -  apply(induct n)
  28.438 -   apply clarsimp
  28.439 -   apply (simp (no_asm_use))
  28.440 -   apply blast
  28.441 -  done
  28.442 -
  28.443 -lemma WhileRule:
  28.444 -    "p \<subseteq> i \<Longrightarrow> Valid (i \<inter> b) c i \<Longrightarrow> i \<inter> (-b) \<subseteq> q \<Longrightarrow> Valid p (While b i c) q"
  28.445 -  apply (clarsimp simp: Valid_def)
  28.446 -  apply (drule iter_aux)
  28.447 -    prefer 2
  28.448 -    apply assumption
  28.449 -   apply blast
  28.450 -  apply blast
  28.451 -  done
  28.452 -
  28.453 -lemma Compl_Collect: "- Collect b = {x. \<not> b x}"
  28.454 -  by blast
  28.455 -
  28.456 -lemmas AbortRule = SkipRule  -- "dummy version"
  28.457 -
  28.458 -use "~~/src/HOL/Hoare/hoare_tac.ML"
  28.459 -
  28.460 -method_setup hoare = {*
  28.461 -  Scan.succeed (fn ctxt =>
  28.462 -    (SIMPLE_METHOD'
  28.463 -       (hoare_tac ctxt (simp_tac (HOL_basic_ss addsimps [@{thm "Record.K_record_comp"}] ))))) *}
  28.464 -  "verification condition generator for Hoare logic"
  28.465 -
  28.466 -end
    29.1 --- a/src/HOL/Isar_examples/Hoare_Ex.thy	Tue Oct 20 19:36:52 2009 +0200
    29.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.3 @@ -1,329 +0,0 @@
    29.4 -header {* Using Hoare Logic *}
    29.5 -
    29.6 -theory Hoare_Ex
    29.7 -imports Hoare
    29.8 -begin
    29.9 -
   29.10 -subsection {* State spaces *}
   29.11 -
   29.12 -text {*
   29.13 - First of all we provide a store of program variables that
   29.14 - occur in any of the programs considered later.  Slightly unexpected
   29.15 - things may happen when attempting to work with undeclared variables.
   29.16 -*}
   29.17 -
   29.18 -record vars =
   29.19 -  I :: nat
   29.20 -  M :: nat
   29.21 -  N :: nat
   29.22 -  S :: nat
   29.23 -
   29.24 -text {*
   29.25 - While all of our variables happen to have the same type, nothing
   29.26 - would prevent us from working with many-sorted programs as well, or
   29.27 - even polymorphic ones.  Also note that Isabelle/HOL's extensible
   29.28 - record types even provides simple means to extend the state space
   29.29 - later.
   29.30 -*}
   29.31 -
   29.32 -
   29.33 -subsection {* Basic examples *}
   29.34 -
   29.35 -text {*
   29.36 - We look at few trivialities involving assignment and sequential
   29.37 - composition, in order to get an idea of how to work with our
   29.38 - formulation of Hoare Logic.
   29.39 -*}
   29.40 -
   29.41 -text {*
   29.42 - Using the basic \name{assign} rule directly is a bit cumbersome.
   29.43 -*}
   29.44 -
   29.45 -lemma
   29.46 -  "|- .{\<acute>(N_update (\<lambda>_. (2 * \<acute>N))) : .{\<acute>N = 10}.}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
   29.47 -  by (rule assign)
   29.48 -
   29.49 -text {*
   29.50 - Certainly we want the state modification already done, e.g.\ by
   29.51 - simplification.  The \name{hoare} method performs the basic state
   29.52 - update for us; we may apply the Simplifier afterwards to achieve
   29.53 - ``obvious'' consequences as well.
   29.54 -*}
   29.55 -
   29.56 -lemma "|- .{True}. \<acute>N := 10 .{\<acute>N = 10}."
   29.57 -  by hoare
   29.58 -
   29.59 -lemma "|- .{2 * \<acute>N = 10}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
   29.60 -  by hoare
   29.61 -
   29.62 -lemma "|- .{\<acute>N = 5}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
   29.63 -  by hoare simp
   29.64 -
   29.65 -lemma "|- .{\<acute>N + 1 = a + 1}. \<acute>N := \<acute>N + 1 .{\<acute>N = a + 1}."
   29.66 -  by hoare
   29.67 -
   29.68 -lemma "|- .{\<acute>N = a}. \<acute>N := \<acute>N + 1 .{\<acute>N = a + 1}."
   29.69 -  by hoare simp
   29.70 -
   29.71 -lemma "|- .{a = a & b = b}. \<acute>M := a; \<acute>N := b .{\<acute>M = a & \<acute>N = b}."
   29.72 -  by hoare
   29.73 -
   29.74 -lemma "|- .{True}. \<acute>M := a; \<acute>N := b .{\<acute>M = a & \<acute>N = b}."
   29.75 -  by hoare simp
   29.76 -
   29.77 -lemma
   29.78 -"|- .{\<acute>M = a & \<acute>N = b}.
   29.79 -    \<acute>I := \<acute>M; \<acute>M := \<acute>N; \<acute>N := \<acute>I
   29.80 -    .{\<acute>M = b & \<acute>N = a}."
   29.81 -  by hoare simp
   29.82 -
   29.83 -text {*
   29.84 - It is important to note that statements like the following one can
   29.85 - only be proven for each individual program variable.  Due to the
   29.86 - extra-logical nature of record fields, we cannot formulate a theorem
   29.87 - relating record selectors and updates schematically.
   29.88 -*}
   29.89 -
   29.90 -lemma "|- .{\<acute>N = a}. \<acute>N := \<acute>N .{\<acute>N = a}."
   29.91 -  by hoare
   29.92 -
   29.93 -lemma "|- .{\<acute>x = a}. \<acute>x := \<acute>x .{\<acute>x = a}."
   29.94 -  oops
   29.95 -
   29.96 -lemma
   29.97 -  "Valid {s. x s = a} (Basic (\<lambda>s. x_update (x s) s)) {s. x s = n}"
   29.98 -  -- {* same statement without concrete syntax *}
   29.99 -  oops
  29.100 -
  29.101 -
  29.102 -text {*
  29.103 - In the following assignments we make use of the consequence rule in
  29.104 - order to achieve the intended precondition.  Certainly, the
  29.105 - \name{hoare} method is able to handle this case, too.
  29.106 -*}
  29.107 -
  29.108 -lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  29.109 -proof -
  29.110 -  have ".{\<acute>M = \<acute>N}. <= .{\<acute>M + 1 ~= \<acute>N}."
  29.111 -    by auto
  29.112 -  also have "|- ... \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  29.113 -    by hoare
  29.114 -  finally show ?thesis .
  29.115 -qed
  29.116 -
  29.117 -lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  29.118 -proof -
  29.119 -  have "!!m n::nat. m = n --> m + 1 ~= n"
  29.120 -      -- {* inclusion of assertions expressed in ``pure'' logic, *}
  29.121 -      -- {* without mentioning the state space *}
  29.122 -    by simp
  29.123 -  also have "|- .{\<acute>M + 1 ~= \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  29.124 -    by hoare
  29.125 -  finally show ?thesis .
  29.126 -qed
  29.127 -
  29.128 -lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  29.129 -  by hoare simp
  29.130 -
  29.131 -
  29.132 -subsection {* Multiplication by addition *}
  29.133 -
  29.134 -text {*
  29.135 - We now do some basic examples of actual \texttt{WHILE} programs.
  29.136 - This one is a loop for calculating the product of two natural
  29.137 - numbers, by iterated addition.  We first give detailed structured
  29.138 - proof based on single-step Hoare rules.
  29.139 -*}
  29.140 -
  29.141 -lemma
  29.142 -  "|- .{\<acute>M = 0 & \<acute>S = 0}.
  29.143 -      WHILE \<acute>M ~= a
  29.144 -      DO \<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1 OD
  29.145 -      .{\<acute>S = a * b}."
  29.146 -proof -
  29.147 -  let "|- _ ?while _" = ?thesis
  29.148 -  let ".{\<acute>?inv}." = ".{\<acute>S = \<acute>M * b}."
  29.149 -
  29.150 -  have ".{\<acute>M = 0 & \<acute>S = 0}. <= .{\<acute>?inv}." by auto
  29.151 -  also have "|- ... ?while .{\<acute>?inv & ~ (\<acute>M ~= a)}."
  29.152 -  proof
  29.153 -    let ?c = "\<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1"
  29.154 -    have ".{\<acute>?inv & \<acute>M ~= a}. <= .{\<acute>S + b = (\<acute>M + 1) * b}."
  29.155 -      by auto
  29.156 -    also have "|- ... ?c .{\<acute>?inv}." by hoare
  29.157 -    finally show "|- .{\<acute>?inv & \<acute>M ~= a}. ?c .{\<acute>?inv}." .
  29.158 -  qed
  29.159 -  also have "... <= .{\<acute>S = a * b}." by auto
  29.160 -  finally show ?thesis .
  29.161 -qed
  29.162 -
  29.163 -text {*
  29.164 - The subsequent version of the proof applies the \name{hoare} method
  29.165 - to reduce the Hoare statement to a purely logical problem that can be
  29.166 - solved fully automatically.  Note that we have to specify the
  29.167 - \texttt{WHILE} loop invariant in the original statement.
  29.168 -*}
  29.169 -
  29.170 -lemma
  29.171 -  "|- .{\<acute>M = 0 & \<acute>S = 0}.
  29.172 -      WHILE \<acute>M ~= a
  29.173 -      INV .{\<acute>S = \<acute>M * b}.
  29.174 -      DO \<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1 OD
  29.175 -      .{\<acute>S = a * b}."
  29.176 -  by hoare auto
  29.177 -
  29.178 -
  29.179 -subsection {* Summing natural numbers *}
  29.180 -
  29.181 -text {*
  29.182 - We verify an imperative program to sum natural numbers up to a given
  29.183 - limit.  First some functional definition for proper specification of
  29.184 - the problem.
  29.185 -*}
  29.186 -
  29.187 -text {*
  29.188 - The following proof is quite explicit in the individual steps taken,
  29.189 - with the \name{hoare} method only applied locally to take care of
  29.190 - assignment and sequential composition.  Note that we express
  29.191 - intermediate proof obligation in pure logic, without referring to the
  29.192 - state space.
  29.193 -*}
  29.194 -
  29.195 -declare atLeast0LessThan[symmetric,simp]
  29.196 -
  29.197 -theorem
  29.198 -  "|- .{True}.
  29.199 -      \<acute>S := 0; \<acute>I := 1;
  29.200 -      WHILE \<acute>I ~= n
  29.201 -      DO
  29.202 -        \<acute>S := \<acute>S + \<acute>I;
  29.203 -        \<acute>I := \<acute>I + 1
  29.204 -      OD
  29.205 -      .{\<acute>S = (SUM j<n. j)}."
  29.206 -  (is "|- _ (_; ?while) _")
  29.207 -proof -
  29.208 -  let ?sum = "\<lambda>k::nat. SUM j<k. j"
  29.209 -  let ?inv = "\<lambda>s i::nat. s = ?sum i"
  29.210 -
  29.211 -  have "|- .{True}. \<acute>S := 0; \<acute>I := 1 .{?inv \<acute>S \<acute>I}."
  29.212 -  proof -
  29.213 -    have "True --> 0 = ?sum 1"
  29.214 -      by simp
  29.215 -    also have "|- .{...}. \<acute>S := 0; \<acute>I := 1 .{?inv \<acute>S \<acute>I}."
  29.216 -      by hoare
  29.217 -    finally show ?thesis .
  29.218 -  qed
  29.219 -  also have "|- ... ?while .{?inv \<acute>S \<acute>I & ~ \<acute>I ~= n}."
  29.220 -  proof
  29.221 -    let ?body = "\<acute>S := \<acute>S + \<acute>I; \<acute>I := \<acute>I + 1"
  29.222 -    have "!!s i. ?inv s i & i ~= n -->  ?inv (s + i) (i + 1)"
  29.223 -      by simp
  29.224 -    also have "|- .{\<acute>S + \<acute>I = ?sum (\<acute>I + 1)}. ?body .{?inv \<acute>S \<acute>I}."
  29.225 -      by hoare
  29.226 -    finally show "|- .{?inv \<acute>S \<acute>I & \<acute>I ~= n}. ?body .{?inv \<acute>S \<acute>I}." .
  29.227 -  qed
  29.228 -  also have "!!s i. s = ?sum i & ~ i ~= n --> s = ?sum n"
  29.229 -    by simp
  29.230 -  finally show ?thesis .
  29.231 -qed
  29.232 -
  29.233 -text {*
  29.234 - The next version uses the \name{hoare} method, while still explaining
  29.235 - the resulting proof obligations in an abstract, structured manner.
  29.236 -*}
  29.237 -
  29.238 -theorem
  29.239 -  "|- .{True}.
  29.240 -      \<acute>S := 0; \<acute>I := 1;
  29.241 -      WHILE \<acute>I ~= n
  29.242 -      INV .{\<acute>S = (SUM j<\<acute>I. j)}.
  29.243 -      DO
  29.244 -        \<acute>S := \<acute>S + \<acute>I;
  29.245 -        \<acute>I := \<acute>I + 1
  29.246 -      OD
  29.247 -      .{\<acute>S = (SUM j<n. j)}."
  29.248 -proof -
  29.249 -  let ?sum = "\<lambda>k::nat. SUM j<k. j"
  29.250 -  let ?inv = "\<lambda>s i::nat. s = ?sum i"
  29.251 -
  29.252 -  show ?thesis
  29.253 -  proof hoare
  29.254 -    show "?inv 0 1" by simp
  29.255 -  next
  29.256 -    fix s i assume "?inv s i & i ~= n"
  29.257 -    thus "?inv (s + i) (i + 1)" by simp
  29.258 -  next
  29.259 -    fix s i assume "?inv s i & ~ i ~= n"
  29.260 -    thus "s = ?sum n" by simp
  29.261 -  qed
  29.262 -qed
  29.263 -
  29.264 -text {*
  29.265 - Certainly, this proof may be done fully automatic as well, provided
  29.266 - that the invariant is given beforehand.
  29.267 -*}
  29.268 -
  29.269 -theorem
  29.270 -  "|- .{True}.
  29.271 -      \<acute>S := 0; \<acute>I := 1;
  29.272 -      WHILE \<acute>I ~= n
  29.273 -      INV .{\<acute>S = (SUM j<\<acute>I. j)}.
  29.274 -      DO
  29.275 -        \<acute>S := \<acute>S + \<acute>I;
  29.276 -        \<acute>I := \<acute>I + 1
  29.277 -      OD
  29.278 -      .{\<acute>S = (SUM j<n. j)}."
  29.279 -  by hoare auto
  29.280 -
  29.281 -
  29.282 -subsection{* Time *}
  29.283 -
  29.284 -text{*
  29.285 -  A simple embedding of time in Hoare logic: function @{text timeit}
  29.286 -  inserts an extra variable to keep track of the elapsed time.
  29.287 -*}
  29.288 -
  29.289 -record tstate = time :: nat
  29.290 -
  29.291 -types 'a time = "\<lparr>time :: nat, \<dots> :: 'a\<rparr>"
  29.292 -
  29.293 -consts timeit :: "'a time com \<Rightarrow> 'a time com"
  29.294 -primrec
  29.295 -  "timeit (Basic f) = (Basic f; Basic(\<lambda>s. s\<lparr>time := Suc (time s)\<rparr>))"
  29.296 -  "timeit (c1; c2) = (timeit c1; timeit c2)"
  29.297 -  "timeit (Cond b c1 c2) = Cond b (timeit c1) (timeit c2)"
  29.298 -  "timeit (While b iv c) = While b iv (timeit c)"
  29.299 -
  29.300 -record tvars = tstate +
  29.301 -  I :: nat
  29.302 -  J :: nat
  29.303 -
  29.304 -lemma lem: "(0::nat) < n \<Longrightarrow> n + n \<le> Suc (n * n)"
  29.305 -  by (induct n) simp_all
  29.306 -
  29.307 -lemma "|- .{i = \<acute>I & \<acute>time = 0}.
  29.308 - timeit(
  29.309 - WHILE \<acute>I \<noteq> 0
  29.310 - INV .{2*\<acute>time + \<acute>I*\<acute>I + 5*\<acute>I = i*i + 5*i}.
  29.311 - DO
  29.312 -   \<acute>J := \<acute>I;
  29.313 -   WHILE \<acute>J \<noteq> 0
  29.314 -   INV .{0 < \<acute>I & 2*\<acute>time + \<acute>I*\<acute>I + 3*\<acute>I + 2*\<acute>J - 2 = i*i + 5*i}.
  29.315 -   DO \<acute>J := \<acute>J - 1 OD;
  29.316 -   \<acute>I := \<acute>I - 1
  29.317 - OD
  29.318 - ) .{2*\<acute>time = i*i + 5*i}."
  29.319 -  apply simp
  29.320 -  apply hoare
  29.321 -      apply simp
  29.322 -     apply clarsimp
  29.323 -    apply clarsimp
  29.324 -   apply arith
  29.325 -   prefer 2
  29.326 -   apply clarsimp
  29.327 -  apply (clarsimp simp: nat_distrib)
  29.328 -  apply (frule lem)
  29.329 -  apply arith
  29.330 -  done
  29.331 -
  29.332 -end
    30.1 --- a/src/HOL/Isar_examples/Knaster_Tarski.thy	Tue Oct 20 19:36:52 2009 +0200
    30.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    30.3 @@ -1,111 +0,0 @@
    30.4 -(*  Title:      HOL/Isar_examples/Knaster_Tarski.thy
    30.5 -    Author:     Markus Wenzel, TU Muenchen
    30.6 -
    30.7 -Typical textbook proof example.
    30.8 -*)
    30.9 -
   30.10 -header {* Textbook-style reasoning: the Knaster-Tarski Theorem *}
   30.11 -
   30.12 -theory Knaster_Tarski
   30.13 -imports Main Lattice_Syntax
   30.14 -begin
   30.15 -
   30.16 -
   30.17 -subsection {* Prose version *}
   30.18 -
   30.19 -text {*
   30.20 -  According to the textbook \cite[pages 93--94]{davey-priestley}, the
   30.21 -  Knaster-Tarski fixpoint theorem is as follows.\footnote{We have
   30.22 -  dualized the argument, and tuned the notation a little bit.}
   30.23 -
   30.24 -  \textbf{The Knaster-Tarski Fixpoint Theorem.}  Let @{text L} be a
   30.25 -  complete lattice and @{text "f: L \<rightarrow> L"} an order-preserving map.
   30.26 -  Then @{text "\<Sqinter>{x \<in> L | f(x) \<le> x}"} is a fixpoint of @{text f}.
   30.27 -
   30.28 -  \textbf{Proof.} Let @{text "H = {x \<in> L | f(x) \<le> x}"} and @{text "a =
   30.29 -  \<Sqinter>H"}.  For all @{text "x \<in> H"} we have @{text "a \<le> x"}, so @{text
   30.30 -  "f(a) \<le> f(x) \<le> x"}.  Thus @{text "f(a)"} is a lower bound of @{text
   30.31 -  H}, whence @{text "f(a) \<le> a"}.  We now use this inequality to prove
   30.32 -  the reverse one (!) and thereby complete the proof that @{text a} is
   30.33 -  a fixpoint.  Since @{text f} is order-preserving, @{text "f(f(a)) \<le>
   30.34 -  f(a)"}.  This says @{text "f(a) \<in> H"}, so @{text "a \<le> f(a)"}.
   30.35 -*}
   30.36 -
   30.37 -
   30.38 -subsection {* Formal versions *}
   30.39 -
   30.40 -text {*
   30.41 -  The Isar proof below closely follows the original presentation.
   30.42 -  Virtually all of the prose narration has been rephrased in terms of
   30.43 -  formal Isar language elements.  Just as many textbook-style proofs,
   30.44 -  there is a strong bias towards forward proof, and several bends in
   30.45 -  the course of reasoning.
   30.46 -*}
   30.47 -
   30.48 -theorem Knaster_Tarski:
   30.49 -  fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
   30.50 -  assumes "mono f"
   30.51 -  shows "\<exists>a. f a = a"
   30.52 -proof
   30.53 -  let ?H = "{u. f u \<le> u}"
   30.54 -  let ?a = "\<Sqinter>?H"
   30.55 -  show "f ?a = ?a"
   30.56 -  proof -
   30.57 -    {
   30.58 -      fix x
   30.59 -      assume "x \<in> ?H"
   30.60 -      then have "?a \<le> x" by (rule Inf_lower)
   30.61 -      with `mono f` have "f ?a \<le> f x" ..
   30.62 -      also from `x \<in> ?H` have "\<dots> \<le> x" ..
   30.63 -      finally have "f ?a \<le> x" .
   30.64 -    }
   30.65 -    then have "f ?a \<le> ?a" by (rule Inf_greatest)
   30.66 -    {
   30.67 -      also presume "\<dots> \<le> f ?a"
   30.68 -      finally (order_antisym) show ?thesis .
   30.69 -    }
   30.70 -    from `mono f` and `f ?a \<le> ?a` have "f (f ?a) \<le> f ?a" ..
   30.71 -    then have "f ?a \<in> ?H" ..
   30.72 -    then show "?a \<le> f ?a" by (rule Inf_lower)
   30.73 -  qed
   30.74 -qed
   30.75 -
   30.76 -text {*
   30.77 -  Above we have used several advanced Isar language elements, such as
   30.78 -  explicit block structure and weak assumptions.  Thus we have
   30.79 -  mimicked the particular way of reasoning of the original text.
   30.80 -
   30.81 -  In the subsequent version the order of reasoning is changed to
   30.82 -  achieve structured top-down decomposition of the problem at the
   30.83 -  outer level, while only the inner steps of reasoning are done in a
   30.84 -  forward manner.  We are certainly more at ease here, requiring only
   30.85 -  the most basic features of the Isar language.
   30.86 -*}
   30.87 -
   30.88 -theorem Knaster_Tarski':
   30.89 -  fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
   30.90 -  assumes "mono f"
   30.91 -  shows "\<exists>a. f a = a"
   30.92 -proof
   30.93 -  let ?H = "{u. f u \<le> u}"
   30.94 -  let ?a = "\<Sqinter>?H"
   30.95 -  show "f ?a = ?a"
   30.96 -  proof (rule order_antisym)
   30.97 -    show "f ?a \<le> ?a"
   30.98 -    proof (rule Inf_greatest)
   30.99 -      fix x
  30.100 -      assume "x \<in> ?H"
  30.101 -      then have "?a \<le> x" by (rule Inf_lower)
  30.102 -      with `mono f` have "f ?a \<le> f x" ..
  30.103 -      also from `x \<in> ?H` have "\<dots> \<le> x" ..
  30.104 -      finally show "f ?a \<le> x" .
  30.105 -    qed
  30.106 -    show "?a \<le> f ?a"
  30.107 -    proof (rule Inf_lower)
  30.108 -      from `mono f` and `f ?a \<le> ?a` have "f (f ?a) \<le> f ?a" ..
  30.109 -      then show "f ?a \<in> ?H" ..
  30.110 -    qed
  30.111 -  qed
  30.112 -qed
  30.113 -
  30.114 -end
    31.1 --- a/src/HOL/Isar_examples/Mutilated_Checkerboard.thy	Tue Oct 20 19:36:52 2009 +0200
    31.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    31.3 @@ -1,300 +0,0 @@
    31.4 -(*  Title:      HOL/Isar_examples/Mutilated_Checkerboard.thy
    31.5 -    Author:     Markus Wenzel, TU Muenchen (Isar document)
    31.6 -    Author:     Lawrence C Paulson, Cambridge University Computer Laboratory (original scripts)
    31.7 -*)
    31.8 -
    31.9 -header {* The Mutilated Checker Board Problem *}
   31.10 -
   31.11 -theory Mutilated_Checkerboard
   31.12 -imports Main
   31.13 -begin
   31.14 -
   31.15 -text {*
   31.16 - The Mutilated Checker Board Problem, formalized inductively.  See
   31.17 - \cite{paulson-mutilated-board} and
   31.18 - \url{http://isabelle.in.tum.de/library/HOL/Induct/Mutil.html} for the
   31.19 - original tactic script version.
   31.20 -*}
   31.21 -
   31.22 -subsection {* Tilings *}
   31.23 -
   31.24 -inductive_set
   31.25 -  tiling :: "'a set set => 'a set set"
   31.26 -  for A :: "'a set set"
   31.27 -  where
   31.28 -    empty: "{} : tiling A"
   31.29 -  | Un: "a : A ==> t : tiling A ==> a <= - t ==> a Un t : tiling A"
   31.30 -
   31.31 -
   31.32 -text "The union of two disjoint tilings is a tiling."
   31.33 -
   31.34 -lemma tiling_Un:
   31.35 -  assumes "t : tiling A" and "u : tiling A" and "t Int u = {}"
   31.36 -  shows "t Un u : tiling A"
   31.37 -proof -
   31.38 -  let ?T = "tiling A"
   31.39 -  from `t : ?T` and `t Int u = {}`
   31.40 -  show "t Un u : ?T"
   31.41 -  proof (induct t)
   31.42 -    case empty
   31.43 -    with `u : ?T` show "{} Un u : ?T" by simp
   31.44 -  next
   31.45 -    case (Un a t)
   31.46 -    show "(a Un t) Un u : ?T"
   31.47 -    proof -
   31.48 -      have "a Un (t Un u) : ?T"
   31.49 -        using `a : A`
   31.50 -      proof (rule tiling.Un)
   31.51 -        from `(a Un t) Int u = {}` have "t Int u = {}" by blast
   31.52 -        then show "t Un u: ?T" by (rule Un)
   31.53 -        from `a <= - t` and `(a Un t) Int u = {}`
   31.54 -        show "a <= - (t Un u)" by blast
   31.55 -      qed
   31.56 -      also have "a Un (t Un u) = (a Un t) Un u"
   31.57 -        by (simp only: Un_assoc)
   31.58 -      finally show ?thesis .
   31.59 -    qed
   31.60 -  qed
   31.61 -qed
   31.62 -
   31.63 -
   31.64 -subsection {* Basic properties of ``below'' *}
   31.65 -
   31.66 -constdefs
   31.67 -  below :: "nat => nat set"
   31.68 -  "below n == {i. i < n}"
   31.69 -
   31.70 -lemma below_less_iff [iff]: "(i: below k) = (i < k)"
   31.71 -  by (simp add: below_def)
   31.72 -
   31.73 -lemma below_0: "below 0 = {}"
   31.74 -  by (simp add: below_def)
   31.75 -
   31.76 -lemma Sigma_Suc1:
   31.77 -    "m = n + 1 ==> below m <*> B = ({n} <*> B) Un (below n <*> B)"
   31.78 -  by (simp add: below_def less_Suc_eq) blast
   31.79 -
   31.80 -lemma Sigma_Suc2:
   31.81 -    "m = n + 2 ==> A <*> below m =
   31.82 -      (A <*> {n}) Un (A <*> {n + 1}) Un (A <*> below n)"
   31.83 -  by (auto simp add: below_def)
   31.84 -
   31.85 -lemmas Sigma_Suc = Sigma_Suc1 Sigma_Suc2
   31.86 -
   31.87 -
   31.88 -subsection {* Basic properties of ``evnodd'' *}
   31.89 -
   31.90 -constdefs
   31.91 -  evnodd :: "(nat * nat) set => nat => (nat * nat) set"
   31.92 -  "evnodd A b == A Int {(i, j). (i + j) mod 2 = b}"
   31.93 -
   31.94 -lemma evnodd_iff:
   31.95 -    "(i, j): evnodd A b = ((i, j): A  & (i + j) mod 2 = b)"
   31.96 -  by (simp add: evnodd_def)
   31.97 -
   31.98 -lemma evnodd_subset: "evnodd A b <= A"
   31.99 -  by (unfold evnodd_def, rule Int_lower1)
  31.100 -
  31.101 -lemma evnoddD: "x : evnodd A b ==> x : A"
  31.102 -  by (rule subsetD, rule evnodd_subset)
  31.103 -
  31.104 -lemma evnodd_finite: "finite A ==> finite (evnodd A b)"
  31.105 -  by (rule finite_subset, rule evnodd_subset)
  31.106 -
  31.107 -lemma evnodd_Un: "evnodd (A Un B) b = evnodd A b Un evnodd B b"
  31.108 -  by (unfold evnodd_def) blast
  31.109 -
  31.110 -lemma evnodd_Diff: "evnodd (A - B) b = evnodd A b - evnodd B b"
  31.111 -  by (unfold evnodd_def) blast
  31.112 -
  31.113 -lemma evnodd_empty: "evnodd {} b = {}"
  31.114 -  by (simp add: evnodd_def)
  31.115 -
  31.116 -lemma evnodd_insert: "evnodd (insert (i, j) C) b =
  31.117 -    (if (i + j) mod 2 = b
  31.118 -      then insert (i, j) (evnodd C b) else evnodd C b)"
  31.119 -  by (simp add: evnodd_def)
  31.120 -
  31.121 -
  31.122 -subsection {* Dominoes *}
  31.123 -
  31.124 -inductive_set
  31.125 -  domino :: "(nat * nat) set set"
  31.126 -  where
  31.127 -    horiz: "{(i, j), (i, j + 1)} : domino"
  31.128 -  | vertl: "{(i, j), (i + 1, j)} : domino"
  31.129 -
  31.130 -lemma dominoes_tile_row:
  31.131 -  "{i} <*> below (2 * n) : tiling domino"
  31.132 -  (is "?B n : ?T")
  31.133 -proof (induct n)
  31.134 -  case 0
  31.135 -  show ?case by (simp add: below_0 tiling.empty)
  31.136 -next
  31.137 -  case (Suc n)
  31.138 -  let ?a = "{i} <*> {2 * n + 1} Un {i} <*> {2 * n}"
  31.139 -  have "?B (Suc n) = ?a Un ?B n"
  31.140 -    by (auto simp add: Sigma_Suc Un_assoc)
  31.141 -  moreover have "... : ?T"
  31.142 -  proof (rule tiling.Un)
  31.143 -    have "{(i, 2 * n), (i, 2 * n + 1)} : domino"
  31.144 -      by (rule domino.horiz)
  31.145 -    also have "{(i, 2 * n), (i, 2 * n + 1)} = ?a" by blast
  31.146 -    finally show "... : domino" .
  31.147 -    show "?B n : ?T" by (rule Suc)
  31.148 -    show "?a <= - ?B n" by blast
  31.149 -  qed
  31.150 -  ultimately show ?case by simp
  31.151 -qed
  31.152 -
  31.153 -lemma dominoes_tile_matrix:
  31.154 -  "below m <*> below (2 * n) : tiling domino"
  31.155 -  (is "?B m : ?T")
  31.156 -proof (induct m)
  31.157 -  case 0
  31.158 -  show ?case by (simp add: below_0 tiling.empty)
  31.159 -next
  31.160 -  case (Suc m)
  31.161 -  let ?t = "{m} <*> below (2 * n)"
  31.162 -  have "?B (Suc m) = ?t Un ?B m" by (simp add: Sigma_Suc)
  31.163 -  moreover have "... : ?T"
  31.164 -  proof (rule tiling_Un)
  31.165 -    show "?t : ?T" by (rule dominoes_tile_row)
  31.166 -    show "?B m : ?T" by (rule Suc)
  31.167 -    show "?t Int ?B m = {}" by blast
  31.168 -  qed
  31.169 -  ultimately show ?case by simp
  31.170 -qed
  31.171 -
  31.172 -lemma domino_singleton:
  31.173 -  assumes d: "d : domino" and "b < 2"
  31.174 -  shows "EX i j. evnodd d b = {(i, j)}"  (is "?P d")
  31.175 -  using d
  31.176 -proof induct
  31.177 -  from `b < 2` have b_cases: "b = 0 | b = 1" by arith
  31.178 -  fix i j
  31.179 -  note [simp] = evnodd_empty evnodd_insert mod_Suc
  31.180 -  from b_cases show "?P {(i, j), (i, j + 1)}" by rule auto
  31.181 -  from b_cases show "?P {(i, j), (i + 1, j)}" by rule auto
  31.182 -qed
  31.183 -
  31.184 -lemma domino_finite:
  31.185 -  assumes d: "d: domino"
  31.186 -  shows "finite d"
  31.187 -  using d
  31.188 -proof induct
  31.189 -  fix i j :: nat
  31.190 -  show "finite {(i, j), (i, j + 1)}" by (intro finite.intros)
  31.191 -  show "finite {(i, j), (i + 1, j)}" by (intro finite.intros)
  31.192 -qed
  31.193 -
  31.194 -
  31.195 -subsection {* Tilings of dominoes *}
  31.196 -
  31.197 -lemma tiling_domino_finite:
  31.198 -  assumes t: "t : tiling domino"  (is "t : ?T")
  31.199 -  shows "finite t"  (is "?F t")
  31.200 -  using t
  31.201 -proof induct
  31.202 -  show "?F {}" by (rule finite.emptyI)
  31.203 -  fix a t assume "?F t"
  31.204 -  assume "a : domino" then have "?F a" by (rule domino_finite)
  31.205 -  from this and `?F t` show "?F (a Un t)" by (rule finite_UnI)
  31.206 -qed
  31.207 -
  31.208 -lemma tiling_domino_01:
  31.209 -  assumes t: "t : tiling domino"  (is "t : ?T")
  31.210 -  shows "card (evnodd t 0) = card (evnodd t 1)"
  31.211 -  using t
  31.212 -proof induct
  31.213 -  case empty
  31.214 -  show ?case by (simp add: evnodd_def)
  31.215 -next
  31.216 -  case (Un a t)
  31.217 -  let ?e = evnodd
  31.218 -  note hyp = `card (?e t 0) = card (?e t 1)`
  31.219 -    and at = `a <= - t`
  31.220 -  have card_suc:
  31.221 -    "!!b. b < 2 ==> card (?e (a Un t) b) = Suc (card (?e t b))"
  31.222 -  proof -
  31.223 -    fix b :: nat assume "b < 2"
  31.224 -    have "?e (a Un t) b = ?e a b Un ?e t b" by (rule evnodd_Un)
  31.225 -    also obtain i j where e: "?e a b = {(i, j)}"
  31.226 -    proof -
  31.227 -      from `a \<in> domino` and `b < 2`
  31.228 -      have "EX i j. ?e a b = {(i, j)}" by (rule domino_singleton)
  31.229 -      then show ?thesis by (blast intro: that)
  31.230 -    qed
  31.231 -    moreover have "... Un ?e t b = insert (i, j) (?e t b)" by simp
  31.232 -    moreover have "card ... = Suc (card (?e t b))"
  31.233 -    proof (rule card_insert_disjoint)
  31.234 -      from `t \<in> tiling domino` have "finite t"
  31.235 -        by (rule tiling_domino_finite)
  31.236 -      then show "finite (?e t b)"
  31.237 -        by (rule evnodd_finite)
  31.238 -      from e have "(i, j) : ?e a b" by simp
  31.239 -      with at show "(i, j) ~: ?e t b" by (blast dest: evnoddD)
  31.240 -    qed
  31.241 -    ultimately show "?thesis b" by simp
  31.242 -  qed
  31.243 -  then have "card (?e (a Un t) 0) = Suc (card (?e t 0))" by simp
  31.244 -  also from hyp have "card (?e t 0) = card (?e t 1)" .
  31.245 -  also from card_suc have "Suc ... = card (?e (a Un t) 1)"
  31.246 -    by simp
  31.247 -  finally show ?case .
  31.248 -qed
  31.249 -
  31.250 -
  31.251 -subsection {* Main theorem *}
  31.252 -
  31.253 -constdefs
  31.254 -  mutilated_board :: "nat => nat => (nat * nat) set"
  31.255 -  "mutilated_board m n ==
  31.256 -    below (2 * (m + 1)) <*> below (2 * (n + 1))
  31.257 -      - {(0, 0)} - {(2 * m + 1, 2 * n + 1)}"
  31.258 -
  31.259 -theorem mutil_not_tiling: "mutilated_board m n ~: tiling domino"
  31.260 -proof (unfold mutilated_board_def)
  31.261 -  let ?T = "tiling domino"
  31.262 -  let ?t = "below (2 * (m + 1)) <*> below (2 * (n + 1))"
  31.263 -  let ?t' = "?t - {(0, 0)}"
  31.264 -  let ?t'' = "?t' - {(2 * m + 1, 2 * n + 1)}"
  31.265 -
  31.266 -  show "?t'' ~: ?T"
  31.267 -  proof
  31.268 -    have t: "?t : ?T" by (rule dominoes_tile_matrix)
  31.269 -    assume t'': "?t'' : ?T"
  31.270 -
  31.271 -    let ?e = evnodd
  31.272 -    have fin: "finite (?e ?t 0)"
  31.273 -      by (rule evnodd_finite, rule tiling_domino_finite, rule t)
  31.274 -
  31.275 -    note [simp] = evnodd_iff evnodd_empty evnodd_insert evnodd_Diff
  31.276 -    have "card (?e ?t'' 0) < card (?e ?t' 0)"
  31.277 -    proof -
  31.278 -      have "card (?e ?t' 0 - {(2 * m + 1, 2 * n + 1)})
  31.279 -        < card (?e ?t' 0)"
  31.280 -      proof (rule card_Diff1_less)
  31.281 -        from _ fin show "finite (?e ?t' 0)"
  31.282 -          by (rule finite_subset) auto
  31.283 -        show "(2 * m + 1, 2 * n + 1) : ?e ?t' 0" by simp
  31.284 -      qed
  31.285 -      then show ?thesis by simp
  31.286 -    qed
  31.287 -    also have "... < card (?e ?t 0)"
  31.288 -    proof -
  31.289 -      have "(0, 0) : ?e ?t 0" by simp
  31.290 -      with fin have "card (?e ?t 0 - {(0, 0)}) < card (?e ?t 0)"
  31.291 -        by (rule card_Diff1_less)
  31.292 -      then show ?thesis by simp
  31.293 -    qed
  31.294 -    also from t have "... = card (?e ?t 1)"
  31.295 -      by (rule tiling_domino_01)
  31.296 -    also have "?e ?t 1 = ?e ?t'' 1" by simp
  31.297 -    also from t'' have "card ... = card (?e ?t'' 0)"
  31.298 -      by (rule tiling_domino_01 [symmetric])
  31.299 -    finally have "... < ..." . then show False ..
  31.300 -  qed
  31.301 -qed
  31.302 -
  31.303 -end
    32.1 --- a/src/HOL/Isar_examples/Nested_Datatype.thy	Tue Oct 20 19:36:52 2009 +0200
    32.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    32.3 @@ -1,86 +0,0 @@
    32.4 -header {* Nested datatypes *}
    32.5 -
    32.6 -theory Nested_Datatype
    32.7 -imports Main
    32.8 -begin
    32.9 -
   32.10 -subsection {* Terms and substitution *}
   32.11 -
   32.12 -datatype ('a, 'b) "term" =
   32.13 -    Var 'a
   32.14 -  | App 'b "('a, 'b) term list"
   32.15 -
   32.16 -consts
   32.17 -  subst_term :: "('a => ('a, 'b) term) => ('a, 'b) term => ('a, 'b) term"
   32.18 -  subst_term_list ::
   32.19 -    "('a => ('a, 'b) term) => ('a, 'b) term list => ('a, 'b) term list"
   32.20 -
   32.21 -primrec (subst)
   32.22 -  "subst_term f (Var a) = f a"
   32.23 -  "subst_term f (App b ts) = App b (subst_term_list f ts)"
   32.24 -  "subst_term_list f [] = []"
   32.25 -  "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
   32.26 -
   32.27 -
   32.28 -text {*
   32.29 - \medskip A simple lemma about composition of substitutions.
   32.30 -*}
   32.31 -
   32.32 -lemma "subst_term (subst_term f1 o f2) t =
   32.33 -      subst_term f1 (subst_term f2 t)"
   32.34 -  and "subst_term_list (subst_term f1 o f2) ts =
   32.35 -      subst_term_list f1 (subst_term_list f2 ts)"
   32.36 -  by (induct t and ts) simp_all
   32.37 -
   32.38 -lemma "subst_term (subst_term f1 o f2) t =
   32.39 -  subst_term f1 (subst_term f2 t)"
   32.40 -proof -
   32.41 -  let "?P t" = ?thesis
   32.42 -  let ?Q = "\<lambda>ts. subst_term_list (subst_term f1 o f2) ts =
   32.43 -    subst_term_list f1 (subst_term_list f2 ts)"
   32.44 -  show ?thesis
   32.45 -  proof (induct t)
   32.46 -    fix a show "?P (Var a)" by simp
   32.47 -  next
   32.48 -    fix b ts assume "?Q ts"
   32.49 -    then show "?P (App b ts)"
   32.50 -      by (simp only: subst.simps)
   32.51 -  next
   32.52 -    show "?Q []" by simp
   32.53 -  next
   32.54 -    fix t ts
   32.55 -    assume "?P t" "?Q ts" then show "?Q (t # ts)"
   32.56 -      by (simp only: subst.simps)
   32.57 -  qed
   32.58 -qed
   32.59 -
   32.60 -
   32.61 -subsection {* Alternative induction *}
   32.62 -
   32.63 -theorem term_induct' [case_names Var App]:
   32.64 -  assumes var: "!!a. P (Var a)"
   32.65 -    and app: "!!b ts. list_all P ts ==> P (App b ts)"
   32.66 -  shows "P t"
   32.67 -proof (induct t)
   32.68 -  fix a show "P (Var a)" by (rule var)
   32.69 -next
   32.70 -  fix b t ts assume "list_all P ts"
   32.71 -  then show "P (App b ts)" by (rule app)
   32.72 -next
   32.73 -  show "list_all P []" by simp
   32.74 -next
   32.75 -  fix t ts assume "P t" "list_all P ts"
   32.76 -  then show "list_all P (t # ts)" by simp
   32.77 -qed
   32.78 -
   32.79 -lemma
   32.80 -  "subst_term (subst_term f1 o f2) t = subst_term f1 (subst_term f2 t)"
   32.81 -proof (induct t rule: term_induct')
   32.82 -  case (Var a)
   32.83 -  show ?case by (simp add: o_def)
   32.84 -next
   32.85 -  case (App b ts)
   32.86 -  then show ?case by (induct ts) simp_all
   32.87 -qed
   32.88 -
   32.89 -end
    33.1 --- a/src/HOL/Isar_examples/Peirce.thy	Tue Oct 20 19:36:52 2009 +0200
    33.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    33.3 @@ -1,90 +0,0 @@
    33.4 -(*  Title:      HOL/Isar_examples/Peirce.thy
    33.5 -    Author:     Markus Wenzel, TU Muenchen
    33.6 -*)
    33.7 -
    33.8 -header {* Peirce's Law *}
    33.9 -
   33.10 -theory Peirce
   33.11 -imports Main
   33.12 -begin
   33.13 -
   33.14 -text {*
   33.15 - We consider Peirce's Law: $((A \impl B) \impl A) \impl A$.  This is
   33.16 - an inherently non-intuitionistic statement, so its proof will
   33.17 - certainly involve some form of classical contradiction.
   33.18 -
   33.19 - The first proof is again a well-balanced combination of plain
   33.20 - backward and forward reasoning.  The actual classical step is where
   33.21 - the negated goal may be introduced as additional assumption.  This
   33.22 - eventually leads to a contradiction.\footnote{The rule involved there
   33.23 - is negation elimination; it holds in intuitionistic logic as well.}
   33.24 -*}
   33.25 -
   33.26 -theorem "((A --> B) --> A) --> A"
   33.27 -proof
   33.28 -  assume "(A --> B) --> A"
   33.29 -  show A
   33.30 -  proof (rule classical)
   33.31 -    assume "~ A"
   33.32 -    have "A --> B"
   33.33 -    proof
   33.34 -      assume A
   33.35 -      with `~ A` show B by contradiction
   33.36 -    qed
   33.37 -    with `(A --> B) --> A` show A ..
   33.38 -  qed
   33.39 -qed
   33.40 -
   33.41 -text {*
   33.42 - In the subsequent version the reasoning is rearranged by means of
   33.43 - ``weak assumptions'' (as introduced by \isacommand{presume}).  Before
   33.44 - assuming the negated goal $\neg A$, its intended consequence $A \impl
   33.45 - B$ is put into place in order to solve the main problem.
   33.46 - Nevertheless, we do not get anything for free, but have to establish
   33.47 - $A \impl B$ later on.  The overall effect is that of a logical
   33.48 - \emph{cut}.
   33.49 -
   33.50 - Technically speaking, whenever some goal is solved by
   33.51 - \isacommand{show} in the context of weak assumptions then the latter
   33.52 - give rise to new subgoals, which may be established separately.  In
   33.53 - contrast, strong assumptions (as introduced by \isacommand{assume})
   33.54 - are solved immediately.
   33.55 -*}
   33.56 -
   33.57 -theorem "((A --> B) --> A) --> A"
   33.58 -proof
   33.59 -  assume "(A --> B) --> A"
   33.60 -  show A
   33.61 -  proof (rule classical)
   33.62 -    presume "A --> B"
   33.63 -    with `(A --> B) --> A` show A ..
   33.64 -  next
   33.65 -    assume "~ A"
   33.66 -    show "A --> B"
   33.67 -    proof
   33.68 -      assume A
   33.69 -      with `~ A` show B by contradiction
   33.70 -    qed
   33.71 -  qed
   33.72 -qed
   33.73 -
   33.74 -text {*
   33.75 - Note that the goals stemming from weak assumptions may be even left
   33.76 - until qed time, where they get eventually solved ``by assumption'' as
   33.77 - well.  In that case there is really no fundamental difference between
   33.78 - the two kinds of assumptions, apart from the order of reducing the
   33.79 - individual parts of the proof configuration.
   33.80 -
   33.81 - Nevertheless, the ``strong'' mode of plain assumptions is quite
   33.82 - important in practice to achieve robustness of proof text
   33.83 - interpretation.  By forcing both the conclusion \emph{and} the
   33.84 - assumptions to unify with the pending goal to be solved, goal
   33.85 - selection becomes quite deterministic.  For example, decomposition
   33.86 - with rules of the ``case-analysis'' type usually gives rise to
   33.87 - several goals that only differ in there local contexts.  With strong
   33.88 - assumptions these may be still solved in any order in a predictable
   33.89 - way, while weak ones would quickly lead to great confusion,
   33.90 - eventually demanding even some backtracking.
   33.91 -*}
   33.92 -
   33.93 -end
    34.1 --- a/src/HOL/Isar_examples/Puzzle.thy	Tue Oct 20 19:36:52 2009 +0200
    34.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    34.3 @@ -1,85 +0,0 @@
    34.4 -header {* An old chestnut *}
    34.5 -
    34.6 -theory Puzzle
    34.7 -imports Main
    34.8 -begin
    34.9 -
   34.10 -text_raw {*
   34.11 -  \footnote{A question from ``Bundeswettbewerb Mathematik''.  Original
   34.12 -  pen-and-paper proof due to Herbert Ehler; Isabelle tactic script by
   34.13 -  Tobias Nipkow.}
   34.14 -*}
   34.15 -
   34.16 -text {*
   34.17 -  \textbf{Problem.}  Given some function $f\colon \Nat \to \Nat$ such
   34.18 -  that $f \ap (f \ap n) < f \ap (\idt{Suc} \ap n)$ for all $n$.
   34.19 -  Demonstrate that $f$ is the identity.
   34.20 -*}
   34.21 -
   34.22 -theorem
   34.23 -  assumes f_ax: "\<And>n. f (f n) < f (Suc n)"
   34.24 -  shows "f n = n"
   34.25 -proof (rule order_antisym)
   34.26 -  {
   34.27 -    fix n show "n \<le> f n"
   34.28 -    proof (induct k \<equiv> "f n" arbitrary: n rule: less_induct)
   34.29 -      case (less k n)
   34.30 -      then have hyp: "\<And>m. f m < f n \<Longrightarrow> m \<le> f m" by (simp only:)
   34.31 -      show "n \<le> f n"
   34.32 -      proof (cases n)
   34.33 -        case (Suc m)
   34.34 -        from f_ax have "f (f m) < f n" by (simp only: Suc)
   34.35 -        with hyp have "f m \<le> f (f m)" .
   34.36 -        also from f_ax have "\<dots> < f n" by (simp only: Suc)
   34.37 -        finally have "f m < f n" .
   34.38 -        with hyp have "m \<le> f m" .
   34.39 -        also note `\<dots> < f n`
   34.40 -        finally have "m < f n" .
   34.41 -        then have "n \<le> f n" by (simp only: Suc)
   34.42 -        then show ?thesis .
   34.43 -      next
   34.44 -        case 0
   34.45 -        then show ?thesis by simp
   34.46 -      qed
   34.47 -    qed
   34.48 -  } note ge = this
   34.49 -
   34.50 -  {
   34.51 -    fix m n :: nat
   34.52 -    assume "m \<le> n"
   34.53 -    then have "f m \<le> f n"
   34.54 -    proof (induct n)
   34.55 -      case 0
   34.56 -      then have "m = 0" by simp
   34.57 -      then show ?case by simp
   34.58 -    next
   34.59 -      case (Suc n)
   34.60 -      from Suc.prems show "f m \<le> f (Suc n)"
   34.61 -      proof (rule le_SucE)
   34.62 -        assume "m \<le> n"
   34.63 -        with Suc.hyps have "f m \<le> f n" .
   34.64 -        also from ge f_ax have "\<dots> < f (Suc n)"
   34.65 -          by (rule le_less_trans)
   34.66 -        finally show ?thesis by simp
   34.67 -      next
   34.68 -        assume "m = Suc n"
   34.69 -        then show ?thesis by simp
   34.70 -      qed
   34.71 -    qed
   34.72 -  } note mono = this
   34.73 -
   34.74 -  show "f n \<le> n"
   34.75 -  proof -
   34.76 -    have "\<not> n < f n"
   34.77 -    proof
   34.78 -      assume "n < f n"
   34.79 -      then have "Suc n \<le> f n" by simp
   34.80 -      then have "f (Suc n) \<le> f (f n)" by (rule mono)
   34.81 -      also have "\<dots> < f (Suc n)" by (rule f_ax)
   34.82 -      finally have "\<dots> < \<dots>" . then show False ..
   34.83 -    qed
   34.84 -    then show ?thesis by simp
   34.85 -  qed
   34.86 -qed
   34.87 -
   34.88 -end
    35.1 --- a/src/HOL/Isar_examples/README.html	Tue Oct 20 19:36:52 2009 +0200
    35.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    35.3 @@ -1,21 +0,0 @@
    35.4 -<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
    35.5 -
    35.6 -<!-- $Id$ -->
    35.7 -
    35.8 -<html>
    35.9 -
   35.10 -<head>
   35.11 -  <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
   35.12 -  <title>HOL/Isar_examples</title>
   35.13 -</head>
   35.14 -
   35.15 -<body>
   35.16 -<h1>HOL/Isar_examples</h1>
   35.17 -
   35.18 -Isar offers a new high-level proof (and theory) language interface to
   35.19 -Isabelle.  This directory contains some example Isar documents.  See
   35.20 -also the included document, or the <a
   35.21 -href="http://isabelle.in.tum.de/Isar/">Isabelle/Isar page</a> for more
   35.22 -information.
   35.23 -</body>
   35.24 -</html>
    36.1 --- a/src/HOL/Isar_examples/ROOT.ML	Tue Oct 20 19:36:52 2009 +0200
    36.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    36.3 @@ -1,22 +0,0 @@
    36.4 -(*  Title:      HOL/Isar_examples/ROOT.ML
    36.5 -    Author:     Markus Wenzel, TU Muenchen
    36.6 -
    36.7 -Miscellaneous Isabelle/Isar examples for Higher-Order Logic.
    36.8 -*)
    36.9 -
   36.10 -no_document use_thys ["../Old_Number_Theory/Primes", "../Old_Number_Theory/Fibonacci"];
   36.11 -
   36.12 -use_thys [
   36.13 -  "Basic_Logic",
   36.14 -  "Cantor",
   36.15 -  "Peirce",
   36.16 -  "Drinker",
   36.17 -  "Expr_Compiler",
   36.18 -  "Group",
   36.19 -  "Summation",
   36.20 -  "Knaster_Tarski",
   36.21 -  "Mutilated_Checkerboard",
   36.22 -  "Puzzle",
   36.23 -  "Nested_Datatype",
   36.24 -  "Hoare_Ex"
   36.25 -];
    37.1 --- a/src/HOL/Isar_examples/Summation.thy	Tue Oct 20 19:36:52 2009 +0200
    37.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    37.3 @@ -1,158 +0,0 @@
    37.4 -(*  Title:      HOL/Isar_examples/Summation.thy
    37.5 -    Author:     Markus Wenzel
    37.6 -*)
    37.7 -
    37.8 -header {* Summing natural numbers *}
    37.9 -
   37.10 -theory Summation
   37.11 -imports Main
   37.12 -begin
   37.13 -
   37.14 -text_raw {*
   37.15 - \footnote{This example is somewhat reminiscent of the
   37.16 - \url{http://isabelle.in.tum.de/library/HOL/ex/NatSum.html}, which is
   37.17 - discussed in \cite{isabelle-ref} in the context of permutative
   37.18 - rewrite rules and ordered rewriting.}
   37.19 -*}
   37.20 -
   37.21 -text {*
   37.22 - Subsequently, we prove some summation laws of natural numbers
   37.23 - (including odds, squares, and cubes).  These examples demonstrate how
   37.24 - plain natural deduction (including induction) may be combined with
   37.25 - calculational proof.
   37.26 -*}
   37.27 -
   37.28 -
   37.29 -subsection {* Summation laws *}
   37.30 -
   37.31 -text {*
   37.32 - The sum of natural numbers $0 + \cdots + n$ equals $n \times (n +
   37.33 - 1)/2$.  Avoiding formal reasoning about division we prove this
   37.34 - equation multiplied by $2$.
   37.35 -*}
   37.36 -
   37.37 -theorem sum_of_naturals:
   37.38 -  "2 * (\<Sum>i::nat=0..n. i) = n * (n + 1)"
   37.39 -  (is "?P n" is "?S n = _")
   37.40 -proof (induct n)
   37.41 -  show "?P 0" by simp
   37.42 -next
   37.43 -  fix n have "?S (n + 1) = ?S n + 2 * (n + 1)" by simp
   37.44 -  also assume "?S n = n * (n + 1)"
   37.45 -  also have "... + 2 * (n + 1) = (n + 1) * (n + 2)" by simp
   37.46 -  finally show "?P (Suc n)" by simp
   37.47 -qed
   37.48 -
   37.49 -text {*
   37.50 - The above proof is a typical instance of mathematical induction.  The
   37.51 - main statement is viewed as some $\var{P} \ap n$ that is split by the
   37.52 - induction method into base case $\var{P} \ap 0$, and step case
   37.53 - $\var{P} \ap n \Impl \var{P} \ap (\idt{Suc} \ap n)$ for arbitrary $n$.
   37.54 -
   37.55 - The step case is established by a short calculation in forward
   37.56 - manner.  Starting from the left-hand side $\var{S} \ap (n + 1)$ of
   37.57 - the thesis, the final result is achieved by transformations involving
   37.58 - basic arithmetic reasoning (using the Simplifier).  The main point is
   37.59 - where the induction hypothesis $\var{S} \ap n = n \times (n + 1)$ is
   37.60 - introduced in order to replace a certain subterm.  So the
   37.61 - ``transitivity'' rule involved here is actual \emph{substitution}.
   37.62 - Also note how the occurrence of ``\dots'' in the subsequent step
   37.63 - documents the position where the right-hand side of the hypothesis
   37.64 - got filled in.
   37.65 -
   37.66 - \medskip A further notable point here is integration of calculations
   37.67 - with plain natural deduction.  This works so well in Isar for two
   37.68 - reasons.
   37.69 - \begin{enumerate}
   37.70 -
   37.71 - \item Facts involved in \isakeyword{also}~/ \isakeyword{finally}
   37.72 - calculational chains may be just anything.  There is nothing special
   37.73 - about \isakeyword{have}, so the natural deduction element
   37.74 - \isakeyword{assume} works just as well.
   37.75 -
   37.76 - \item There are two \emph{separate} primitives for building natural
   37.77 - deduction contexts: \isakeyword{fix}~$x$ and \isakeyword{assume}~$A$.
   37.78 - Thus it is possible to start reasoning with some new ``arbitrary, but
   37.79 - fixed'' elements before bringing in the actual assumption.  In
   37.80 - contrast, natural deduction is occasionally formalized with basic
   37.81 - context elements of the form $x:A$ instead.
   37.82 -
   37.83 - \end{enumerate}
   37.84 -*}
   37.85 -
   37.86 -text {*
   37.87 - \medskip We derive further summation laws for odds, squares, and
   37.88 - cubes as follows.  The basic technique of induction plus calculation
   37.89 - is the same as before.
   37.90 -*}
   37.91 -
   37.92 -theorem sum_of_odds:
   37.93 -  "(\<Sum>i::nat=0..<n. 2 * i + 1) = n^Suc (Suc 0)"
   37.94 -  (is "?P n" is "?S n = _")
   37.95 -proof (induct n)
   37.96 -  show "?P 0" by simp
   37.97 -next
   37.98 -  fix n have "?S (n + 1) = ?S n + 2 * n + 1" by simp
   37.99 -  also assume "?S n = n^Suc (Suc 0)"
  37.100 -  also have "... + 2 * n + 1 = (n + 1)^Suc (Suc 0)" by simp
  37.101 -  finally show "?P (Suc n)" by simp
  37.102 -qed
  37.103 -
  37.104 -text {*
  37.105 - Subsequently we require some additional tweaking of Isabelle built-in
  37.106 - arithmetic simplifications, such as bringing in distributivity by
  37.107 - hand.
  37.108 -*}
  37.109 -
  37.110 -lemmas distrib = add_mult_distrib add_mult_distrib2
  37.111 -
  37.112 -theorem sum_of_squares:
  37.113 -  "6 * (\<Sum>i::nat=0..n. i^Suc (Suc 0)) = n * (n + 1) * (2 * n + 1)"
  37.114 -  (is "?P n" is "?S n = _")
  37.115 -proof (induct n)
  37.116 -  show "?P 0" by simp
  37.117 -next
  37.118 -  fix n have "?S (n + 1) = ?S n + 6 * (n + 1)^Suc (Suc 0)"
  37.119 -    by (simp add: distrib)
  37.120 -  also assume "?S n = n * (n + 1) * (2 * n + 1)"
  37.121 -  also have "... + 6 * (n + 1)^Suc (Suc 0) =
  37.122 -    (n + 1) * (n + 2) * (2 * (n + 1) + 1)" by (simp add: distrib)
  37.123 -  finally show "?P (Suc n)" by simp
  37.124 -qed
  37.125 -
  37.126 -theorem sum_of_cubes:
  37.127 -  "4 * (\<Sum>i::nat=0..n. i^3) = (n * (n + 1))^Suc (Suc 0)"
  37.128 -  (is "?P n" is "?S n = _")
  37.129 -proof (induct n)
  37.130 -  show "?P 0" by (simp add: power_eq_if)
  37.131 -next
  37.132 -  fix n have "?S (n + 1) = ?S n + 4 * (n + 1)^3"
  37.133 -    by (simp add: power_eq_if distrib)
  37.134 -  also assume "?S n = (n * (n + 1))^Suc (Suc 0)"
  37.135 -  also have "... + 4 * (n + 1)^3 = ((n + 1) * ((n + 1) + 1))^Suc (Suc 0)"
  37.136 -    by (simp add: power_eq_if distrib)
  37.137 -  finally show "?P (Suc n)" by simp
  37.138 -qed
  37.139 -
  37.140 -text {*
  37.141 - Comparing these examples with the tactic script version
  37.142 - \url{http://isabelle.in.tum.de/library/HOL/ex/NatSum.html}, we note
  37.143 - an important difference of how induction vs.\ simplification is
  37.144 - applied.  While \cite[\S10]{isabelle-ref} advises for these examples
  37.145 - that ``induction should not be applied until the goal is in the
  37.146 - simplest form'' this would be a very bad idea in our setting.
  37.147 -
  37.148 - Simplification normalizes all arithmetic expressions involved,
  37.149 - producing huge intermediate goals.  With applying induction
  37.150 - afterwards, the Isar proof text would have to reflect the emerging
  37.151 - configuration by appropriate sub-proofs.  This would result in badly
  37.152 - structured, low-level technical reasoning, without any good idea of
  37.153 - the actual point.
  37.154 -
  37.155 - \medskip As a general rule of good proof style, automatic methods
  37.156 - such as $\idt{simp}$ or $\idt{auto}$ should normally be never used as
  37.157 - initial proof methods, but only as terminal ones, solving certain
  37.158 - goals completely.
  37.159 -*}
  37.160 -
  37.161 -end
    38.1 --- a/src/HOL/Isar_examples/document/proof.sty	Tue Oct 20 19:36:52 2009 +0200
    38.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    38.3 @@ -1,254 +0,0 @@
    38.4 -%       proof.sty       (Proof Figure Macros)
    38.5 -%
    38.6 -%       version 1.0
    38.7 -%       October 13, 1990
    38.8 -%       Copyright (C) 1990 Makoto Tatsuta (tatsuta@riec.tohoku.ac.jp)
    38.9 -%
   38.10 -% This program is free software; you can redistribute it or modify
   38.11 -% it under the terms of the GNU General Public License as published by
   38.12 -% the Free Software Foundation; either versions 1, or (at your option)
   38.13 -% any later version.
   38.14 -%
   38.15 -% This program is distributed in the hope that it will be useful
   38.16 -% but WITHOUT ANY WARRANTY; without even the implied warranty of
   38.17 -% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   38.18 -% GNU General Public License for more details.
   38.19 -%
   38.20 -%       Usage:
   38.21 -%               In \documentstyle, specify an optional style `proof', say,
   38.22 -%                       \documentstyle[proof]{article}.
   38.23 -%
   38.24 -%       The following macros are available:
   38.25 -%
   38.26 -%       In all the following macros, all the arguments such as
   38.27 -%       <Lowers> and <Uppers> are processed in math mode.
   38.28 -%
   38.29 -%       \infer<Lower><Uppers>
   38.30 -%               draws an inference.
   38.31 -%
   38.32 -%               Use & in <Uppers> to delimit upper formulae.
   38.33 -%               <Uppers> consists more than 0 formulae.
   38.34 -%
   38.35 -%               \infer returns \hbox{ ... } or \vbox{ ... } and
   38.36 -%               sets \@LeftOffset and \@RightOffset globally.
   38.37 -%
   38.38 -%       \infer[<Label>]<Lower><Uppers>
   38.39 -%               draws an inference labeled with <Label>.
   38.40 -%
   38.41 -%       \infer*<Lower><Uppers>
   38.42 -%               draws a many step deduction.
   38.43 -%
   38.44 -%       \infer*[<Label>]<Lower><Uppers>
   38.45 -%               draws a many step deduction labeled with <Label>.
   38.46 -%
   38.47 -%       \deduce<Lower><Uppers>
   38.48 -%               draws an inference without a rule.
   38.49 -%
   38.50 -%       \deduce[<Proof>]<Lower><Uppers>
   38.51 -%               draws a many step deduction with a proof name.
   38.52 -%
   38.53 -%       Example:
   38.54 -%               If you want to write
   38.55 -%                           B C
   38.56 -%                          -----
   38.57 -%                      A     D
   38.58 -%                     ----------
   38.59 -%                         E
   38.60 -%       use
   38.61 -%               \infer{E}{
   38.62 -%                       A
   38.63 -%                       &
   38.64 -%                       \infer{D}{B & C}
   38.65 -%               }
   38.66 -%
   38.67 -
   38.68 -%       Style Parameters
   38.69 -
   38.70 -\newdimen\inferLineSkip         \inferLineSkip=2pt
   38.71 -\newdimen\inferLabelSkip        \inferLabelSkip=5pt
   38.72 -\def\inferTabSkip{\quad}
   38.73 -
   38.74 -%       Variables
   38.75 -
   38.76 -\newdimen\@LeftOffset   % global
   38.77 -\newdimen\@RightOffset  % global
   38.78 -\newdimen\@SavedLeftOffset      % safe from users
   38.79 -
   38.80 -\newdimen\UpperWidth
   38.81 -\newdimen\LowerWidth
   38.82 -\newdimen\LowerHeight
   38.83 -\newdimen\UpperLeftOffset
   38.84 -\newdimen\UpperRightOffset
   38.85 -\newdimen\UpperCenter
   38.86 -\newdimen\LowerCenter
   38.87 -\newdimen\UpperAdjust
   38.88 -\newdimen\RuleAdjust
   38.89 -\newdimen\LowerAdjust
   38.90 -\newdimen\RuleWidth
   38.91 -\newdimen\HLabelAdjust
   38.92 -\newdimen\VLabelAdjust
   38.93 -\newdimen\WidthAdjust
   38.94 -
   38.95 -\newbox\@UpperPart
   38.96 -\newbox\@LowerPart
   38.97 -\newbox\@LabelPart
   38.98 -\newbox\ResultBox
   38.99 -
  38.100 -%       Flags
  38.101 -
  38.102 -\newif\if@inferRule     % whether \@infer draws a rule.
  38.103 -\newif\if@ReturnLeftOffset      % whether \@infer returns \@LeftOffset.
  38.104 -\newif\if@MathSaved     % whether inner math mode where \infer or
  38.105 -                        % \deduce appears.
  38.106 -
  38.107 -%       Special Fonts
  38.108 -
  38.109 -\def\DeduceSym{\vtop{\baselineskip4\p@ \lineskiplimit\z@
  38.110 -    \vbox{\hbox{.}\hbox{.}\hbox{.}}\hbox{.}}}
  38.111 -
  38.112 -%       Math Save Macros
  38.113 -%
  38.114 -%       \@SaveMath is called in the very begining of toplevel macros
  38.115 -%       which are \infer and \deduce.
  38.116 -%       \@RestoreMath is called in the very last before toplevel macros end.
  38.117 -%       Remark \infer and \deduce ends calling \@infer.
  38.118 -
  38.119 -%\def\@SaveMath{\@MathSavedfalse \ifmmode \ifinner
  38.120 -%        \relax $\relax \@MathSavedtrue \fi\fi }
  38.121 -%
  38.122 -%\def\@RestoreMath{\if@MathSaved \relax $\relax\fi }
  38.123 -
  38.124 -\def\@SaveMath{\relax}
  38.125 -\def\@RestoreMath{\relax}
  38.126 -
  38.127 -
  38.128 -%       Macros
  38.129 -
  38.130 -\def\@ifEmpty#1#2#3{\def\@tempa{\@empty}\def\@tempb{#1}\relax
  38.131 -        \ifx \@tempa \@tempb #2\else #3\fi }
  38.132 -
  38.133 -\def\infer{\@SaveMath \@ifnextchar *{\@inferSteps}{\@inferOneStep}}
  38.134 -
  38.135 -\def\@inferOneStep{\@inferRuletrue
  38.136 -        \@ifnextchar [{\@infer}{\@infer[\@empty]}}
  38.137 -
  38.138 -\def\@inferSteps*{\@ifnextchar [{\@@inferSteps}{\@@inferSteps[\@empty]}}
  38.139 -
  38.140 -\def\@@inferSteps[#1]{\@deduce{#1}[\DeduceSym]}
  38.141 -
  38.142 -\def\deduce{\@SaveMath \@ifnextchar [{\@deduce{\@empty}}
  38.143 -        {\@inferRulefalse \@infer[\@empty]}}
  38.144 -
  38.145 -%       \@deduce<Proof Label>[<Proof>]<Lower><Uppers>
  38.146 -
  38.147 -\def\@deduce#1[#2]#3#4{\@inferRulefalse
  38.148 -        \@infer[\@empty]{#3}{\@SaveMath \@infer[{#1}]{#2}{#4}}}
  38.149 -
  38.150 -%       \@infer[<Label>]<Lower><Uppers>
  38.151 -%               If \@inferRuletrue, draws a rule and <Label> is right to
  38.152 -%               a rule.
  38.153 -%               Otherwise, draws no rule and <Label> is right to <Lower>.
  38.154 -
  38.155 -\def\@infer[#1]#2#3{\relax
  38.156 -% Get parameters
  38.157 -        \if@ReturnLeftOffset \else \@SavedLeftOffset=\@LeftOffset \fi
  38.158 -        \setbox\@LabelPart=\hbox{$#1$}\relax
  38.159 -        \setbox\@LowerPart=\hbox{$#2$}\relax
  38.160 -%
  38.161 -        \global\@LeftOffset=0pt
  38.162 -        \setbox\@UpperPart=\vbox{\tabskip=0pt \halign{\relax
  38.163 -                \global\@RightOffset=0pt \@ReturnLeftOffsettrue $##$&&
  38.164 -                \inferTabSkip
  38.165 -                \global\@RightOffset=0pt \@ReturnLeftOffsetfalse $##$\cr
  38.166 -                #3\cr}}\relax
  38.167 -%                       Here is a little trick.
  38.168 -%                       \@ReturnLeftOffsettrue(false) influences on \infer or
  38.169 -%                       \deduce placed in ## locally
  38.170 -%                       because of \@SaveMath and \@RestoreMath.
  38.171 -        \UpperLeftOffset=\@LeftOffset
  38.172 -        \UpperRightOffset=\@RightOffset
  38.173 -% Calculate Adjustments
  38.174 -        \LowerWidth=\wd\@LowerPart
  38.175 -        \LowerHeight=\ht\@LowerPart
  38.176 -        \LowerCenter=0.5\LowerWidth
  38.177 -%
  38.178 -        \UpperWidth=\wd\@UpperPart \advance\UpperWidth by -\UpperLeftOffset
  38.179 -        \advance\UpperWidth by -\UpperRightOffset
  38.180 -        \UpperCenter=\UpperLeftOffset
  38.181 -        \advance\UpperCenter by 0.5\UpperWidth
  38.182 -%
  38.183 -        \ifdim \UpperWidth > \LowerWidth
  38.184 -                % \UpperCenter > \LowerCenter
  38.185 -        \UpperAdjust=0pt
  38.186 -        \RuleAdjust=\UpperLeftOffset
  38.187 -        \LowerAdjust=\UpperCenter \advance\LowerAdjust by -\LowerCenter
  38.188 -        \RuleWidth=\UpperWidth
  38.189 -        \global\@LeftOffset=\LowerAdjust
  38.190 -%
  38.191 -        \else   % \UpperWidth <= \LowerWidth
  38.192 -        \ifdim \UpperCenter > \LowerCenter
  38.193 -%
  38.194 -        \UpperAdjust=0pt
  38.195 -        \RuleAdjust=\UpperCenter \advance\RuleAdjust by -\LowerCenter
  38.196 -        \LowerAdjust=\RuleAdjust
  38.197 -        \RuleWidth=\LowerWidth
  38.198 -        \global\@LeftOffset=\LowerAdjust
  38.199 -%
  38.200 -        \else   % \UpperWidth <= \LowerWidth
  38.201 -                % \UpperCenter <= \LowerCenter
  38.202 -%
  38.203 -        \UpperAdjust=\LowerCenter \advance\UpperAdjust by -\UpperCenter
  38.204 -        \RuleAdjust=0pt
  38.205 -        \LowerAdjust=0pt
  38.206 -        \RuleWidth=\LowerWidth
  38.207 -        \global\@LeftOffset=0pt
  38.208 -%
  38.209 -        \fi\fi
  38.210 -% Make a box
  38.211 -        \if@inferRule
  38.212 -%
  38.213 -        \setbox\ResultBox=\vbox{
  38.214 -                \moveright \UpperAdjust \box\@UpperPart
  38.215 -                \nointerlineskip \kern\inferLineSkip
  38.216 -                \moveright \RuleAdjust \vbox{\hrule width\RuleWidth}\relax
  38.217 -                \nointerlineskip \kern\inferLineSkip
  38.218 -                \moveright \LowerAdjust \box\@LowerPart }\relax
  38.219 -%
  38.220 -        \@ifEmpty{#1}{}{\relax
  38.221 -%
  38.222 -        \HLabelAdjust=\wd\ResultBox     \advance\HLabelAdjust by -\RuleAdjust
  38.223 -        \advance\HLabelAdjust by -\RuleWidth
  38.224 -        \WidthAdjust=\HLabelAdjust
  38.225 -        \advance\WidthAdjust by -\inferLabelSkip
  38.226 -        \advance\WidthAdjust by -\wd\@LabelPart
  38.227 -        \ifdim \WidthAdjust < 0pt \WidthAdjust=0pt \fi
  38.228 -%
  38.229 -        \VLabelAdjust=\dp\@LabelPart
  38.230 -        \advance\VLabelAdjust by -\ht\@LabelPart
  38.231 -        \VLabelAdjust=0.5\VLabelAdjust  \advance\VLabelAdjust by \LowerHeight
  38.232 -        \advance\VLabelAdjust by \inferLineSkip
  38.233 -%
  38.234 -        \setbox\ResultBox=\hbox{\box\ResultBox
  38.235 -                \kern -\HLabelAdjust \kern\inferLabelSkip
  38.236 -                \raise\VLabelAdjust \box\@LabelPart \kern\WidthAdjust}\relax
  38.237 -%
  38.238 -        }\relax % end @ifEmpty
  38.239 -%
  38.240 -        \else % \@inferRulefalse
  38.241 -%
  38.242 -        \setbox\ResultBox=\vbox{
  38.243 -                \moveright \UpperAdjust \box\@UpperPart
  38.244 -                \nointerlineskip \kern\inferLineSkip
  38.245 -                \moveright \LowerAdjust \hbox{\unhbox\@LowerPart
  38.246 -                        \@ifEmpty{#1}{}{\relax
  38.247 -                        \kern\inferLabelSkip \unhbox\@LabelPart}}}\relax
  38.248 -        \fi
  38.249 -%
  38.250 -        \global\@RightOffset=\wd\ResultBox
  38.251 -        \global\advance\@RightOffset by -\@LeftOffset
  38.252 -        \global\advance\@RightOffset by -\LowerWidth
  38.253 -        \if@ReturnLeftOffset \else \global\@LeftOffset=\@SavedLeftOffset \fi
  38.254 -%
  38.255 -        \box\ResultBox
  38.256 -        \@RestoreMath
  38.257 -}
    39.1 --- a/src/HOL/Isar_examples/document/root.bib	Tue Oct 20 19:36:52 2009 +0200
    39.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    39.3 @@ -1,91 +0,0 @@
    39.4 -
    39.5 -@string{CUCL="Comp. Lab., Univ. Camb."}
    39.6 -@string{CUP="Cambridge University Press"}
    39.7 -@string{Springer="Springer-Verlag"}
    39.8 -@string{TUM="TU Munich"}
    39.9 -
   39.10 -@Book{Concrete-Math,
   39.11 -  author = 	 {R. L. Graham and D. E. Knuth and O. Patashnik},
   39.12 -  title = 	 {Concrete Mathematics},
   39.13 -  publisher = 	 {Addison-Wesley},
   39.14 -  year = 	 1989
   39.15 -}
   39.16 -
   39.17 -@InProceedings{Naraschewski-Wenzel:1998:HOOL,
   39.18 -  author	= {Wolfgang Naraschewski and Markus Wenzel},
   39.19 -  title		= {Object-Oriented Verification based on Record Subtyping in
   39.20 -                  {H}igher-{O}rder {L}ogic},
   39.21 -  crossref      = {tphols98}}
   39.22 -
   39.23 -@Article{Nipkow:1998:Winskel,
   39.24 -  author = 	 {Tobias Nipkow},
   39.25 -  title = 	 {Winskel is (almost) Right: Towards a Mechanized Semantics Textbook},
   39.26 -  journal = 	 {Formal Aspects of Computing},
   39.27 -  year = 	 1998,
   39.28 -  volume =	 10,
   39.29 -  pages =	 {171--186}
   39.30 -}
   39.31 -
   39.32 -@InProceedings{Wenzel:1999:TPHOL,
   39.33 -  author = 	 {Markus Wenzel},
   39.34 -  title = 	 {{Isar} --- a Generic Interpretative Approach to Readable Formal Proof Documents},
   39.35 -  crossref =     {tphols99}}
   39.36 -
   39.37 -@Book{Winskel:1993,
   39.38 -  author = 	 {G. Winskel},
   39.39 -  title = 	 {The Formal Semantics of Programming Languages},
   39.40 -  publisher = 	 {MIT Press},
   39.41 -  year = 	 1993
   39.42 -}
   39.43 -
   39.44 -@Book{davey-priestley,
   39.45 -  author	= {B. A. Davey and H. A. Priestley},
   39.46 -  title		= {Introduction to Lattices and Order},
   39.47 -  publisher	= CUP,
   39.48 -  year		= 1990}
   39.49 -
   39.50 -@manual{isabelle-HOL,
   39.51 -  author	= {Tobias Nipkow and Lawrence C. Paulson and Markus Wenzel},
   39.52 -  title		= {{Isabelle}'s Logics: {HOL}},
   39.53 -  institution	= {Institut f\"ur Informatik, Technische Universi\"at
   39.54 -                  M\"unchen and Computer Laboratory, University of Cambridge}}
   39.55 -
   39.56 -@manual{isabelle-intro,
   39.57 -  author	= {Lawrence C. Paulson},
   39.58 -  title		= {Introduction to {Isabelle}},
   39.59 -  institution	= CUCL}
   39.60 -
   39.61 -@manual{isabelle-isar-ref,
   39.62 -  author	= {Markus Wenzel},
   39.63 -  title		= {The {Isabelle/Isar} Reference Manual},
   39.64 -  institution	= TUM}
   39.65 -
   39.66 -@manual{isabelle-ref,
   39.67 -  author	= {Lawrence C. Paulson},
   39.68 -  title		= {The {Isabelle} Reference Manual},
   39.69 -  institution	= CUCL}
   39.70 -
   39.71 -@TechReport{paulson-mutilated-board,
   39.72 -  author = 	 {Lawrence C. Paulson},
   39.73 -  title = 	 {A Simple Formalization and Proof for the Mutilated Chess Board},
   39.74 -  institution =  CUCL,
   39.75 -  year = 	 1996,
   39.76 -  number =	 394,
   39.77 -  note = {\url{http://www.cl.cam.ac.uk/users/lcp/papers/Reports/mutil.pdf}}
   39.78 -}
   39.79 -
   39.80 -@Proceedings{tphols98,
   39.81 -  title		= {Theorem Proving in Higher Order Logics: {TPHOLs} '98},
   39.82 -  booktitle	= {Theorem Proving in Higher Order Logics: {TPHOLs} '98},
   39.83 -  editor	= {Jim Grundy and Malcom Newey},
   39.84 -  series	= {LNCS},
   39.85 -  volume        = 1479,
   39.86 -  year		= 1998}
   39.87 -
   39.88 -@Proceedings{tphols99,
   39.89 -  title		= {Theorem Proving in Higher Order Logics: {TPHOLs} '99},
   39.90 -  booktitle	= {Theorem Proving in Higher Order Logics: {TPHOLs} '99},
   39.91 -  editor	= {Bertot, Y. and Dowek, G. and Hirschowitz, A. and
   39.92 -                  Paulin, C. and Thery, L.},
   39.93 -  series	= {LNCS 1690},
   39.94 -  year		= 1999}
    40.1 --- a/src/HOL/Isar_examples/document/root.tex	Tue Oct 20 19:36:52 2009 +0200
    40.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    40.3 @@ -1,30 +0,0 @@
    40.4 -\input{style}
    40.5 -
    40.6 -\hyphenation{Isabelle}
    40.7 -
    40.8 -\begin{document}
    40.9 -
   40.10 -\title{Miscellaneous Isabelle/Isar examples for Higher-Order Logic}
   40.11 -\author{Markus Wenzel \\ \url{http://www.in.tum.de/~wenzelm/} \\[2ex]
   40.12 -  With contributions by Gertrud Bauer and Tobias Nipkow}
   40.13 -\maketitle
   40.14 -
   40.15 -\begin{abstract}
   40.16 -  Isar offers a high-level proof (and theory) language for Isabelle.
   40.17 -  We give various examples of Isabelle/Isar proof developments,
   40.18 -  ranging from simple demonstrations of certain language features to a
   40.19 -  bit more advanced applications.  The ``real'' applications of
   40.20 -  Isabelle/Isar are found elsewhere.
   40.21 -\end{abstract}
   40.22 -
   40.23 -\tableofcontents
   40.24 -
   40.25 -\parindent 0pt \parskip 0.5ex
   40.26 -
   40.27 -\input{session}
   40.28 -
   40.29 -\nocite{isabelle-isar-ref,Wenzel:1999:TPHOL}
   40.30 -\bibliographystyle{abbrv}
   40.31 -\bibliography{root}
   40.32 -
   40.33 -\end{document}
    41.1 --- a/src/HOL/Isar_examples/document/style.tex	Tue Oct 20 19:36:52 2009 +0200
    41.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    41.3 @@ -1,40 +0,0 @@
    41.4 -\documentclass[11pt,a4paper]{article}
    41.5 -\usepackage[only,bigsqcap]{stmaryrd}
    41.6 -\usepackage{ifthen,proof,amssymb,isabelle,isabellesym}
    41.7 -\isabellestyle{it}
    41.8 -\usepackage{pdfsetup}\urlstyle{rm}
    41.9 -
   41.10 -\renewcommand{\isamarkupheader}[1]{\section{#1}}
   41.11 -
   41.12 -\renewcommand{\isacommand}[1]
   41.13 -{\ifthenelse{\equal{sorry}{#1}}{$\;$\dummyproof}
   41.14 -  {\ifthenelse{\equal{oops}{#1}}{$\vdots$}{\isakeyword{#1}}}}
   41.15 -
   41.16 -\newcommand{\DUMMYPROOF}{{\langle\idt{proof}\rangle}}
   41.17 -\newcommand{\dummyproof}{$\DUMMYPROOF$}
   41.18 -
   41.19 -\newcommand{\name}[1]{\textsl{#1}}
   41.20 -
   41.21 -\newcommand{\idt}[1]{{\mathord{\mathit{#1}}}}
   41.22 -\newcommand{\var}[1]{{?\!\idt{#1}}}
   41.23 -\DeclareMathSymbol{\dshsym}{\mathalpha}{letters}{"2D}
   41.24 -\newcommand{\dsh}{\dshsym}
   41.25 -
   41.26 -\newcommand{\To}{\to}
   41.27 -\newcommand{\dt}{{\mathpunct.}}
   41.28 -\newcommand{\ap}{\mathbin{\!}}
   41.29 -\newcommand{\lam}[1]{\mathop{\lambda} #1\dt\;}
   41.30 -\newcommand{\all}[1]{\forall #1\dt\;}
   41.31 -\newcommand{\ex}[1]{\exists #1\dt\;}
   41.32 -\newcommand{\impl}{\to}
   41.33 -\newcommand{\conj}{\land}
   41.34 -\newcommand{\disj}{\lor}
   41.35 -\newcommand{\Impl}{\Longrightarrow}
   41.36 -
   41.37 -\newcommand{\Nat}{\mathord{\mathrm{I}\mkern-3.8mu\mathrm{N}}}
   41.38 -
   41.39 -
   41.40 -%%% Local Variables: 
   41.41 -%%% mode: latex
   41.42 -%%% TeX-master: "root"
   41.43 -%%% End: 
    42.1 --- a/src/HOL/README.html	Tue Oct 20 19:36:52 2009 +0200
    42.2 +++ b/src/HOL/README.html	Tue Oct 20 19:37:09 2009 +0200
    42.3 @@ -60,7 +60,7 @@
    42.4  <dt>IOA
    42.5  <dd>a simple theory of Input/Output Automata
    42.6  
    42.7 -<dt>Isar_examples
    42.8 +<dt>Isar_Examples
    42.9  <dd>several introductory examples using Isabelle/Isar
   42.10  
   42.11  <dt>Lambda
    43.1 --- a/src/HOL/ex/document/root.bib	Tue Oct 20 19:36:52 2009 +0200
    43.2 +++ b/src/HOL/ex/document/root.bib	Tue Oct 20 19:37:09 2009 +0200
    43.3 @@ -80,7 +80,7 @@
    43.4                    Higher-Order Logic},
    43.5    year =         2001,
    43.6    note =         {Part of the Isabelle distribution,
    43.7 -                  \url{http://isabelle.in.tum.de/library/HOL/Isar_examples/document.pdf}}
    43.8 +                  \url{http://isabelle.in.tum.de/library/HOL/Isar_Examples/document.pdf}}
    43.9  }
   43.10  
   43.11  @PhdThesis{Wenzel:2001:Thesis,