observe standard theory naming conventions;
author wenzelm
date Mon Jun 22 23:48:24 2009 +0200 (2009-06-22)
changeset 31758 3edd5f813f01
parent 31757 c1262feb61c7
child 31760 05e3e5980677
observe standard theory naming conventions;
modernized headers;
src/HOL/IsaMakefile
src/HOL/Isar_examples/BasicLogic.thy
src/HOL/Isar_examples/Basic_Logic.thy
src/HOL/Isar_examples/Cantor.thy
src/HOL/Isar_examples/Drinker.thy
src/HOL/Isar_examples/ExprCompiler.thy
src/HOL/Isar_examples/Expr_Compiler.thy
src/HOL/Isar_examples/Fibonacci.thy
src/HOL/Isar_examples/Group.thy
src/HOL/Isar_examples/Hoare.thy
src/HOL/Isar_examples/HoareEx.thy
src/HOL/Isar_examples/Hoare_Ex.thy
src/HOL/Isar_examples/KnasterTarski.thy
src/HOL/Isar_examples/Knaster_Tarski.thy
src/HOL/Isar_examples/MutilatedCheckerboard.thy
src/HOL/Isar_examples/Mutilated_Checkerboard.thy
src/HOL/Isar_examples/NestedDatatype.thy
src/HOL/Isar_examples/Nested_Datatype.thy
src/HOL/Isar_examples/Peirce.thy
src/HOL/Isar_examples/Puzzle.thy
src/HOL/Isar_examples/ROOT.ML
src/HOL/Isar_examples/Summation.thy
src/HOL/Isar_examples/document/root.tex
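
The two changes named in the commit message are mechanical. Theory files are renamed from run-together CamelCase to capitalized words separated by underscores (BasicLogic.thy becomes Basic_Logic.thy, KnasterTarski.thy becomes Knaster_Tarski.thy, and so on), and the single-line theory headers are split so that the theory name, the imports, and the begin keyword each stand on their own line. As an illustrative sketch with a hypothetical theory name (not part of this changeset), the modernized header form used throughout looks like this:

    theory Some_Example
    imports Main
    begin

    (* theory body *)

    end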
     1.1 --- a/src/HOL/IsaMakefile	Mon Jun 22 22:51:08 2009 +0200
     1.2 +++ b/src/HOL/IsaMakefile	Mon Jun 22 23:48:24 2009 +0200
     1.3 @@ -886,13 +886,13 @@
     1.4  
     1.5  HOL-Isar_examples: HOL $(LOG)/HOL-Isar_examples.gz
     1.6  
     1.7 -$(LOG)/HOL-Isar_examples.gz: $(OUT)/HOL Isar_examples/BasicLogic.thy	\
     1.8 +$(LOG)/HOL-Isar_examples.gz: $(OUT)/HOL Isar_examples/Basic_Logic.thy	\
     1.9    Isar_examples/Cantor.thy Isar_examples/Drinker.thy			\
    1.10 -  Isar_examples/ExprCompiler.thy Isar_examples/Fibonacci.thy		\
    1.11 +  Isar_examples/Expr_Compiler.thy Isar_examples/Fibonacci.thy		\
    1.12    Isar_examples/Group.thy Isar_examples/Hoare.thy			\
    1.13 -  Isar_examples/HoareEx.thy Isar_examples/KnasterTarski.thy		\
    1.14 -  Isar_examples/MutilatedCheckerboard.thy				\
    1.15 -  Isar_examples/NestedDatatype.thy Isar_examples/Peirce.thy		\
    1.16 +  Isar_examples/Hoare_Ex.thy Isar_examples/Knaster_Tarski.thy		\
    1.17 +  Isar_examples/Mutilated_Checkerboard.thy				\
    1.18 +  Isar_examples/Nested_Datatype.thy Isar_examples/Peirce.thy		\
    1.19    Isar_examples/Puzzle.thy Isar_examples/Summation.thy			\
    1.20    Isar_examples/ROOT.ML Isar_examples/document/proof.sty		\
    1.21    Isar_examples/document/root.bib Isar_examples/document/root.tex	\
     2.1 --- a/src/HOL/Isar_examples/BasicLogic.thy	Mon Jun 22 22:51:08 2009 +0200
     2.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.3 @@ -1,447 +0,0 @@
     2.4 -(*  Title:      HOL/Isar_examples/BasicLogic.thy
     2.5 -    ID:         $Id$
     2.6 -    Author:     Markus Wenzel, TU Muenchen
     2.7 -
     2.8 -Basic propositional and quantifier reasoning.
     2.9 -*)
    2.10 -
    2.11 -header {* Basic logical reasoning *}
    2.12 -
    2.13 -theory BasicLogic imports Main begin
    2.14 -
    2.15 -
    2.16 -subsection {* Pure backward reasoning *}
    2.17 -
    2.18 -text {*
     2.19 -  In order to get a first idea of what Isabelle/Isar proof documents
    2.20 -  may look like, we consider the propositions @{text I}, @{text K},
    2.21 -  and @{text S}.  The following (rather explicit) proofs should
     2.22 -  require little extra explanation.
    2.23 -*}
    2.24 -
    2.25 -lemma I: "A --> A"
    2.26 -proof
    2.27 -  assume A
    2.28 -  show A by fact
    2.29 -qed
    2.30 -
    2.31 -lemma K: "A --> B --> A"
    2.32 -proof
    2.33 -  assume A
    2.34 -  show "B --> A"
    2.35 -  proof
    2.36 -    show A by fact
    2.37 -  qed
    2.38 -qed
    2.39 -
    2.40 -lemma S: "(A --> B --> C) --> (A --> B) --> A --> C"
    2.41 -proof
    2.42 -  assume "A --> B --> C"
    2.43 -  show "(A --> B) --> A --> C"
    2.44 -  proof
    2.45 -    assume "A --> B"
    2.46 -    show "A --> C"
    2.47 -    proof
    2.48 -      assume A
    2.49 -      show C
    2.50 -      proof (rule mp)
    2.51 -        show "B --> C" by (rule mp) fact+
    2.52 -        show B by (rule mp) fact+
    2.53 -      qed
    2.54 -    qed
    2.55 -  qed
    2.56 -qed
    2.57 -
    2.58 -text {*
    2.59 -  Isar provides several ways to fine-tune the reasoning, avoiding
    2.60 -  excessive detail.  Several abbreviated language elements are
    2.61 -  available, enabling the writer to express proofs in a more concise
    2.62 -  way, even without referring to any automated proof tools yet.
    2.63 -
    2.64 -  First of all, proof by assumption may be abbreviated as a single
    2.65 -  dot.
    2.66 -*}
    2.67 -
    2.68 -lemma "A --> A"
    2.69 -proof
    2.70 -  assume A
    2.71 -  show A by fact+
    2.72 -qed
    2.73 -
    2.74 -text {*
    2.75 -  In fact, concluding any (sub-)proof already involves solving any
    2.76 -  remaining goals by assumption\footnote{This is not a completely
    2.77 -  trivial operation, as proof by assumption may involve full
    2.78 -  higher-order unification.}.  Thus we may skip the rather vacuous
    2.79 -  body of the above proof as well.
    2.80 -*}
    2.81 -
    2.82 -lemma "A --> A"
    2.83 -proof
    2.84 -qed
    2.85 -
    2.86 -text {*
    2.87 -  Note that the \isacommand{proof} command refers to the @{text rule}
    2.88 -  method (without arguments) by default.  Thus it implicitly applies a
    2.89 -  single rule, as determined from the syntactic form of the statements
    2.90 -  involved.  The \isacommand{by} command abbreviates any proof with
    2.91 -  empty body, so the proof may be further pruned.
    2.92 -*}
    2.93 -
    2.94 -lemma "A --> A"
    2.95 -  by rule
    2.96 -
    2.97 -text {*
    2.98 -  Proof by a single rule may be abbreviated as double-dot.
    2.99 -*}
   2.100 -
   2.101 -lemma "A --> A" ..
   2.102 -
   2.103 -text {*
   2.104 -  Thus we have arrived at an adequate representation of the proof of a
   2.105 -  tautology that holds by a single standard rule.\footnote{Apparently,
   2.106 -  the rule here is implication introduction.}
   2.107 -*}
   2.108 -
   2.109 -text {*
   2.110 -  Let us also reconsider @{text K}.  Its statement is composed of
   2.111 -  iterated connectives.  Basic decomposition is by a single rule at a
   2.112 -  time, which is why our first version above was by nesting two
   2.113 -  proofs.
   2.114 -
   2.115 -  The @{text intro} proof method repeatedly decomposes a goal's
   2.116 -  conclusion.\footnote{The dual method is @{text elim}, acting on a
   2.117 -  goal's premises.}
   2.118 -*}
   2.119 -
   2.120 -lemma "A --> B --> A"
   2.121 -proof (intro impI)
   2.122 -  assume A
   2.123 -  show A by fact
   2.124 -qed
   2.125 -
   2.126 -text {*
   2.127 -  Again, the body may be collapsed.
   2.128 -*}
   2.129 -
   2.130 -lemma "A --> B --> A"
   2.131 -  by (intro impI)
   2.132 -
   2.133 -text {*
   2.134 -  Just like @{text rule}, the @{text intro} and @{text elim} proof
   2.135 -  methods pick standard structural rules, in case no explicit
   2.136 -  arguments are given.  While implicit rules are usually just fine for
   2.137 -  single rule application, this may go too far with iteration.  Thus
    2.138 -  in practice, @{text intro} and @{text elim} would typically be
   2.139 -  restricted to certain structures by giving a few rules only, e.g.\
   2.140 -  \isacommand{proof}~@{text "(intro impI allI)"} to strip implications
   2.141 -  and universal quantifiers.
   2.142 -
   2.143 -  Such well-tuned iterated decomposition of certain structures is the
   2.144 -  prime application of @{text intro} and @{text elim}.  In contrast,
   2.145 -  terminal steps that solve a goal completely are usually performed by
   2.146 -  actual automated proof methods (such as \isacommand{by}~@{text
    2.147 -  blast}).
   2.148 -*}
   2.149 -
   2.150 -
   2.151 -subsection {* Variations of backward vs.\ forward reasoning *}
   2.152 -
   2.153 -text {*
   2.154 -  Certainly, any proof may be performed in backward-style only.  On
   2.155 -  the other hand, small steps of reasoning are often more naturally
   2.156 -  expressed in forward-style.  Isar supports both backward and forward
   2.157 -  reasoning as a first-class concept.  In order to demonstrate the
   2.158 -  difference, we consider several proofs of @{text "A \<and> B \<longrightarrow> B \<and> A"}.
   2.159 -
   2.160 -  The first version is purely backward.
   2.161 -*}
   2.162 -
   2.163 -lemma "A & B --> B & A"
   2.164 -proof
   2.165 -  assume "A & B"
   2.166 -  show "B & A"
   2.167 -  proof
   2.168 -    show B by (rule conjunct2) fact
   2.169 -    show A by (rule conjunct1) fact
   2.170 -  qed
   2.171 -qed
   2.172 -
   2.173 -text {*
   2.174 -  Above, the @{text "conjunct_1/2"} projection rules had to be named
   2.175 -  explicitly, since the goals @{text B} and @{text A} did not provide
   2.176 -  any structural clue.  This may be avoided using \isacommand{from} to
   2.177 -  focus on the @{text "A \<and> B"} assumption as the current facts,
   2.178 -  enabling the use of double-dot proofs.  Note that \isacommand{from}
   2.179 -  already does forward-chaining, involving the \name{conjE} rule here.
   2.180 -*}
   2.181 -
   2.182 -lemma "A & B --> B & A"
   2.183 -proof
   2.184 -  assume "A & B"
   2.185 -  show "B & A"
   2.186 -  proof
   2.187 -    from `A & B` show B ..
   2.188 -    from `A & B` show A ..
   2.189 -  qed
   2.190 -qed
   2.191 -
   2.192 -text {*
   2.193 -  In the next version, we move the forward step one level upwards.
   2.194 -  Forward-chaining from the most recent facts is indicated by the
   2.195 -  \isacommand{then} command.  Thus the proof of @{text "B \<and> A"} from
   2.196 -  @{text "A \<and> B"} actually becomes an elimination, rather than an
   2.197 -  introduction.  The resulting proof structure directly corresponds to
   2.198 -  that of the @{text conjE} rule, including the repeated goal
   2.199 -  proposition that is abbreviated as @{text ?thesis} below.
   2.200 -*}
   2.201 -
   2.202 -lemma "A & B --> B & A"
   2.203 -proof
   2.204 -  assume "A & B"
   2.205 -  then show "B & A"
   2.206 -  proof                    -- {* rule @{text conjE} of @{text "A \<and> B"} *}
   2.207 -    assume B A
   2.208 -    then show ?thesis ..   -- {* rule @{text conjI} of @{text "B \<and> A"} *}
   2.209 -  qed
   2.210 -qed
   2.211 -
   2.212 -text {*
   2.213 -  In the subsequent version we flatten the structure of the main body
   2.214 -  by doing forward reasoning all the time.  Only the outermost
   2.215 -  decomposition step is left as backward.
   2.216 -*}
   2.217 -
   2.218 -lemma "A & B --> B & A"
   2.219 -proof
   2.220 -  assume "A & B"
   2.221 -  from `A & B` have A ..
   2.222 -  from `A & B` have B ..
   2.223 -  from `B` `A` show "B & A" ..
   2.224 -qed
   2.225 -
   2.226 -text {*
   2.227 -  We can still push forward-reasoning a bit further, even at the risk
   2.228 -  of getting ridiculous.  Note that we force the initial proof step to
   2.229 -  do nothing here, by referring to the ``-'' proof method.
   2.230 -*}
   2.231 -
   2.232 -lemma "A & B --> B & A"
   2.233 -proof -
   2.234 -  {
   2.235 -    assume "A & B"
   2.236 -    from `A & B` have A ..
   2.237 -    from `A & B` have B ..
   2.238 -    from `B` `A` have "B & A" ..
   2.239 -  }
   2.240 -  then show ?thesis ..         -- {* rule \name{impI} *}
   2.241 -qed
   2.242 -
   2.243 -text {*
   2.244 -  \medskip With these examples we have shifted through a whole range
    2.245 -  from purely backward to purely forward reasoning.  Apparently, at
   2.246 -  the extreme ends we get slightly ill-structured proofs, which also
   2.247 -  require much explicit naming of either rules (backward) or local
   2.248 -  facts (forward).
   2.249 -
   2.250 -  The general lesson learned here is that good proof style would
   2.251 -  achieve just the \emph{right} balance of top-down backward
   2.252 -  decomposition, and bottom-up forward composition.  In general, there
   2.253 -  is no single best way to arrange some pieces of formal reasoning, of
   2.254 -  course.  Depending on the actual applications, the intended audience
   2.255 -  etc., rules (and methods) on the one hand vs.\ facts on the other
   2.256 -  hand have to be emphasized in an appropriate way.  This requires the
   2.257 -  proof writer to develop good taste, and some practice, of course.
   2.258 -*}
   2.259 -
   2.260 -text {*
   2.261 -  For our example the most appropriate way of reasoning is probably
   2.262 -  the middle one, with conjunction introduction done after
   2.263 -  elimination.
   2.264 -*}
   2.265 -
   2.266 -lemma "A & B --> B & A"
   2.267 -proof
   2.268 -  assume "A & B"
   2.269 -  then show "B & A"
   2.270 -  proof
   2.271 -    assume B A
   2.272 -    then show ?thesis ..
   2.273 -  qed
   2.274 -qed
   2.275 -
   2.276 -
   2.277 -
   2.278 -subsection {* A few examples from ``Introduction to Isabelle'' *}
   2.279 -
   2.280 -text {*
   2.281 -  We rephrase some of the basic reasoning examples of
   2.282 -  \cite{isabelle-intro}, using HOL rather than FOL.
   2.283 -*}
   2.284 -
   2.285 -subsubsection {* A propositional proof *}
   2.286 -
   2.287 -text {*
   2.288 -  We consider the proposition @{text "P \<or> P \<longrightarrow> P"}.  The proof below
   2.289 -  involves forward-chaining from @{text "P \<or> P"}, followed by an
   2.290 -  explicit case-analysis on the two \emph{identical} cases.
   2.291 -*}
   2.292 -
   2.293 -lemma "P | P --> P"
   2.294 -proof
   2.295 -  assume "P | P"
   2.296 -  then show P
   2.297 -  proof                    -- {*
   2.298 -    rule @{text disjE}: \smash{$\infer{C}{A \disj B & \infer*{C}{[A]} & \infer*{C}{[B]}}$}
   2.299 -  *}
   2.300 -    assume P show P by fact
   2.301 -  next
   2.302 -    assume P show P by fact
   2.303 -  qed
   2.304 -qed
   2.305 -
   2.306 -text {*
   2.307 -  Case splits are \emph{not} hardwired into the Isar language as a
   2.308 -  special feature.  The \isacommand{next} command used to separate the
   2.309 -  cases above is just a short form of managing block structure.
   2.310 -
   2.311 -  \medskip In general, applying proof methods may split up a goal into
   2.312 -  separate ``cases'', i.e.\ new subgoals with individual local
   2.313 -  assumptions.  The corresponding proof text typically mimics this by
   2.314 -  establishing results in appropriate contexts, separated by blocks.
   2.315 -
    2.316 -  In order to avoid too many explicit parentheses, the Isar system
    2.317 -  implicitly opens an additional block for any new goal; the
   2.318 -  \isacommand{next} statement then closes one block level, opening a
   2.319 -  new one.  The resulting behavior is what one would expect from
    2.320 -  separating cases, except that it is more flexible.  E.g.\ an induction
   2.321 -  base case (which does not introduce local assumptions) would
   2.322 -  \emph{not} require \isacommand{next} to separate the subsequent step
   2.323 -  case.
   2.324 -
   2.325 -  \medskip In our example the situation is even simpler, since the two
   2.326 -  cases actually coincide.  Consequently the proof may be rephrased as
   2.327 -  follows.
   2.328 -*}
   2.329 -
   2.330 -lemma "P | P --> P"
   2.331 -proof
   2.332 -  assume "P | P"
   2.333 -  then show P
   2.334 -  proof
   2.335 -    assume P
   2.336 -    show P by fact
   2.337 -    show P by fact
   2.338 -  qed
   2.339 -qed
   2.340 -
   2.341 -text {*
   2.342 -  Again, the rather vacuous body of the proof may be collapsed.  Thus
   2.343 -  the case analysis degenerates into two assumption steps, which are
   2.344 -  implicitly performed when concluding the single rule step of the
   2.345 -  double-dot proof as follows.
   2.346 -*}
   2.347 -
   2.348 -lemma "P | P --> P"
   2.349 -proof
   2.350 -  assume "P | P"
   2.351 -  then show P ..
   2.352 -qed
   2.353 -
   2.354 -
   2.355 -subsubsection {* A quantifier proof *}
   2.356 -
   2.357 -text {*
   2.358 -  To illustrate quantifier reasoning, let us prove @{text "(\<exists>x. P (f
   2.359 -  x)) \<longrightarrow> (\<exists>y. P y)"}.  Informally, this holds because any @{text a}
   2.360 -  with @{text "P (f a)"} may be taken as a witness for the second
   2.361 -  existential statement.
   2.362 -
   2.363 -  The first proof is rather verbose, exhibiting quite a lot of
   2.364 -  (redundant) detail.  It gives explicit rules, even with some
   2.365 -  instantiation.  Furthermore, we encounter two new language elements:
   2.366 -  the \isacommand{fix} command augments the context by some new
   2.367 -  ``arbitrary, but fixed'' element; the \isacommand{is} annotation
   2.368 -  binds term abbreviations by higher-order pattern matching.
   2.369 -*}
   2.370 -
   2.371 -lemma "(EX x. P (f x)) --> (EX y. P y)"
   2.372 -proof
   2.373 -  assume "EX x. P (f x)"
   2.374 -  then show "EX y. P y"
   2.375 -  proof (rule exE)             -- {*
   2.376 -    rule \name{exE}: \smash{$\infer{B}{\ex x A(x) & \infer*{B}{[A(x)]_x}}$}
   2.377 -  *}
   2.378 -    fix a
   2.379 -    assume "P (f a)" (is "P ?witness")
   2.380 -    then show ?thesis by (rule exI [of P ?witness])
   2.381 -  qed
   2.382 -qed
   2.383 -
   2.384 -text {*
   2.385 -  While explicit rule instantiation may occasionally improve
   2.386 -  readability of certain aspects of reasoning, it is usually quite
    2.387 -  redundant.  Above, the basic proof outline already gives enough
   2.388 -  structural clues for the system to infer both the rules and their
   2.389 -  instances (by higher-order unification).  Thus we may as well prune
   2.390 -  the text as follows.
   2.391 -*}
   2.392 -
   2.393 -lemma "(EX x. P (f x)) --> (EX y. P y)"
   2.394 -proof
   2.395 -  assume "EX x. P (f x)"
   2.396 -  then show "EX y. P y"
   2.397 -  proof
   2.398 -    fix a
   2.399 -    assume "P (f a)"
   2.400 -    then show ?thesis ..
   2.401 -  qed
   2.402 -qed
   2.403 -
   2.404 -text {*
   2.405 -  Explicit @{text \<exists>}-elimination as seen above can become quite
   2.406 -  cumbersome in practice.  The derived Isar language element
   2.407 -  ``\isakeyword{obtain}'' provides a more handsome way to do
   2.408 -  generalized existence reasoning.
   2.409 -*}
   2.410 -
   2.411 -lemma "(EX x. P (f x)) --> (EX y. P y)"
   2.412 -proof
   2.413 -  assume "EX x. P (f x)"
   2.414 -  then obtain a where "P (f a)" ..
   2.415 -  then show "EX y. P y" ..
   2.416 -qed
   2.417 -
   2.418 -text {*
   2.419 -  Technically, \isakeyword{obtain} is similar to \isakeyword{fix} and
   2.420 -  \isakeyword{assume} together with a soundness proof of the
    2.421 -  elimination involved.  Thus it behaves similarly to any other forward
   2.422 -  proof element.  Also note that due to the nature of general
   2.423 -  existence reasoning involved here, any result exported from the
   2.424 -  context of an \isakeyword{obtain} statement may \emph{not} refer to
   2.425 -  the parameters introduced there.
   2.426 -*}
   2.427 -
   2.428 -
   2.429 -
   2.430 -subsubsection {* Deriving rules in Isabelle *}
   2.431 -
   2.432 -text {*
   2.433 -  We derive the conjunction elimination rule from the corresponding
    2.434 -  projections.  The proof is quite straightforward, since
   2.435 -  Isabelle/Isar supports non-atomic goals and assumptions fully
   2.436 -  transparently.
   2.437 -*}
   2.438 -
   2.439 -theorem conjE: "A & B ==> (A ==> B ==> C) ==> C"
   2.440 -proof -
   2.441 -  assume "A & B"
   2.442 -  assume r: "A ==> B ==> C"
   2.443 -  show C
   2.444 -  proof (rule r)
   2.445 -    show A by (rule conjunct1) fact
   2.446 -    show B by (rule conjunct2) fact
   2.447 -  qed
   2.448 -qed
   2.449 -
   2.450 -end
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/src/HOL/Isar_examples/Basic_Logic.thy	Mon Jun 22 23:48:24 2009 +0200
     3.3 @@ -0,0 +1,448 @@
     3.4 +(*  Title:      HOL/Isar_examples/Basic_Logic.thy
     3.5 +    Author:     Markus Wenzel, TU Muenchen
     3.6 +
     3.7 +Basic propositional and quantifier reasoning.
     3.8 +*)
     3.9 +
    3.10 +header {* Basic logical reasoning *}
    3.11 +
    3.12 +theory Basic_Logic
    3.13 +imports Main
    3.14 +begin
    3.15 +
    3.16 +
    3.17 +subsection {* Pure backward reasoning *}
    3.18 +
    3.19 +text {*
     3.20 +  In order to get a first idea of what Isabelle/Isar proof documents
    3.21 +  may look like, we consider the propositions @{text I}, @{text K},
    3.22 +  and @{text S}.  The following (rather explicit) proofs should
     3.23 +  require little extra explanation.
    3.24 +*}
    3.25 +
    3.26 +lemma I: "A --> A"
    3.27 +proof
    3.28 +  assume A
    3.29 +  show A by fact
    3.30 +qed
    3.31 +
    3.32 +lemma K: "A --> B --> A"
    3.33 +proof
    3.34 +  assume A
    3.35 +  show "B --> A"
    3.36 +  proof
    3.37 +    show A by fact
    3.38 +  qed
    3.39 +qed
    3.40 +
    3.41 +lemma S: "(A --> B --> C) --> (A --> B) --> A --> C"
    3.42 +proof
    3.43 +  assume "A --> B --> C"
    3.44 +  show "(A --> B) --> A --> C"
    3.45 +  proof
    3.46 +    assume "A --> B"
    3.47 +    show "A --> C"
    3.48 +    proof
    3.49 +      assume A
    3.50 +      show C
    3.51 +      proof (rule mp)
    3.52 +        show "B --> C" by (rule mp) fact+
    3.53 +        show B by (rule mp) fact+
    3.54 +      qed
    3.55 +    qed
    3.56 +  qed
    3.57 +qed
    3.58 +
    3.59 +text {*
    3.60 +  Isar provides several ways to fine-tune the reasoning, avoiding
    3.61 +  excessive detail.  Several abbreviated language elements are
    3.62 +  available, enabling the writer to express proofs in a more concise
    3.63 +  way, even without referring to any automated proof tools yet.
    3.64 +
    3.65 +  First of all, proof by assumption may be abbreviated as a single
    3.66 +  dot.
    3.67 +*}
    3.68 +
    3.69 +lemma "A --> A"
    3.70 +proof
    3.71 +  assume A
    3.72 +  show A by fact+
    3.73 +qed
    3.74 +
    3.75 +text {*
    3.76 +  In fact, concluding any (sub-)proof already involves solving any
    3.77 +  remaining goals by assumption\footnote{This is not a completely
    3.78 +  trivial operation, as proof by assumption may involve full
    3.79 +  higher-order unification.}.  Thus we may skip the rather vacuous
    3.80 +  body of the above proof as well.
    3.81 +*}
    3.82 +
    3.83 +lemma "A --> A"
    3.84 +proof
    3.85 +qed
    3.86 +
    3.87 +text {*
    3.88 +  Note that the \isacommand{proof} command refers to the @{text rule}
    3.89 +  method (without arguments) by default.  Thus it implicitly applies a
    3.90 +  single rule, as determined from the syntactic form of the statements
    3.91 +  involved.  The \isacommand{by} command abbreviates any proof with
    3.92 +  empty body, so the proof may be further pruned.
    3.93 +*}
    3.94 +
    3.95 +lemma "A --> A"
    3.96 +  by rule
    3.97 +
    3.98 +text {*
    3.99 +  Proof by a single rule may be abbreviated as double-dot.
   3.100 +*}
   3.101 +
   3.102 +lemma "A --> A" ..
   3.103 +
   3.104 +text {*
   3.105 +  Thus we have arrived at an adequate representation of the proof of a
   3.106 +  tautology that holds by a single standard rule.\footnote{Apparently,
   3.107 +  the rule here is implication introduction.}
   3.108 +*}
   3.109 +
   3.110 +text {*
   3.111 +  Let us also reconsider @{text K}.  Its statement is composed of
   3.112 +  iterated connectives.  Basic decomposition is by a single rule at a
   3.113 +  time, which is why our first version above was by nesting two
   3.114 +  proofs.
   3.115 +
   3.116 +  The @{text intro} proof method repeatedly decomposes a goal's
   3.117 +  conclusion.\footnote{The dual method is @{text elim}, acting on a
   3.118 +  goal's premises.}
   3.119 +*}
   3.120 +
   3.121 +lemma "A --> B --> A"
   3.122 +proof (intro impI)
   3.123 +  assume A
   3.124 +  show A by fact
   3.125 +qed
   3.126 +
   3.127 +text {*
   3.128 +  Again, the body may be collapsed.
   3.129 +*}
   3.130 +
   3.131 +lemma "A --> B --> A"
   3.132 +  by (intro impI)
   3.133 +
   3.134 +text {*
   3.135 +  Just like @{text rule}, the @{text intro} and @{text elim} proof
   3.136 +  methods pick standard structural rules, in case no explicit
   3.137 +  arguments are given.  While implicit rules are usually just fine for
   3.138 +  single rule application, this may go too far with iteration.  Thus
    3.139 +  in practice, @{text intro} and @{text elim} would typically be
   3.140 +  restricted to certain structures by giving a few rules only, e.g.\
   3.141 +  \isacommand{proof}~@{text "(intro impI allI)"} to strip implications
   3.142 +  and universal quantifiers.
   3.143 +
   3.144 +  Such well-tuned iterated decomposition of certain structures is the
   3.145 +  prime application of @{text intro} and @{text elim}.  In contrast,
   3.146 +  terminal steps that solve a goal completely are usually performed by
   3.147 +  actual automated proof methods (such as \isacommand{by}~@{text
    3.148 +  blast}).
   3.149 +*}
   3.150 +
   3.151 +
   3.152 +subsection {* Variations of backward vs.\ forward reasoning *}
   3.153 +
   3.154 +text {*
   3.155 +  Certainly, any proof may be performed in backward-style only.  On
   3.156 +  the other hand, small steps of reasoning are often more naturally
   3.157 +  expressed in forward-style.  Isar supports both backward and forward
   3.158 +  reasoning as a first-class concept.  In order to demonstrate the
   3.159 +  difference, we consider several proofs of @{text "A \<and> B \<longrightarrow> B \<and> A"}.
   3.160 +
   3.161 +  The first version is purely backward.
   3.162 +*}
   3.163 +
   3.164 +lemma "A & B --> B & A"
   3.165 +proof
   3.166 +  assume "A & B"
   3.167 +  show "B & A"
   3.168 +  proof
   3.169 +    show B by (rule conjunct2) fact
   3.170 +    show A by (rule conjunct1) fact
   3.171 +  qed
   3.172 +qed
   3.173 +
   3.174 +text {*
   3.175 +  Above, the @{text "conjunct_1/2"} projection rules had to be named
   3.176 +  explicitly, since the goals @{text B} and @{text A} did not provide
   3.177 +  any structural clue.  This may be avoided using \isacommand{from} to
   3.178 +  focus on the @{text "A \<and> B"} assumption as the current facts,
   3.179 +  enabling the use of double-dot proofs.  Note that \isacommand{from}
   3.180 +  already does forward-chaining, involving the \name{conjE} rule here.
   3.181 +*}
   3.182 +
   3.183 +lemma "A & B --> B & A"
   3.184 +proof
   3.185 +  assume "A & B"
   3.186 +  show "B & A"
   3.187 +  proof
   3.188 +    from `A & B` show B ..
   3.189 +    from `A & B` show A ..
   3.190 +  qed
   3.191 +qed
   3.192 +
   3.193 +text {*
   3.194 +  In the next version, we move the forward step one level upwards.
   3.195 +  Forward-chaining from the most recent facts is indicated by the
   3.196 +  \isacommand{then} command.  Thus the proof of @{text "B \<and> A"} from
   3.197 +  @{text "A \<and> B"} actually becomes an elimination, rather than an
   3.198 +  introduction.  The resulting proof structure directly corresponds to
   3.199 +  that of the @{text conjE} rule, including the repeated goal
   3.200 +  proposition that is abbreviated as @{text ?thesis} below.
   3.201 +*}
   3.202 +
   3.203 +lemma "A & B --> B & A"
   3.204 +proof
   3.205 +  assume "A & B"
   3.206 +  then show "B & A"
   3.207 +  proof                    -- {* rule @{text conjE} of @{text "A \<and> B"} *}
   3.208 +    assume B A
   3.209 +    then show ?thesis ..   -- {* rule @{text conjI} of @{text "B \<and> A"} *}
   3.210 +  qed
   3.211 +qed
   3.212 +
   3.213 +text {*
   3.214 +  In the subsequent version we flatten the structure of the main body
   3.215 +  by doing forward reasoning all the time.  Only the outermost
   3.216 +  decomposition step is left as backward.
   3.217 +*}
   3.218 +
   3.219 +lemma "A & B --> B & A"
   3.220 +proof
   3.221 +  assume "A & B"
   3.222 +  from `A & B` have A ..
   3.223 +  from `A & B` have B ..
   3.224 +  from `B` `A` show "B & A" ..
   3.225 +qed
   3.226 +
   3.227 +text {*
   3.228 +  We can still push forward-reasoning a bit further, even at the risk
   3.229 +  of getting ridiculous.  Note that we force the initial proof step to
   3.230 +  do nothing here, by referring to the ``-'' proof method.
   3.231 +*}
   3.232 +
   3.233 +lemma "A & B --> B & A"
   3.234 +proof -
   3.235 +  {
   3.236 +    assume "A & B"
   3.237 +    from `A & B` have A ..
   3.238 +    from `A & B` have B ..
   3.239 +    from `B` `A` have "B & A" ..
   3.240 +  }
   3.241 +  then show ?thesis ..         -- {* rule \name{impI} *}
   3.242 +qed
   3.243 +
   3.244 +text {*
   3.245 +  \medskip With these examples we have shifted through a whole range
    3.246 +  from purely backward to purely forward reasoning.  Apparently, at
   3.247 +  the extreme ends we get slightly ill-structured proofs, which also
   3.248 +  require much explicit naming of either rules (backward) or local
   3.249 +  facts (forward).
   3.250 +
   3.251 +  The general lesson learned here is that good proof style would
   3.252 +  achieve just the \emph{right} balance of top-down backward
   3.253 +  decomposition, and bottom-up forward composition.  In general, there
   3.254 +  is no single best way to arrange some pieces of formal reasoning, of
   3.255 +  course.  Depending on the actual applications, the intended audience
   3.256 +  etc., rules (and methods) on the one hand vs.\ facts on the other
   3.257 +  hand have to be emphasized in an appropriate way.  This requires the
   3.258 +  proof writer to develop good taste, and some practice, of course.
   3.259 +*}
   3.260 +
   3.261 +text {*
   3.262 +  For our example the most appropriate way of reasoning is probably
   3.263 +  the middle one, with conjunction introduction done after
   3.264 +  elimination.
   3.265 +*}
   3.266 +
   3.267 +lemma "A & B --> B & A"
   3.268 +proof
   3.269 +  assume "A & B"
   3.270 +  then show "B & A"
   3.271 +  proof
   3.272 +    assume B A
   3.273 +    then show ?thesis ..
   3.274 +  qed
   3.275 +qed
   3.276 +
   3.277 +
   3.278 +
   3.279 +subsection {* A few examples from ``Introduction to Isabelle'' *}
   3.280 +
   3.281 +text {*
   3.282 +  We rephrase some of the basic reasoning examples of
   3.283 +  \cite{isabelle-intro}, using HOL rather than FOL.
   3.284 +*}
   3.285 +
   3.286 +subsubsection {* A propositional proof *}
   3.287 +
   3.288 +text {*
   3.289 +  We consider the proposition @{text "P \<or> P \<longrightarrow> P"}.  The proof below
   3.290 +  involves forward-chaining from @{text "P \<or> P"}, followed by an
   3.291 +  explicit case-analysis on the two \emph{identical} cases.
   3.292 +*}
   3.293 +
   3.294 +lemma "P | P --> P"
   3.295 +proof
   3.296 +  assume "P | P"
   3.297 +  then show P
   3.298 +  proof                    -- {*
   3.299 +    rule @{text disjE}: \smash{$\infer{C}{A \disj B & \infer*{C}{[A]} & \infer*{C}{[B]}}$}
   3.300 +  *}
   3.301 +    assume P show P by fact
   3.302 +  next
   3.303 +    assume P show P by fact
   3.304 +  qed
   3.305 +qed
   3.306 +
   3.307 +text {*
   3.308 +  Case splits are \emph{not} hardwired into the Isar language as a
   3.309 +  special feature.  The \isacommand{next} command used to separate the
   3.310 +  cases above is just a short form of managing block structure.
   3.311 +
   3.312 +  \medskip In general, applying proof methods may split up a goal into
   3.313 +  separate ``cases'', i.e.\ new subgoals with individual local
   3.314 +  assumptions.  The corresponding proof text typically mimics this by
   3.315 +  establishing results in appropriate contexts, separated by blocks.
   3.316 +
    3.317 +  In order to avoid too many explicit parentheses, the Isar system
    3.318 +  implicitly opens an additional block for any new goal; the
   3.319 +  \isacommand{next} statement then closes one block level, opening a
   3.320 +  new one.  The resulting behavior is what one would expect from
    3.321 +  separating cases, except that it is more flexible.  E.g.\ an induction
   3.322 +  base case (which does not introduce local assumptions) would
   3.323 +  \emph{not} require \isacommand{next} to separate the subsequent step
   3.324 +  case.
   3.325 +
   3.326 +  \medskip In our example the situation is even simpler, since the two
   3.327 +  cases actually coincide.  Consequently the proof may be rephrased as
   3.328 +  follows.
   3.329 +*}
   3.330 +
   3.331 +lemma "P | P --> P"
   3.332 +proof
   3.333 +  assume "P | P"
   3.334 +  then show P
   3.335 +  proof
   3.336 +    assume P
   3.337 +    show P by fact
   3.338 +    show P by fact
   3.339 +  qed
   3.340 +qed
   3.341 +
   3.342 +text {*
   3.343 +  Again, the rather vacuous body of the proof may be collapsed.  Thus
   3.344 +  the case analysis degenerates into two assumption steps, which are
   3.345 +  implicitly performed when concluding the single rule step of the
   3.346 +  double-dot proof as follows.
   3.347 +*}
   3.348 +
   3.349 +lemma "P | P --> P"
   3.350 +proof
   3.351 +  assume "P | P"
   3.352 +  then show P ..
   3.353 +qed
   3.354 +
   3.355 +
   3.356 +subsubsection {* A quantifier proof *}
   3.357 +
   3.358 +text {*
   3.359 +  To illustrate quantifier reasoning, let us prove @{text "(\<exists>x. P (f
   3.360 +  x)) \<longrightarrow> (\<exists>y. P y)"}.  Informally, this holds because any @{text a}
   3.361 +  with @{text "P (f a)"} may be taken as a witness for the second
   3.362 +  existential statement.
   3.363 +
   3.364 +  The first proof is rather verbose, exhibiting quite a lot of
   3.365 +  (redundant) detail.  It gives explicit rules, even with some
   3.366 +  instantiation.  Furthermore, we encounter two new language elements:
   3.367 +  the \isacommand{fix} command augments the context by some new
   3.368 +  ``arbitrary, but fixed'' element; the \isacommand{is} annotation
   3.369 +  binds term abbreviations by higher-order pattern matching.
   3.370 +*}
   3.371 +
   3.372 +lemma "(EX x. P (f x)) --> (EX y. P y)"
   3.373 +proof
   3.374 +  assume "EX x. P (f x)"
   3.375 +  then show "EX y. P y"
   3.376 +  proof (rule exE)             -- {*
   3.377 +    rule \name{exE}: \smash{$\infer{B}{\ex x A(x) & \infer*{B}{[A(x)]_x}}$}
   3.378 +  *}
   3.379 +    fix a
   3.380 +    assume "P (f a)" (is "P ?witness")
   3.381 +    then show ?thesis by (rule exI [of P ?witness])
   3.382 +  qed
   3.383 +qed
   3.384 +
   3.385 +text {*
   3.386 +  While explicit rule instantiation may occasionally improve
   3.387 +  readability of certain aspects of reasoning, it is usually quite
    3.388 +  redundant.  Above, the basic proof outline already gives enough
   3.389 +  structural clues for the system to infer both the rules and their
   3.390 +  instances (by higher-order unification).  Thus we may as well prune
   3.391 +  the text as follows.
   3.392 +*}
   3.393 +
   3.394 +lemma "(EX x. P (f x)) --> (EX y. P y)"
   3.395 +proof
   3.396 +  assume "EX x. P (f x)"
   3.397 +  then show "EX y. P y"
   3.398 +  proof
   3.399 +    fix a
   3.400 +    assume "P (f a)"
   3.401 +    then show ?thesis ..
   3.402 +  qed
   3.403 +qed
   3.404 +
   3.405 +text {*
   3.406 +  Explicit @{text \<exists>}-elimination as seen above can become quite
   3.407 +  cumbersome in practice.  The derived Isar language element
   3.408 +  ``\isakeyword{obtain}'' provides a more handsome way to do
   3.409 +  generalized existence reasoning.
   3.410 +*}
   3.411 +
   3.412 +lemma "(EX x. P (f x)) --> (EX y. P y)"
   3.413 +proof
   3.414 +  assume "EX x. P (f x)"
   3.415 +  then obtain a where "P (f a)" ..
   3.416 +  then show "EX y. P y" ..
   3.417 +qed
   3.418 +
   3.419 +text {*
   3.420 +  Technically, \isakeyword{obtain} is similar to \isakeyword{fix} and
   3.421 +  \isakeyword{assume} together with a soundness proof of the
    3.422 +  elimination involved.  Thus it behaves similarly to any other forward
   3.423 +  proof element.  Also note that due to the nature of general
   3.424 +  existence reasoning involved here, any result exported from the
   3.425 +  context of an \isakeyword{obtain} statement may \emph{not} refer to
   3.426 +  the parameters introduced there.
   3.427 +*}
   3.428 +
   3.429 +
   3.430 +
   3.431 +subsubsection {* Deriving rules in Isabelle *}
   3.432 +
   3.433 +text {*
   3.434 +  We derive the conjunction elimination rule from the corresponding
    3.435 +  projections.  The proof is quite straightforward, since
   3.436 +  Isabelle/Isar supports non-atomic goals and assumptions fully
   3.437 +  transparently.
   3.438 +*}
   3.439 +
   3.440 +theorem conjE: "A & B ==> (A ==> B ==> C) ==> C"
   3.441 +proof -
   3.442 +  assume "A & B"
   3.443 +  assume r: "A ==> B ==> C"
   3.444 +  show C
   3.445 +  proof (rule r)
   3.446 +    show A by (rule conjunct1) fact
   3.447 +    show B by (rule conjunct2) fact
   3.448 +  qed
   3.449 +qed
   3.450 +
   3.451 +end
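
The combination proof (intro impI allI) is mentioned in the theory text above but not exercised by any of its lemmas. As an illustrative sketch in the same style (not part of this changeset), it strips a universal quantifier and an implication in a single step, leaving a goal that the terminal step closes by assumption:

    lemma "ALL x. P x --> P x"
      by (intro impI allI)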
     4.1 --- a/src/HOL/Isar_examples/Cantor.thy	Mon Jun 22 22:51:08 2009 +0200
     4.2 +++ b/src/HOL/Isar_examples/Cantor.thy	Mon Jun 22 23:48:24 2009 +0200
     4.3 @@ -1,11 +1,12 @@
     4.4  (*  Title:      HOL/Isar_examples/Cantor.thy
     4.5 -    ID:         $Id$
     4.6      Author:     Markus Wenzel, TU Muenchen
     4.7  *)
     4.8  
     4.9  header {* Cantor's Theorem *}
    4.10  
    4.11 -theory Cantor imports Main begin
    4.12 +theory Cantor
    4.13 +imports Main
    4.14 +begin
    4.15  
    4.16  text_raw {*
    4.17    \footnote{This is an Isar version of the final example of the
     5.1 --- a/src/HOL/Isar_examples/Drinker.thy	Mon Jun 22 22:51:08 2009 +0200
     5.2 +++ b/src/HOL/Isar_examples/Drinker.thy	Mon Jun 22 23:48:24 2009 +0200
     5.3 @@ -1,11 +1,12 @@
     5.4  (*  Title:      HOL/Isar_examples/Drinker.thy
     5.5 -    ID:         $Id$
     5.6      Author:     Makarius
     5.7  *)
     5.8  
     5.9  header {* The Drinker's Principle *}
    5.10  
    5.11 -theory Drinker imports Main begin
    5.12 +theory Drinker
    5.13 +imports Main
    5.14 +begin
    5.15  
    5.16  text {*
    5.17    Here is another example of classical reasoning: the Drinker's
     6.1 --- a/src/HOL/Isar_examples/ExprCompiler.thy	Mon Jun 22 22:51:08 2009 +0200
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,230 +0,0 @@
     6.4 -(*  Title:      HOL/Isar_examples/ExprCompiler.thy
     6.5 -    ID:         $Id$
     6.6 -    Author:     Markus Wenzel, TU Muenchen
     6.7 -
     6.8 -Correctness of a simple expression/stack-machine compiler.
     6.9 -*)
    6.10 -
    6.11 -header {* Correctness of a simple expression compiler *}
    6.12 -
    6.13 -theory ExprCompiler imports Main begin
    6.14 -
    6.15 -text {*
    6.16 - This is a (rather trivial) example of program verification.  We model
    6.17 - a compiler for translating expressions to stack machine instructions,
    6.18 - and prove its correctness wrt.\ some evaluation semantics.
    6.19 -*}
    6.20 -
    6.21 -
    6.22 -subsection {* Binary operations *}
    6.23 -
    6.24 -text {*
    6.25 - Binary operations are just functions over some type of values.  This
    6.26 - is both for abstract syntax and semantics, i.e.\ we use a ``shallow
    6.27 - embedding'' here.
    6.28 -*}
    6.29 -
    6.30 -types
    6.31 -  'val binop = "'val => 'val => 'val"
    6.32 -
    6.33 -
    6.34 -subsection {* Expressions *}
    6.35 -
    6.36 -text {*
    6.37 - The language of expressions is defined as an inductive type,
    6.38 - consisting of variables, constants, and binary operations on
    6.39 - expressions.
    6.40 -*}
    6.41 -
    6.42 -datatype ('adr, 'val) expr =
    6.43 -  Variable 'adr |
    6.44 -  Constant 'val |
    6.45 -  Binop "'val binop" "('adr, 'val) expr" "('adr, 'val) expr"
    6.46 -
    6.47 -text {*
    6.48 - Evaluation (wrt.\ some environment of variable assignments) is
    6.49 - defined by primitive recursion over the structure of expressions.
    6.50 -*}
    6.51 -
    6.52 -consts
    6.53 -  eval :: "('adr, 'val) expr => ('adr => 'val) => 'val"
    6.54 -
    6.55 -primrec
    6.56 -  "eval (Variable x) env = env x"
    6.57 -  "eval (Constant c) env = c"
    6.58 -  "eval (Binop f e1 e2) env = f (eval e1 env) (eval e2 env)"
    6.59 -
    6.60 -
    6.61 -subsection {* Machine *}
    6.62 -
    6.63 -text {*
    6.64 - Next we model a simple stack machine, with three instructions.
    6.65 -*}
    6.66 -
    6.67 -datatype ('adr, 'val) instr =
    6.68 -  Const 'val |
    6.69 -  Load 'adr |
    6.70 -  Apply "'val binop"
    6.71 -
    6.72 -text {*
    6.73 - Execution of a list of stack machine instructions is easily defined
    6.74 - as follows.
    6.75 -*}
    6.76 -
    6.77 -consts
    6.78 -  exec :: "(('adr, 'val) instr) list
    6.79 -    => 'val list => ('adr => 'val) => 'val list"
    6.80 -
    6.81 -primrec
    6.82 -  "exec [] stack env = stack"
    6.83 -  "exec (instr # instrs) stack env =
    6.84 -    (case instr of
    6.85 -      Const c => exec instrs (c # stack) env
    6.86 -    | Load x => exec instrs (env x # stack) env
    6.87 -    | Apply f => exec instrs (f (hd stack) (hd (tl stack))
    6.88 -                   # (tl (tl stack))) env)"
    6.89 -
    6.90 -constdefs
    6.91 -  execute :: "(('adr, 'val) instr) list => ('adr => 'val) => 'val"
    6.92 -  "execute instrs env == hd (exec instrs [] env)"
    6.93 -
    6.94 -
    6.95 -subsection {* Compiler *}
    6.96 -
    6.97 -text {*
     6.98 - We are ready to define the compilation function from expressions to
    6.99 - lists of stack machine instructions.
   6.100 -*}
   6.101 -
   6.102 -consts
   6.103 -  compile :: "('adr, 'val) expr => (('adr, 'val) instr) list"
   6.104 -
   6.105 -primrec
   6.106 -  "compile (Variable x) = [Load x]"
   6.107 -  "compile (Constant c) = [Const c]"
   6.108 -  "compile (Binop f e1 e2) = compile e2 @ compile e1 @ [Apply f]"
   6.109 -
   6.110 -
   6.111 -text {*
   6.112 - The main result of this development is the correctness theorem for
   6.113 - $\idt{compile}$.  We first establish a lemma about $\idt{exec}$ and
   6.114 - list append.
   6.115 -*}
   6.116 -
   6.117 -lemma exec_append:
   6.118 -  "exec (xs @ ys) stack env =
   6.119 -    exec ys (exec xs stack env) env"
   6.120 -proof (induct xs arbitrary: stack)
   6.121 -  case Nil
   6.122 -  show ?case by simp
   6.123 -next
   6.124 -  case (Cons x xs)
   6.125 -  show ?case
   6.126 -  proof (induct x)
   6.127 -    case Const
   6.128 -    from Cons show ?case by simp
   6.129 -  next
   6.130 -    case Load
   6.131 -    from Cons show ?case by simp
   6.132 -  next
   6.133 -    case Apply
   6.134 -    from Cons show ?case by simp
   6.135 -  qed
   6.136 -qed
   6.137 -
   6.138 -theorem correctness: "execute (compile e) env = eval e env"
   6.139 -proof -
   6.140 -  have "\<And>stack. exec (compile e) stack env = eval e env # stack"
   6.141 -  proof (induct e)
   6.142 -    case Variable show ?case by simp
   6.143 -  next
   6.144 -    case Constant show ?case by simp
   6.145 -  next
   6.146 -    case Binop then show ?case by (simp add: exec_append)
   6.147 -  qed
   6.148 -  then show ?thesis by (simp add: execute_def)
   6.149 -qed
   6.150 -
   6.151 -
   6.152 -text {*
   6.153 - \bigskip In the proofs above, the \name{simp} method does quite a lot
   6.154 - of work behind the scenes (mostly ``functional program execution'').
   6.155 - Subsequently, the same reasoning is elaborated in detail --- at most
   6.156 - one recursive function definition is used at a time.  Thus we get a
   6.157 - better idea of what is actually going on.
   6.158 -*}
   6.159 -
   6.160 -lemma exec_append':
   6.161 -  "exec (xs @ ys) stack env = exec ys (exec xs stack env) env"
   6.162 -proof (induct xs arbitrary: stack)
   6.163 -  case (Nil s)
   6.164 -  have "exec ([] @ ys) s env = exec ys s env" by simp
   6.165 -  also have "... = exec ys (exec [] s env) env" by simp
   6.166 -  finally show ?case .
   6.167 -next
   6.168 -  case (Cons x xs s)
   6.169 -  show ?case
   6.170 -  proof (induct x)
   6.171 -    case (Const val)
   6.172 -    have "exec ((Const val # xs) @ ys) s env = exec (Const val # xs @ ys) s env"
   6.173 -      by simp
   6.174 -    also have "... = exec (xs @ ys) (val # s) env" by simp
   6.175 -    also from Cons have "... = exec ys (exec xs (val # s) env) env" .
   6.176 -    also have "... = exec ys (exec (Const val # xs) s env) env" by simp
   6.177 -    finally show ?case .
   6.178 -  next
   6.179 -    case (Load adr)
   6.180 -    from Cons show ?case by simp -- {* same as above *}
   6.181 -  next
   6.182 -    case (Apply fn)
   6.183 -    have "exec ((Apply fn # xs) @ ys) s env =
   6.184 -        exec (Apply fn # xs @ ys) s env" by simp
   6.185 -    also have "... =
   6.186 -        exec (xs @ ys) (fn (hd s) (hd (tl s)) # (tl (tl s))) env" by simp
   6.187 -    also from Cons have "... =
   6.188 -        exec ys (exec xs (fn (hd s) (hd (tl s)) # tl (tl s)) env) env" .
   6.189 -    also have "... = exec ys (exec (Apply fn # xs) s env) env" by simp
   6.190 -    finally show ?case .
   6.191 -  qed
   6.192 -qed
   6.193 -
   6.194 -theorem correctness': "execute (compile e) env = eval e env"
   6.195 -proof -
   6.196 -  have exec_compile: "\<And>stack. exec (compile e) stack env = eval e env # stack"
   6.197 -  proof (induct e)
   6.198 -    case (Variable adr s)
   6.199 -    have "exec (compile (Variable adr)) s env = exec [Load adr] s env"
   6.200 -      by simp
   6.201 -    also have "... = env adr # s" by simp
   6.202 -    also have "env adr = eval (Variable adr) env" by simp
   6.203 -    finally show ?case .
   6.204 -  next
   6.205 -    case (Constant val s)
   6.206 -    show ?case by simp -- {* same as above *}
   6.207 -  next
   6.208 -    case (Binop fn e1 e2 s)
   6.209 -    have "exec (compile (Binop fn e1 e2)) s env =
   6.210 -        exec (compile e2 @ compile e1 @ [Apply fn]) s env" by simp
   6.211 -    also have "... = exec [Apply fn]
   6.212 -        (exec (compile e1) (exec (compile e2) s env) env) env"
   6.213 -      by (simp only: exec_append)
   6.214 -    also have "exec (compile e2) s env = eval e2 env # s" by fact
   6.215 -    also have "exec (compile e1) ... env = eval e1 env # ..." by fact
   6.216 -    also have "exec [Apply fn] ... env =
   6.217 -        fn (hd ...) (hd (tl ...)) # (tl (tl ...))" by simp
   6.218 -    also have "... = fn (eval e1 env) (eval e2 env) # s" by simp
   6.219 -    also have "fn (eval e1 env) (eval e2 env) =
   6.220 -        eval (Binop fn e1 e2) env"
   6.221 -      by simp
   6.222 -    finally show ?case .
   6.223 -  qed
   6.224 -
   6.225 -  have "execute (compile e) env = hd (exec (compile e) [] env)"
   6.226 -    by (simp add: execute_def)
   6.227 -  also from exec_compile
   6.228 -    have "exec (compile e) [] env = [eval e env]" .
   6.229 -  also have "hd ... = eval e env" by simp
   6.230 -  finally show ?thesis .
   6.231 -qed
   6.232 -
   6.233 -end
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/src/HOL/Isar_examples/Expr_Compiler.thy	Mon Jun 22 23:48:24 2009 +0200
     7.3 @@ -0,0 +1,231 @@
     7.4 +(*  Title:      HOL/Isar_examples/Expr_Compiler.thy
     7.5 +    Author:     Markus Wenzel, TU Muenchen
     7.6 +
     7.7 +Correctness of a simple expression/stack-machine compiler.
     7.8 +*)
     7.9 +
    7.10 +header {* Correctness of a simple expression compiler *}
    7.11 +
    7.12 +theory Expr_Compiler
    7.13 +imports Main
    7.14 +begin
    7.15 +
    7.16 +text {*
    7.17 + This is a (rather trivial) example of program verification.  We model
    7.18 + a compiler for translating expressions to stack machine instructions,
    7.19 + and prove its correctness wrt.\ some evaluation semantics.
    7.20 +*}
    7.21 +
    7.22 +
    7.23 +subsection {* Binary operations *}
    7.24 +
    7.25 +text {*
    7.26 + Binary operations are just functions over some type of values.  This
    7.27 + is both for abstract syntax and semantics, i.e.\ we use a ``shallow
    7.28 + embedding'' here.
    7.29 +*}
    7.30 +
    7.31 +types
    7.32 +  'val binop = "'val => 'val => 'val"
    7.33 +
    7.34 +
    7.35 +subsection {* Expressions *}
    7.36 +
    7.37 +text {*
    7.38 + The language of expressions is defined as an inductive type,
    7.39 + consisting of variables, constants, and binary operations on
    7.40 + expressions.
    7.41 +*}
    7.42 +
    7.43 +datatype ('adr, 'val) expr =
    7.44 +  Variable 'adr |
    7.45 +  Constant 'val |
    7.46 +  Binop "'val binop" "('adr, 'val) expr" "('adr, 'val) expr"
    7.47 +
    7.48 +text {*
    7.49 + Evaluation (wrt.\ some environment of variable assignments) is
    7.50 + defined by primitive recursion over the structure of expressions.
    7.51 +*}
    7.52 +
    7.53 +consts
    7.54 +  eval :: "('adr, 'val) expr => ('adr => 'val) => 'val"
    7.55 +
    7.56 +primrec
    7.57 +  "eval (Variable x) env = env x"
    7.58 +  "eval (Constant c) env = c"
    7.59 +  "eval (Binop f e1 e2) env = f (eval e1 env) (eval e2 env)"
    7.60 +
    7.61 +
    7.62 +subsection {* Machine *}
    7.63 +
    7.64 +text {*
    7.65 + Next we model a simple stack machine, with three instructions.
    7.66 +*}
    7.67 +
    7.68 +datatype ('adr, 'val) instr =
    7.69 +  Const 'val |
    7.70 +  Load 'adr |
    7.71 +  Apply "'val binop"
    7.72 +
    7.73 +text {*
    7.74 + Execution of a list of stack machine instructions is easily defined
    7.75 + as follows.
    7.76 +*}
    7.77 +
    7.78 +consts
    7.79 +  exec :: "(('adr, 'val) instr) list
    7.80 +    => 'val list => ('adr => 'val) => 'val list"
    7.81 +
    7.82 +primrec
    7.83 +  "exec [] stack env = stack"
    7.84 +  "exec (instr # instrs) stack env =
    7.85 +    (case instr of
    7.86 +      Const c => exec instrs (c # stack) env
    7.87 +    | Load x => exec instrs (env x # stack) env
    7.88 +    | Apply f => exec instrs (f (hd stack) (hd (tl stack))
    7.89 +                   # (tl (tl stack))) env)"
    7.90 +
    7.91 +constdefs
    7.92 +  execute :: "(('adr, 'val) instr) list => ('adr => 'val) => 'val"
    7.93 +  "execute instrs env == hd (exec instrs [] env)"
    7.94 +
    7.95 +
    7.96 +subsection {* Compiler *}
    7.97 +
    7.98 +text {*
     7.99 + We are ready to define the compilation function from expressions to
   7.100 + lists of stack machine instructions.
   7.101 +*}
   7.102 +
   7.103 +consts
   7.104 +  compile :: "('adr, 'val) expr => (('adr, 'val) instr) list"
   7.105 +
   7.106 +primrec
   7.107 +  "compile (Variable x) = [Load x]"
   7.108 +  "compile (Constant c) = [Const c]"
   7.109 +  "compile (Binop f e1 e2) = compile e2 @ compile e1 @ [Apply f]"
   7.110 +
   7.111 +
   7.112 +text {*
   7.113 + The main result of this development is the correctness theorem for
   7.114 + $\idt{compile}$.  We first establish a lemma about $\idt{exec}$ and
   7.115 + list append.
   7.116 +*}
   7.117 +
   7.118 +lemma exec_append:
   7.119 +  "exec (xs @ ys) stack env =
   7.120 +    exec ys (exec xs stack env) env"
   7.121 +proof (induct xs arbitrary: stack)
   7.122 +  case Nil
   7.123 +  show ?case by simp
   7.124 +next
   7.125 +  case (Cons x xs)
   7.126 +  show ?case
   7.127 +  proof (induct x)
   7.128 +    case Const
   7.129 +    from Cons show ?case by simp
   7.130 +  next
   7.131 +    case Load
   7.132 +    from Cons show ?case by simp
   7.133 +  next
   7.134 +    case Apply
   7.135 +    from Cons show ?case by simp
   7.136 +  qed
   7.137 +qed
   7.138 +
   7.139 +theorem correctness: "execute (compile e) env = eval e env"
   7.140 +proof -
   7.141 +  have "\<And>stack. exec (compile e) stack env = eval e env # stack"
   7.142 +  proof (induct e)
   7.143 +    case Variable show ?case by simp
   7.144 +  next
   7.145 +    case Constant show ?case by simp
   7.146 +  next
   7.147 +    case Binop then show ?case by (simp add: exec_append)
   7.148 +  qed
   7.149 +  then show ?thesis by (simp add: execute_def)
   7.150 +qed
   7.151 +
   7.152 +
   7.153 +text {*
   7.154 + \bigskip In the proofs above, the \name{simp} method does quite a lot
   7.155 + of work behind the scenes (mostly ``functional program execution'').
   7.156 + Subsequently, the same reasoning is elaborated in detail --- at most
   7.157 + one recursive function definition is used at a time.  Thus we get a
   7.158 + better idea of what is actually going on.
   7.159 +*}
   7.160 +
   7.161 +lemma exec_append':
   7.162 +  "exec (xs @ ys) stack env = exec ys (exec xs stack env) env"
   7.163 +proof (induct xs arbitrary: stack)
   7.164 +  case (Nil s)
   7.165 +  have "exec ([] @ ys) s env = exec ys s env" by simp
   7.166 +  also have "... = exec ys (exec [] s env) env" by simp
   7.167 +  finally show ?case .
   7.168 +next
   7.169 +  case (Cons x xs s)
   7.170 +  show ?case
   7.171 +  proof (induct x)
   7.172 +    case (Const val)
   7.173 +    have "exec ((Const val # xs) @ ys) s env = exec (Const val # xs @ ys) s env"
   7.174 +      by simp
   7.175 +    also have "... = exec (xs @ ys) (val # s) env" by simp
   7.176 +    also from Cons have "... = exec ys (exec xs (val # s) env) env" .
   7.177 +    also have "... = exec ys (exec (Const val # xs) s env) env" by simp
   7.178 +    finally show ?case .
   7.179 +  next
   7.180 +    case (Load adr)
   7.181 +    from Cons show ?case by simp -- {* same as above *}
   7.182 +  next
   7.183 +    case (Apply fn)
   7.184 +    have "exec ((Apply fn # xs) @ ys) s env =
   7.185 +        exec (Apply fn # xs @ ys) s env" by simp
   7.186 +    also have "... =
   7.187 +        exec (xs @ ys) (fn (hd s) (hd (tl s)) # (tl (tl s))) env" by simp
   7.188 +    also from Cons have "... =
   7.189 +        exec ys (exec xs (fn (hd s) (hd (tl s)) # tl (tl s)) env) env" .
   7.190 +    also have "... = exec ys (exec (Apply fn # xs) s env) env" by simp
   7.191 +    finally show ?case .
   7.192 +  qed
   7.193 +qed
   7.194 +
   7.195 +theorem correctness': "execute (compile e) env = eval e env"
   7.196 +proof -
   7.197 +  have exec_compile: "\<And>stack. exec (compile e) stack env = eval e env # stack"
   7.198 +  proof (induct e)
   7.199 +    case (Variable adr s)
   7.200 +    have "exec (compile (Variable adr)) s env = exec [Load adr] s env"
   7.201 +      by simp
   7.202 +    also have "... = env adr # s" by simp
   7.203 +    also have "env adr = eval (Variable adr) env" by simp
   7.204 +    finally show ?case .
   7.205 +  next
   7.206 +    case (Constant val s)
   7.207 +    show ?case by simp -- {* same as above *}
   7.208 +  next
   7.209 +    case (Binop fn e1 e2 s)
   7.210 +    have "exec (compile (Binop fn e1 e2)) s env =
   7.211 +        exec (compile e2 @ compile e1 @ [Apply fn]) s env" by simp
   7.212 +    also have "... = exec [Apply fn]
   7.213 +        (exec (compile e1) (exec (compile e2) s env) env) env"
   7.214 +      by (simp only: exec_append)
   7.215 +    also have "exec (compile e2) s env = eval e2 env # s" by fact
   7.216 +    also have "exec (compile e1) ... env = eval e1 env # ..." by fact
   7.217 +    also have "exec [Apply fn] ... env =
   7.218 +        fn (hd ...) (hd (tl ...)) # (tl (tl ...))" by simp
   7.219 +    also have "... = fn (eval e1 env) (eval e2 env) # s" by simp
   7.220 +    also have "fn (eval e1 env) (eval e2 env) =
   7.221 +        eval (Binop fn e1 e2) env"
   7.222 +      by simp
   7.223 +    finally show ?case .
   7.224 +  qed
   7.225 +
   7.226 +  have "execute (compile e) env = hd (exec (compile e) [] env)"
   7.227 +    by (simp add: execute_def)
   7.228 +  also from exec_compile
   7.229 +    have "exec (compile e) [] env = [eval e env]" .
   7.230 +  also have "hd ... = eval e env" by simp
   7.231 +  finally show ?thesis .
   7.232 +qed
   7.233 +
   7.234 +end
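
As a quick sanity check of the correctness theorem above, one could state the following in the context of Expr_Compiler (an illustrative sketch, not part of this changeset). It specializes compilation and execution to a binary operation applied to two constants; simp then reduces both sides via correctness and the eval equations:

    lemma "execute (compile (Binop f (Constant a) (Constant b))) env = f a b"
      by (simp add: correctness)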
     8.1 --- a/src/HOL/Isar_examples/Fibonacci.thy	Mon Jun 22 22:51:08 2009 +0200
     8.2 +++ b/src/HOL/Isar_examples/Fibonacci.thy	Mon Jun 22 23:48:24 2009 +0200
     8.3 @@ -1,5 +1,4 @@
     8.4  (*  Title:      HOL/Isar_examples/Fibonacci.thy
     8.5 -    ID:         $Id$
     8.6      Author:     Gertrud Bauer
     8.7      Copyright   1999 Technische Universitaet Muenchen
     8.8  
     9.1 --- a/src/HOL/Isar_examples/Group.thy	Mon Jun 22 22:51:08 2009 +0200
     9.2 +++ b/src/HOL/Isar_examples/Group.thy	Mon Jun 22 23:48:24 2009 +0200
     9.3 @@ -1,11 +1,12 @@
     9.4  (*  Title:      HOL/Isar_examples/Group.thy
     9.5 -    ID:         $Id$
     9.6      Author:     Markus Wenzel, TU Muenchen
     9.7  *)
     9.8  
     9.9  header {* Basic group theory *}
    9.10  
    9.11 -theory Group imports Main begin
    9.12 +theory Group
    9.13 +imports Main
    9.14 +begin
    9.15  
    9.16  subsection {* Groups and calculational reasoning *} 
    9.17  
    10.1 --- a/src/HOL/Isar_examples/Hoare.thy	Mon Jun 22 22:51:08 2009 +0200
    10.2 +++ b/src/HOL/Isar_examples/Hoare.thy	Mon Jun 22 23:48:24 2009 +0200
    10.3 @@ -1,5 +1,4 @@
    10.4  (*  Title:      HOL/Isar_examples/Hoare.thy
    10.5 -    ID:         $Id$
    10.6      Author:     Markus Wenzel, TU Muenchen
    10.7  
    10.8  A formulation of Hoare logic suitable for Isar.
    10.9 @@ -7,8 +6,10 @@
   10.10  
   10.11  header {* Hoare Logic *}
   10.12  
   10.13 -theory Hoare imports Main
   10.14 -uses ("~~/src/HOL/Hoare/hoare_tac.ML") begin
   10.15 +theory Hoare
   10.16 +imports Main
   10.17 +uses ("~~/src/HOL/Hoare/hoare_tac.ML")
   10.18 +begin
   10.19  
   10.20  subsection {* Abstract syntax and semantics *}
   10.21  
    11.1 --- a/src/HOL/Isar_examples/HoareEx.thy	Mon Jun 22 22:51:08 2009 +0200
    11.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.3 @@ -1,328 +0,0 @@
    11.4 -
    11.5 -header {* Using Hoare Logic *}
    11.6 -
    11.7 -theory HoareEx imports Hoare begin
    11.8 -
    11.9 -subsection {* State spaces *}
   11.10 -
   11.11 -text {*
   11.12 - First of all we provide a store of program variables that
   11.13 - occur in any of the programs considered later.  Slightly unexpected
   11.14 - things may happen when attempting to work with undeclared variables.
   11.15 -*}
   11.16 -
   11.17 -record vars =
   11.18 -  I :: nat
   11.19 -  M :: nat
   11.20 -  N :: nat
   11.21 -  S :: nat
   11.22 -
   11.23 -text {*
   11.24 - While all of our variables happen to have the same type, nothing
   11.25 - would prevent us from working with many-sorted programs as well, or
   11.26 - even polymorphic ones.  Also note that Isabelle/HOL's extensible
   11.27 - record types even provides simple means to extend the state space
   11.28 - later.
   11.29 -*}
   11.30 -
   11.31 -
   11.32 -subsection {* Basic examples *}
   11.33 -
   11.34 -text {*
   11.35 - We look at few trivialities involving assignment and sequential
   11.36 - composition, in order to get an idea of how to work with our
   11.37 - formulation of Hoare Logic.
   11.38 -*}
   11.39 -
   11.40 -text {*
   11.41 - Using the basic \name{assign} rule directly is a bit cumbersome.
   11.42 -*}
   11.43 -
   11.44 -lemma
   11.45 -  "|- .{\<acute>(N_update (\<lambda>_. (2 * \<acute>N))) : .{\<acute>N = 10}.}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
   11.46 -  by (rule assign)
   11.47 -
   11.48 -text {*
   11.49 - Certainly we want the state modification already done, e.g.\ by
   11.50 - simplification.  The \name{hoare} method performs the basic state
   11.51 - update for us; we may apply the Simplifier afterwards to achieve
   11.52 - ``obvious'' consequences as well.
   11.53 -*}
   11.54 -
   11.55 -lemma "|- .{True}. \<acute>N := 10 .{\<acute>N = 10}."
   11.56 -  by hoare
   11.57 -
   11.58 -lemma "|- .{2 * \<acute>N = 10}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
   11.59 -  by hoare
   11.60 -
   11.61 -lemma "|- .{\<acute>N = 5}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
   11.62 -  by hoare simp
   11.63 -
   11.64 -lemma "|- .{\<acute>N + 1 = a + 1}. \<acute>N := \<acute>N + 1 .{\<acute>N = a + 1}."
   11.65 -  by hoare
   11.66 -
   11.67 -lemma "|- .{\<acute>N = a}. \<acute>N := \<acute>N + 1 .{\<acute>N = a + 1}."
   11.68 -  by hoare simp
   11.69 -
   11.70 -lemma "|- .{a = a & b = b}. \<acute>M := a; \<acute>N := b .{\<acute>M = a & \<acute>N = b}."
   11.71 -  by hoare
   11.72 -
   11.73 -lemma "|- .{True}. \<acute>M := a; \<acute>N := b .{\<acute>M = a & \<acute>N = b}."
   11.74 -  by hoare simp
   11.75 -
   11.76 -lemma
   11.77 -"|- .{\<acute>M = a & \<acute>N = b}.
   11.78 -    \<acute>I := \<acute>M; \<acute>M := \<acute>N; \<acute>N := \<acute>I
   11.79 -    .{\<acute>M = b & \<acute>N = a}."
   11.80 -  by hoare simp
   11.81 -
   11.82 -text {*
   11.83 - It is important to note that statements like the following one can
   11.84 - only be proven for each individual program variable.  Due to the
   11.85 - extra-logical nature of record fields, we cannot formulate a theorem
   11.86 - relating record selectors and updates schematically.
   11.87 -*}
   11.88 -
   11.89 -lemma "|- .{\<acute>N = a}. \<acute>N := \<acute>N .{\<acute>N = a}."
   11.90 -  by hoare
   11.91 -
   11.92 -lemma "|- .{\<acute>x = a}. \<acute>x := \<acute>x .{\<acute>x = a}."
   11.93 -  oops
   11.94 -
   11.95 -lemma
   11.96 -  "Valid {s. x s = a} (Basic (\<lambda>s. x_update (x s) s)) {s. x s = n}"
   11.97 -  -- {* same statement without concrete syntax *}
   11.98 -  oops
   11.99 -
  11.100 -
  11.101 -text {*
  11.102 - In the following assignments we make use of the consequence rule in
  11.103 - order to achieve the intended precondition.  Certainly, the
  11.104 - \name{hoare} method is able to handle this case, too.
  11.105 -*}
  11.106 -
  11.107 -lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  11.108 -proof -
  11.109 -  have ".{\<acute>M = \<acute>N}. <= .{\<acute>M + 1 ~= \<acute>N}."
  11.110 -    by auto
  11.111 -  also have "|- ... \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  11.112 -    by hoare
  11.113 -  finally show ?thesis .
  11.114 -qed
  11.115 -
  11.116 -lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  11.117 -proof -
  11.118 -  have "!!m n::nat. m = n --> m + 1 ~= n"
  11.119 -      -- {* inclusion of assertions expressed in ``pure'' logic, *}
  11.120 -      -- {* without mentioning the state space *}
  11.121 -    by simp
  11.122 -  also have "|- .{\<acute>M + 1 ~= \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  11.123 -    by hoare
  11.124 -  finally show ?thesis .
  11.125 -qed
  11.126 -
  11.127 -lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  11.128 -  by hoare simp
  11.129 -
  11.130 -
  11.131 -subsection {* Multiplication by addition *}
  11.132 -
  11.133 -text {*
  11.134 - We now do some basic examples of actual \texttt{WHILE} programs.
  11.135 - This one is a loop for calculating the product of two natural
  11.136 - numbers, by iterated addition.  We first give detailed structured
  11.137 - proof based on single-step Hoare rules.
  11.138 -*}
  11.139 -
  11.140 -lemma
  11.141 -  "|- .{\<acute>M = 0 & \<acute>S = 0}.
  11.142 -      WHILE \<acute>M ~= a
  11.143 -      DO \<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1 OD
  11.144 -      .{\<acute>S = a * b}."
  11.145 -proof -
  11.146 -  let "|- _ ?while _" = ?thesis
  11.147 -  let ".{\<acute>?inv}." = ".{\<acute>S = \<acute>M * b}."
  11.148 -
  11.149 -  have ".{\<acute>M = 0 & \<acute>S = 0}. <= .{\<acute>?inv}." by auto
  11.150 -  also have "|- ... ?while .{\<acute>?inv & ~ (\<acute>M ~= a)}."
  11.151 -  proof
  11.152 -    let ?c = "\<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1"
  11.153 -    have ".{\<acute>?inv & \<acute>M ~= a}. <= .{\<acute>S + b = (\<acute>M + 1) * b}."
  11.154 -      by auto
  11.155 -    also have "|- ... ?c .{\<acute>?inv}." by hoare
  11.156 -    finally show "|- .{\<acute>?inv & \<acute>M ~= a}. ?c .{\<acute>?inv}." .
  11.157 -  qed
  11.158 -  also have "... <= .{\<acute>S = a * b}." by auto
  11.159 -  finally show ?thesis .
  11.160 -qed
  11.161 -
  11.162 -text {*
  11.163 - The subsequent version of the proof applies the \name{hoare} method
  11.164 - to reduce the Hoare statement to a purely logical problem that can be
  11.165 - solved fully automatically.  Note that we have to specify the
  11.166 - \texttt{WHILE} loop invariant in the original statement.
  11.167 -*}
  11.168 -
  11.169 -lemma
  11.170 -  "|- .{\<acute>M = 0 & \<acute>S = 0}.
  11.171 -      WHILE \<acute>M ~= a
  11.172 -      INV .{\<acute>S = \<acute>M * b}.
  11.173 -      DO \<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1 OD
  11.174 -      .{\<acute>S = a * b}."
  11.175 -  by hoare auto
  11.176 -
  11.177 -
  11.178 -subsection {* Summing natural numbers *}
  11.179 -
  11.180 -text {*
  11.181 - We verify an imperative program to sum natural numbers up to a given
  11.182 - limit.  First some functional definition for proper specification of
  11.183 - the problem.
  11.184 -*}
  11.185 -
  11.186 -text {*
  11.187 - The following proof is quite explicit in the individual steps taken,
  11.188 - with the \name{hoare} method only applied locally to take care of
  11.189 - assignment and sequential composition.  Note that we express
  11.190 - intermediate proof obligation in pure logic, without referring to the
  11.191 - state space.
  11.192 -*}
  11.193 -
  11.194 -declare atLeast0LessThan[symmetric,simp]
  11.195 -
  11.196 -theorem
  11.197 -  "|- .{True}.
  11.198 -      \<acute>S := 0; \<acute>I := 1;
  11.199 -      WHILE \<acute>I ~= n
  11.200 -      DO
  11.201 -        \<acute>S := \<acute>S + \<acute>I;
  11.202 -        \<acute>I := \<acute>I + 1
  11.203 -      OD
  11.204 -      .{\<acute>S = (SUM j<n. j)}."
  11.205 -  (is "|- _ (_; ?while) _")
  11.206 -proof -
  11.207 -  let ?sum = "\<lambda>k::nat. SUM j<k. j"
  11.208 -  let ?inv = "\<lambda>s i::nat. s = ?sum i"
  11.209 -
  11.210 -  have "|- .{True}. \<acute>S := 0; \<acute>I := 1 .{?inv \<acute>S \<acute>I}."
  11.211 -  proof -
  11.212 -    have "True --> 0 = ?sum 1"
  11.213 -      by simp
  11.214 -    also have "|- .{...}. \<acute>S := 0; \<acute>I := 1 .{?inv \<acute>S \<acute>I}."
  11.215 -      by hoare
  11.216 -    finally show ?thesis .
  11.217 -  qed
  11.218 -  also have "|- ... ?while .{?inv \<acute>S \<acute>I & ~ \<acute>I ~= n}."
  11.219 -  proof
  11.220 -    let ?body = "\<acute>S := \<acute>S + \<acute>I; \<acute>I := \<acute>I + 1"
  11.221 -    have "!!s i. ?inv s i & i ~= n -->  ?inv (s + i) (i + 1)"
  11.222 -      by simp
  11.223 -    also have "|- .{\<acute>S + \<acute>I = ?sum (\<acute>I + 1)}. ?body .{?inv \<acute>S \<acute>I}."
  11.224 -      by hoare
  11.225 -    finally show "|- .{?inv \<acute>S \<acute>I & \<acute>I ~= n}. ?body .{?inv \<acute>S \<acute>I}." .
  11.226 -  qed
  11.227 -  also have "!!s i. s = ?sum i & ~ i ~= n --> s = ?sum n"
  11.228 -    by simp
  11.229 -  finally show ?thesis .
  11.230 -qed
  11.231 -
  11.232 -text {*
  11.233 - The next version uses the \name{hoare} method, while still explaining
  11.234 - the resulting proof obligations in an abstract, structured manner.
  11.235 -*}
  11.236 -
  11.237 -theorem
  11.238 -  "|- .{True}.
  11.239 -      \<acute>S := 0; \<acute>I := 1;
  11.240 -      WHILE \<acute>I ~= n
  11.241 -      INV .{\<acute>S = (SUM j<\<acute>I. j)}.
  11.242 -      DO
  11.243 -        \<acute>S := \<acute>S + \<acute>I;
  11.244 -        \<acute>I := \<acute>I + 1
  11.245 -      OD
  11.246 -      .{\<acute>S = (SUM j<n. j)}."
  11.247 -proof -
  11.248 -  let ?sum = "\<lambda>k::nat. SUM j<k. j"
  11.249 -  let ?inv = "\<lambda>s i::nat. s = ?sum i"
  11.250 -
  11.251 -  show ?thesis
  11.252 -  proof hoare
  11.253 -    show "?inv 0 1" by simp
  11.254 -  next
  11.255 -    fix s i assume "?inv s i & i ~= n"
  11.256 -    thus "?inv (s + i) (i + 1)" by simp
  11.257 -  next
  11.258 -    fix s i assume "?inv s i & ~ i ~= n"
  11.259 -    thus "s = ?sum n" by simp
  11.260 -  qed
  11.261 -qed
  11.262 -
  11.263 -text {*
  11.264 - Certainly, this proof may be done fully automatic as well, provided
  11.265 - that the invariant is given beforehand.
  11.266 -*}
  11.267 -
  11.268 -theorem
  11.269 -  "|- .{True}.
  11.270 -      \<acute>S := 0; \<acute>I := 1;
  11.271 -      WHILE \<acute>I ~= n
  11.272 -      INV .{\<acute>S = (SUM j<\<acute>I. j)}.
  11.273 -      DO
  11.274 -        \<acute>S := \<acute>S + \<acute>I;
  11.275 -        \<acute>I := \<acute>I + 1
  11.276 -      OD
  11.277 -      .{\<acute>S = (SUM j<n. j)}."
  11.278 -  by hoare auto
  11.279 -
  11.280 -
  11.281 -subsection{* Time *}
  11.282 -
  11.283 -text{*
  11.284 -  A simple embedding of time in Hoare logic: function @{text timeit}
  11.285 -  inserts an extra variable to keep track of the elapsed time.
  11.286 -*}
  11.287 -
  11.288 -record tstate = time :: nat
  11.289 -
  11.290 -types 'a time = "\<lparr>time :: nat, \<dots> :: 'a\<rparr>"
  11.291 -
  11.292 -consts timeit :: "'a time com \<Rightarrow> 'a time com"
  11.293 -primrec
  11.294 -  "timeit (Basic f) = (Basic f; Basic(\<lambda>s. s\<lparr>time := Suc (time s)\<rparr>))"
  11.295 -  "timeit (c1; c2) = (timeit c1; timeit c2)"
  11.296 -  "timeit (Cond b c1 c2) = Cond b (timeit c1) (timeit c2)"
  11.297 -  "timeit (While b iv c) = While b iv (timeit c)"
  11.298 -
  11.299 -record tvars = tstate +
  11.300 -  I :: nat
  11.301 -  J :: nat
  11.302 -
  11.303 -lemma lem: "(0::nat) < n \<Longrightarrow> n + n \<le> Suc (n * n)"
  11.304 -  by (induct n) simp_all
  11.305 -
  11.306 -lemma "|- .{i = \<acute>I & \<acute>time = 0}.
  11.307 - timeit(
  11.308 - WHILE \<acute>I \<noteq> 0
  11.309 - INV .{2*\<acute>time + \<acute>I*\<acute>I + 5*\<acute>I = i*i + 5*i}.
  11.310 - DO
  11.311 -   \<acute>J := \<acute>I;
  11.312 -   WHILE \<acute>J \<noteq> 0
  11.313 -   INV .{0 < \<acute>I & 2*\<acute>time + \<acute>I*\<acute>I + 3*\<acute>I + 2*\<acute>J - 2 = i*i + 5*i}.
  11.314 -   DO \<acute>J := \<acute>J - 1 OD;
  11.315 -   \<acute>I := \<acute>I - 1
  11.316 - OD
  11.317 - ) .{2*\<acute>time = i*i + 5*i}."
  11.318 -  apply simp
  11.319 -  apply hoare
  11.320 -      apply simp
  11.321 -     apply clarsimp
  11.322 -    apply clarsimp
  11.323 -   apply arith
  11.324 -   prefer 2
  11.325 -   apply clarsimp
  11.326 -  apply (clarsimp simp: nat_distrib)
  11.327 -  apply (frule lem)
  11.328 -  apply arith
  11.329 -  done
  11.330 -
  11.331 -end
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/src/HOL/Isar_examples/Hoare_Ex.thy	Mon Jun 22 23:48:24 2009 +0200
    12.3 @@ -0,0 +1,329 @@
    12.4 +header {* Using Hoare Logic *}
    12.5 +
    12.6 +theory Hoare_Ex
    12.7 +imports Hoare
    12.8 +begin
    12.9 +
   12.10 +subsection {* State spaces *}
   12.11 +
   12.12 +text {*
   12.13 + First of all we provide a store of program variables that
   12.14 + occur in any of the programs considered later.  Slightly unexpected
   12.15 + things may happen when attempting to work with undeclared variables.
   12.16 +*}
   12.17 +
   12.18 +record vars =
   12.19 +  I :: nat
   12.20 +  M :: nat
   12.21 +  N :: nat
   12.22 +  S :: nat
   12.23 +
   12.24 +text {*
   12.25 + While all of our variables happen to have the same type, nothing
   12.26 + would prevent us from working with many-sorted programs as well, or
   12.27 + even polymorphic ones.  Also note that Isabelle/HOL's extensible
   12.28 + record types even provide simple means to extend the state space
   12.29 + later.
   12.30 +*}
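         +
         +text {*
         + For instance, the state space could be enlarged later on as
         + follows (a hypothetical extension, given for illustration only
         + and not used below):
         +*}
         +
         +record vars' = vars +
         +  T :: nat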
   12.31 +
   12.32 +
   12.33 +subsection {* Basic examples *}
   12.34 +
   12.35 +text {*
   12.36 + We look at a few trivialities involving assignment and sequential
   12.37 + composition, in order to get an idea of how to work with our
   12.38 + formulation of Hoare Logic.
   12.39 +*}
   12.40 +
   12.41 +text {*
   12.42 + Using the basic \name{assign} rule directly is a bit cumbersome.
   12.43 +*}
   12.44 +
   12.45 +lemma
   12.46 +  "|- .{\<acute>(N_update (\<lambda>_. (2 * \<acute>N))) : .{\<acute>N = 10}.}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
   12.47 +  by (rule assign)
   12.48 +
   12.49 +text {*
   12.50 + Certainly we want the state modification already done, e.g.\ by
   12.51 + simplification.  The \name{hoare} method performs the basic state
   12.52 + update for us; we may apply the Simplifier afterwards to achieve
   12.53 + ``obvious'' consequences as well.
   12.54 +*}
   12.55 +
   12.56 +lemma "|- .{True}. \<acute>N := 10 .{\<acute>N = 10}."
   12.57 +  by hoare
   12.58 +
   12.59 +lemma "|- .{2 * \<acute>N = 10}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
   12.60 +  by hoare
   12.61 +
   12.62 +lemma "|- .{\<acute>N = 5}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
   12.63 +  by hoare simp
   12.64 +
   12.65 +lemma "|- .{\<acute>N + 1 = a + 1}. \<acute>N := \<acute>N + 1 .{\<acute>N = a + 1}."
   12.66 +  by hoare
   12.67 +
   12.68 +lemma "|- .{\<acute>N = a}. \<acute>N := \<acute>N + 1 .{\<acute>N = a + 1}."
   12.69 +  by hoare simp
   12.70 +
   12.71 +lemma "|- .{a = a & b = b}. \<acute>M := a; \<acute>N := b .{\<acute>M = a & \<acute>N = b}."
   12.72 +  by hoare
   12.73 +
   12.74 +lemma "|- .{True}. \<acute>M := a; \<acute>N := b .{\<acute>M = a & \<acute>N = b}."
   12.75 +  by hoare simp
   12.76 +
   12.77 +lemma
   12.78 +"|- .{\<acute>M = a & \<acute>N = b}.
   12.79 +    \<acute>I := \<acute>M; \<acute>M := \<acute>N; \<acute>N := \<acute>I
   12.80 +    .{\<acute>M = b & \<acute>N = a}."
   12.81 +  by hoare simp
   12.82 +
   12.83 +text {*
   12.84 + It is important to note that statements like the following one can
   12.85 + only be proven for each individual program variable.  Due to the
   12.86 + extra-logical nature of record fields, we cannot formulate a theorem
   12.87 + relating record selectors and updates schematically.
   12.88 +*}
   12.89 +
   12.90 +lemma "|- .{\<acute>N = a}. \<acute>N := \<acute>N .{\<acute>N = a}."
   12.91 +  by hoare
   12.92 +
   12.93 +lemma "|- .{\<acute>x = a}. \<acute>x := \<acute>x .{\<acute>x = a}."
   12.94 +  oops
   12.95 +
   12.96 +lemma
   12.97 +  "Valid {s. x s = a} (Basic (\<lambda>s. x_update (x s) s)) {s. x s = n}"
   12.98 +  -- {* same statement without concrete syntax *}
   12.99 +  oops
  12.100 +
  12.101 +
  12.102 +text {*
  12.103 + In the following assignments we make use of the consequence rule in
  12.104 + order to achieve the intended precondition.  Certainly, the
  12.105 + \name{hoare} method is able to handle this case, too.
  12.106 +*}
  12.107 +
  12.108 +lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  12.109 +proof -
  12.110 +  have ".{\<acute>M = \<acute>N}. <= .{\<acute>M + 1 ~= \<acute>N}."
  12.111 +    by auto
  12.112 +  also have "|- ... \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  12.113 +    by hoare
  12.114 +  finally show ?thesis .
  12.115 +qed
  12.116 +
  12.117 +lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  12.118 +proof -
  12.119 +  have "!!m n::nat. m = n --> m + 1 ~= n"
  12.120 +      -- {* inclusion of assertions expressed in ``pure'' logic, *}
  12.121 +      -- {* without mentioning the state space *}
  12.122 +    by simp
  12.123 +  also have "|- .{\<acute>M + 1 ~= \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  12.124 +    by hoare
  12.125 +  finally show ?thesis .
  12.126 +qed
  12.127 +
  12.128 +lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
  12.129 +  by hoare simp
  12.130 +
  12.131 +
  12.132 +subsection {* Multiplication by addition *}
  12.133 +
  12.134 +text {*
  12.135 + We now do some basic examples of actual \texttt{WHILE} programs.
  12.136 + This one is a loop for calculating the product of two natural
  12.137 + numbers, by iterated addition.  We first give a detailed structured
  12.138 + proof based on single-step Hoare rules.
  12.139 +*}
  12.140 +
  12.141 +lemma
  12.142 +  "|- .{\<acute>M = 0 & \<acute>S = 0}.
  12.143 +      WHILE \<acute>M ~= a
  12.144 +      DO \<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1 OD
  12.145 +      .{\<acute>S = a * b}."
  12.146 +proof -
  12.147 +  let "|- _ ?while _" = ?thesis
  12.148 +  let ".{\<acute>?inv}." = ".{\<acute>S = \<acute>M * b}."
  12.149 +
  12.150 +  have ".{\<acute>M = 0 & \<acute>S = 0}. <= .{\<acute>?inv}." by auto
  12.151 +  also have "|- ... ?while .{\<acute>?inv & ~ (\<acute>M ~= a)}."
  12.152 +  proof
  12.153 +    let ?c = "\<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1"
  12.154 +    have ".{\<acute>?inv & \<acute>M ~= a}. <= .{\<acute>S + b = (\<acute>M + 1) * b}."
  12.155 +      by auto
  12.156 +    also have "|- ... ?c .{\<acute>?inv}." by hoare
  12.157 +    finally show "|- .{\<acute>?inv & \<acute>M ~= a}. ?c .{\<acute>?inv}." .
  12.158 +  qed
  12.159 +  also have "... <= .{\<acute>S = a * b}." by auto
  12.160 +  finally show ?thesis .
  12.161 +qed
  12.162 +
  12.163 +text {*
  12.164 + The subsequent version of the proof applies the \name{hoare} method
  12.165 + to reduce the Hoare statement to a purely logical problem that can be
  12.166 + solved fully automatically.  Note that we have to specify the
  12.167 + \texttt{WHILE} loop invariant in the original statement.
  12.168 +*}
  12.169 +
  12.170 +lemma
  12.171 +  "|- .{\<acute>M = 0 & \<acute>S = 0}.
  12.172 +      WHILE \<acute>M ~= a
  12.173 +      INV .{\<acute>S = \<acute>M * b}.
  12.174 +      DO \<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1 OD
  12.175 +      .{\<acute>S = a * b}."
  12.176 +  by hoare auto
  12.177 +
  12.178 +
  12.179 +subsection {* Summing natural numbers *}
  12.180 +
  12.181 +text {*
  12.182 + We verify an imperative program to sum natural numbers up to a given
  12.183 + limit.  First, we sketch a functional specification of
  12.184 + the problem.
  12.185 +*}
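         +
         +text {*
         + A minimal sketch of such a specification (a hypothetical helper,
         + since the statements below use the @{text SUM} syntax directly):
         +*}
         +
         +consts sum' :: "nat => nat"
         +primrec
         +  "sum' 0 = 0"
         +  "sum' (Suc n) = sum' n + n"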
  12.186 +
  12.187 +text {*
  12.188 + The following proof is quite explicit in the individual steps taken,
  12.189 + with the \name{hoare} method only applied locally to take care of
  12.190 + assignment and sequential composition.  Note that we express
  12.191 + intermediate proof obligations in pure logic, without referring to the
  12.192 + state space.
  12.193 +*}
  12.194 +
  12.195 +declare atLeast0LessThan[symmetric,simp]
  12.196 +
  12.197 +theorem
  12.198 +  "|- .{True}.
  12.199 +      \<acute>S := 0; \<acute>I := 1;
  12.200 +      WHILE \<acute>I ~= n
  12.201 +      DO
  12.202 +        \<acute>S := \<acute>S + \<acute>I;
  12.203 +        \<acute>I := \<acute>I + 1
  12.204 +      OD
  12.205 +      .{\<acute>S = (SUM j<n. j)}."
  12.206 +  (is "|- _ (_; ?while) _")
  12.207 +proof -
  12.208 +  let ?sum = "\<lambda>k::nat. SUM j<k. j"
  12.209 +  let ?inv = "\<lambda>s i::nat. s = ?sum i"
  12.210 +
  12.211 +  have "|- .{True}. \<acute>S := 0; \<acute>I := 1 .{?inv \<acute>S \<acute>I}."
  12.212 +  proof -
  12.213 +    have "True --> 0 = ?sum 1"
  12.214 +      by simp
  12.215 +    also have "|- .{...}. \<acute>S := 0; \<acute>I := 1 .{?inv \<acute>S \<acute>I}."
  12.216 +      by hoare
  12.217 +    finally show ?thesis .
  12.218 +  qed
  12.219 +  also have "|- ... ?while .{?inv \<acute>S \<acute>I & ~ \<acute>I ~= n}."
  12.220 +  proof
  12.221 +    let ?body = "\<acute>S := \<acute>S + \<acute>I; \<acute>I := \<acute>I + 1"
  12.222 +    have "!!s i. ?inv s i & i ~= n -->  ?inv (s + i) (i + 1)"
  12.223 +      by simp
  12.224 +    also have "|- .{\<acute>S + \<acute>I = ?sum (\<acute>I + 1)}. ?body .{?inv \<acute>S \<acute>I}."
  12.225 +      by hoare
  12.226 +    finally show "|- .{?inv \<acute>S \<acute>I & \<acute>I ~= n}. ?body .{?inv \<acute>S \<acute>I}." .
  12.227 +  qed
  12.228 +  also have "!!s i. s = ?sum i & ~ i ~= n --> s = ?sum n"
  12.229 +    by simp
  12.230 +  finally show ?thesis .
  12.231 +qed
  12.232 +
  12.233 +text {*
  12.234 + The next version uses the \name{hoare} method, while still explaining
  12.235 + the resulting proof obligations in an abstract, structured manner.
  12.236 +*}
  12.237 +
  12.238 +theorem
  12.239 +  "|- .{True}.
  12.240 +      \<acute>S := 0; \<acute>I := 1;
  12.241 +      WHILE \<acute>I ~= n
  12.242 +      INV .{\<acute>S = (SUM j<\<acute>I. j)}.
  12.243 +      DO
  12.244 +        \<acute>S := \<acute>S + \<acute>I;
  12.245 +        \<acute>I := \<acute>I + 1
  12.246 +      OD
  12.247 +      .{\<acute>S = (SUM j<n. j)}."
  12.248 +proof -
  12.249 +  let ?sum = "\<lambda>k::nat. SUM j<k. j"
  12.250 +  let ?inv = "\<lambda>s i::nat. s = ?sum i"
  12.251 +
  12.252 +  show ?thesis
  12.253 +  proof hoare
  12.254 +    show "?inv 0 1" by simp
  12.255 +  next
  12.256 +    fix s i assume "?inv s i & i ~= n"
  12.257 +    thus "?inv (s + i) (i + 1)" by simp
  12.258 +  next
  12.259 +    fix s i assume "?inv s i & ~ i ~= n"
  12.260 +    thus "s = ?sum n" by simp
  12.261 +  qed
  12.262 +qed
  12.263 +
  12.264 +text {*
  12.265 + Certainly, this proof may be done fully automatically as well, provided
  12.266 + that the invariant is given beforehand.
  12.267 +*}
  12.268 +
  12.269 +theorem
  12.270 +  "|- .{True}.
  12.271 +      \<acute>S := 0; \<acute>I := 1;
  12.272 +      WHILE \<acute>I ~= n
  12.273 +      INV .{\<acute>S = (SUM j<\<acute>I. j)}.
  12.274 +      DO
  12.275 +        \<acute>S := \<acute>S + \<acute>I;
  12.276 +        \<acute>I := \<acute>I + 1
  12.277 +      OD
  12.278 +      .{\<acute>S = (SUM j<n. j)}."
  12.279 +  by hoare auto
  12.280 +
  12.281 +
  12.282 +subsection {* Time *}
  12.283 +
  12.284 +text {*
  12.285 +  A simple embedding of time in Hoare logic: function @{text timeit}
  12.286 +  inserts an extra variable to keep track of the elapsed time.
  12.287 +*}
  12.288 +
  12.289 +record tstate = time :: nat
  12.290 +
  12.291 +types 'a time = "\<lparr>time :: nat, \<dots> :: 'a\<rparr>"
  12.292 +
  12.293 +consts timeit :: "'a time com \<Rightarrow> 'a time com"
  12.294 +primrec
  12.295 +  "timeit (Basic f) = (Basic f; Basic(\<lambda>s. s\<lparr>time := Suc (time s)\<rparr>))"
  12.296 +  "timeit (c1; c2) = (timeit c1; timeit c2)"
  12.297 +  "timeit (Cond b c1 c2) = Cond b (timeit c1) (timeit c2)"
  12.298 +  "timeit (While b iv c) = While b iv (timeit c)"
  12.299 +
  12.300 +record tvars = tstate +
  12.301 +  I :: nat
  12.302 +  J :: nat
  12.303 +
  12.304 +lemma lem: "(0::nat) < n \<Longrightarrow> n + n \<le> Suc (n * n)"
  12.305 +  by (induct n) simp_all
  12.306 +
  12.307 +lemma "|- .{i = \<acute>I & \<acute>time = 0}.
  12.308 + timeit(
  12.309 + WHILE \<acute>I \<noteq> 0
  12.310 + INV .{2*\<acute>time + \<acute>I*\<acute>I + 5*\<acute>I = i*i + 5*i}.
  12.311 + DO
  12.312 +   \<acute>J := \<acute>I;
  12.313 +   WHILE \<acute>J \<noteq> 0
  12.314 +   INV .{0 < \<acute>I & 2*\<acute>time + \<acute>I*\<acute>I + 3*\<acute>I + 2*\<acute>J - 2 = i*i + 5*i}.
  12.315 +   DO \<acute>J := \<acute>J - 1 OD;
  12.316 +   \<acute>I := \<acute>I - 1
  12.317 + OD
  12.318 + ) .{2*\<acute>time = i*i + 5*i}."
  12.319 +  apply simp
  12.320 +  apply hoare
  12.321 +      apply simp
  12.322 +     apply clarsimp
  12.323 +    apply clarsimp
  12.324 +   apply arith
  12.325 +   prefer 2
  12.326 +   apply clarsimp
  12.327 +  apply (clarsimp simp: nat_distrib)
  12.328 +  apply (frule lem)
  12.329 +  apply arith
  12.330 +  done
  12.331 +
  12.332 +end
    13.1 --- a/src/HOL/Isar_examples/KnasterTarski.thy	Mon Jun 22 22:51:08 2009 +0200
    13.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.3 @@ -1,111 +0,0 @@
    13.4 -(*  Title:      HOL/Isar_examples/KnasterTarski.thy
    13.5 -    Author:     Markus Wenzel, TU Muenchen
    13.6 -
    13.7 -Typical textbook proof example.
    13.8 -*)
    13.9 -
   13.10 -header {* Textbook-style reasoning: the Knaster-Tarski Theorem *}
   13.11 -
   13.12 -theory KnasterTarski
   13.13 -imports Main Lattice_Syntax
   13.14 -begin
   13.15 -
   13.16 -
   13.17 -subsection {* Prose version *}
   13.18 -
   13.19 -text {*
   13.20 -  According to the textbook \cite[pages 93--94]{davey-priestley}, the
   13.21 -  Knaster-Tarski fixpoint theorem is as follows.\footnote{We have
   13.22 -  dualized the argument, and tuned the notation a little bit.}
   13.23 -
   13.24 -  \textbf{The Knaster-Tarski Fixpoint Theorem.}  Let @{text L} be a
   13.25 -  complete lattice and @{text "f: L \<rightarrow> L"} an order-preserving map.
   13.26 -  Then @{text "\<Sqinter>{x \<in> L | f(x) \<le> x}"} is a fixpoint of @{text f}.
   13.27 -
   13.28 -  \textbf{Proof.} Let @{text "H = {x \<in> L | f(x) \<le> x}"} and @{text "a =
   13.29 -  \<Sqinter>H"}.  For all @{text "x \<in> H"} we have @{text "a \<le> x"}, so @{text
   13.30 -  "f(a) \<le> f(x) \<le> x"}.  Thus @{text "f(a)"} is a lower bound of @{text
   13.31 -  H}, whence @{text "f(a) \<le> a"}.  We now use this inequality to prove
   13.32 -  the reverse one (!) and thereby complete the proof that @{text a} is
   13.33 -  a fixpoint.  Since @{text f} is order-preserving, @{text "f(f(a)) \<le>
   13.34 -  f(a)"}.  This says @{text "f(a) \<in> H"}, so @{text "a \<le> f(a)"}.
   13.35 -*}
   13.36 -
   13.37 -
   13.38 -subsection {* Formal versions *}
   13.39 -
   13.40 -text {*
   13.41 -  The Isar proof below closely follows the original presentation.
   13.42 -  Virtually all of the prose narration has been rephrased in terms of
   13.43 -  formal Isar language elements.  Just as many textbook-style proofs,
   13.44 -  there is a strong bias towards forward proof, and several bends in
   13.45 -  the course of reasoning.
   13.46 -*}
   13.47 -
   13.48 -theorem Knaster_Tarski:
   13.49 -  fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
   13.50 -  assumes "mono f"
   13.51 -  shows "\<exists>a. f a = a"
   13.52 -proof
   13.53 -  let ?H = "{u. f u \<le> u}"
   13.54 -  let ?a = "\<Sqinter>?H"
   13.55 -  show "f ?a = ?a"
   13.56 -  proof -
   13.57 -    {
   13.58 -      fix x
   13.59 -      assume "x \<in> ?H"
   13.60 -      then have "?a \<le> x" by (rule Inf_lower)
   13.61 -      with `mono f` have "f ?a \<le> f x" ..
   13.62 -      also from `x \<in> ?H` have "\<dots> \<le> x" ..
   13.63 -      finally have "f ?a \<le> x" .
   13.64 -    }
   13.65 -    then have "f ?a \<le> ?a" by (rule Inf_greatest)
   13.66 -    {
   13.67 -      also presume "\<dots> \<le> f ?a"
   13.68 -      finally (order_antisym) show ?thesis .
   13.69 -    }
   13.70 -    from `mono f` and `f ?a \<le> ?a` have "f (f ?a) \<le> f ?a" ..
   13.71 -    then have "f ?a \<in> ?H" ..
   13.72 -    then show "?a \<le> f ?a" by (rule Inf_lower)
   13.73 -  qed
   13.74 -qed
   13.75 -
   13.76 -text {*
   13.77 -  Above we have used several advanced Isar language elements, such as
   13.78 -  explicit block structure and weak assumptions.  Thus we have
   13.79 -  mimicked the particular way of reasoning of the original text.
   13.80 -
   13.81 -  In the subsequent version the order of reasoning is changed to
   13.82 -  achieve structured top-down decomposition of the problem at the
   13.83 -  outer level, while only the inner steps of reasoning are done in a
   13.84 -  forward manner.  We are certainly more at ease here, requiring only
   13.85 -  the most basic features of the Isar language.
   13.86 -*}
   13.87 -
   13.88 -theorem Knaster_Tarski':
   13.89 -  fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
   13.90 -  assumes "mono f"
   13.91 -  shows "\<exists>a. f a = a"
   13.92 -proof
   13.93 -  let ?H = "{u. f u \<le> u}"
   13.94 -  let ?a = "\<Sqinter>?H"
   13.95 -  show "f ?a = ?a"
   13.96 -  proof (rule order_antisym)
   13.97 -    show "f ?a \<le> ?a"
   13.98 -    proof (rule Inf_greatest)
   13.99 -      fix x
  13.100 -      assume "x \<in> ?H"
  13.101 -      then have "?a \<le> x" by (rule Inf_lower)
  13.102 -      with `mono f` have "f ?a \<le> f x" ..
  13.103 -      also from `x \<in> ?H` have "\<dots> \<le> x" ..
  13.104 -      finally show "f ?a \<le> x" .
  13.105 -    qed
  13.106 -    show "?a \<le> f ?a"
  13.107 -    proof (rule Inf_lower)
  13.108 -      from `mono f` and `f ?a \<le> ?a` have "f (f ?a) \<le> f ?a" ..
  13.109 -      then show "f ?a \<in> ?H" ..
  13.110 -    qed
  13.111 -  qed
  13.112 -qed
  13.113 -
  13.114 -end
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/src/HOL/Isar_examples/Knaster_Tarski.thy	Mon Jun 22 23:48:24 2009 +0200
    14.3 @@ -0,0 +1,111 @@
    14.4 +(*  Title:      HOL/Isar_examples/Knaster_Tarski.thy
    14.5 +    Author:     Markus Wenzel, TU Muenchen
    14.6 +
    14.7 +Typical textbook proof example.
    14.8 +*)
    14.9 +
   14.10 +header {* Textbook-style reasoning: the Knaster-Tarski Theorem *}
   14.11 +
   14.12 +theory Knaster_Tarski
   14.13 +imports Main Lattice_Syntax
   14.14 +begin
   14.15 +
   14.16 +
   14.17 +subsection {* Prose version *}
   14.18 +
   14.19 +text {*
   14.20 +  According to the textbook \cite[pages 93--94]{davey-priestley}, the
   14.21 +  Knaster-Tarski fixpoint theorem is as follows.\footnote{We have
   14.22 +  dualized the argument, and tuned the notation a little bit.}
   14.23 +
   14.24 +  \textbf{The Knaster-Tarski Fixpoint Theorem.}  Let @{text L} be a
   14.25 +  complete lattice and @{text "f: L \<rightarrow> L"} an order-preserving map.
   14.26 +  Then @{text "\<Sqinter>{x \<in> L | f(x) \<le> x}"} is a fixpoint of @{text f}.
   14.27 +
   14.28 +  \textbf{Proof.} Let @{text "H = {x \<in> L | f(x) \<le> x}"} and @{text "a =
   14.29 +  \<Sqinter>H"}.  For all @{text "x \<in> H"} we have @{text "a \<le> x"}, so @{text
   14.30 +  "f(a) \<le> f(x) \<le> x"}.  Thus @{text "f(a)"} is a lower bound of @{text
   14.31 +  H}, whence @{text "f(a) \<le> a"}.  We now use this inequality to prove
   14.32 +  the reverse one (!) and thereby complete the proof that @{text a} is
   14.33 +  a fixpoint.  Since @{text f} is order-preserving, @{text "f(f(a)) \<le>
   14.34 +  f(a)"}.  This says @{text "f(a) \<in> H"}, so @{text "a \<le> f(a)"}.
   14.35 +*}
   14.36 +
   14.37 +
   14.38 +subsection {* Formal versions *}
   14.39 +
   14.40 +text {*
   14.41 +  The Isar proof below closely follows the original presentation.
   14.42 +  Virtually all of the prose narration has been rephrased in terms of
   14.43 +  formal Isar language elements.  Just as in many textbook-style proofs,
   14.44 +  there is a strong bias towards forward proof, and several bends in
   14.45 +  the course of reasoning.
   14.46 +*}
   14.47 +
   14.48 +theorem Knaster_Tarski:
   14.49 +  fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
   14.50 +  assumes "mono f"
   14.51 +  shows "\<exists>a. f a = a"
   14.52 +proof
   14.53 +  let ?H = "{u. f u \<le> u}"
   14.54 +  let ?a = "\<Sqinter>?H"
   14.55 +  show "f ?a = ?a"
   14.56 +  proof -
   14.57 +    {
   14.58 +      fix x
   14.59 +      assume "x \<in> ?H"
   14.60 +      then have "?a \<le> x" by (rule Inf_lower)
   14.61 +      with `mono f` have "f ?a \<le> f x" ..
   14.62 +      also from `x \<in> ?H` have "\<dots> \<le> x" ..
   14.63 +      finally have "f ?a \<le> x" .
   14.64 +    }
   14.65 +    then have "f ?a \<le> ?a" by (rule Inf_greatest)
   14.66 +    {
   14.67 +      also presume "\<dots> \<le> f ?a"
   14.68 +      finally (order_antisym) show ?thesis .
   14.69 +    }
   14.70 +    from `mono f` and `f ?a \<le> ?a` have "f (f ?a) \<le> f ?a" ..
   14.71 +    then have "f ?a \<in> ?H" ..
   14.72 +    then show "?a \<le> f ?a" by (rule Inf_lower)
   14.73 +  qed
   14.74 +qed
   14.75 +
   14.76 +text {*
   14.77 +  Above we have used several advanced Isar language elements, such as
   14.78 +  explicit block structure and weak assumptions.  Thus we have
   14.79 +  mimicked the particular way of reasoning of the original text.
   14.80 +
   14.81 +  In the subsequent version the order of reasoning is changed to
   14.82 +  achieve structured top-down decomposition of the problem at the
   14.83 +  outer level, while only the inner steps of reasoning are done in a
   14.84 +  forward manner.  We are certainly more at ease here, requiring only
   14.85 +  the most basic features of the Isar language.
   14.86 +*}
   14.87 +
   14.88 +theorem Knaster_Tarski':
   14.89 +  fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
   14.90 +  assumes "mono f"
   14.91 +  shows "\<exists>a. f a = a"
   14.92 +proof
   14.93 +  let ?H = "{u. f u \<le> u}"
   14.94 +  let ?a = "\<Sqinter>?H"
   14.95 +  show "f ?a = ?a"
   14.96 +  proof (rule order_antisym)
   14.97 +    show "f ?a \<le> ?a"
   14.98 +    proof (rule Inf_greatest)
   14.99 +      fix x
  14.100 +      assume "x \<in> ?H"
  14.101 +      then have "?a \<le> x" by (rule Inf_lower)
  14.102 +      with `mono f` have "f ?a \<le> f x" ..
  14.103 +      also from `x \<in> ?H` have "\<dots> \<le> x" ..
  14.104 +      finally show "f ?a \<le> x" .
  14.105 +    qed
  14.106 +    show "?a \<le> f ?a"
  14.107 +    proof (rule Inf_lower)
  14.108 +      from `mono f` and `f ?a \<le> ?a` have "f (f ?a) \<le> f ?a" ..
  14.109 +      then show "f ?a \<in> ?H" ..
  14.110 +    qed
  14.111 +  qed
  14.112 +qed
  14.113 +
  14.114 +end
    15.1 --- a/src/HOL/Isar_examples/MutilatedCheckerboard.thy	Mon Jun 22 22:51:08 2009 +0200
    15.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.3 @@ -1,299 +0,0 @@
    15.4 -(*  Title:      HOL/Isar_examples/MutilatedCheckerboard.thy
    15.5 -    ID:         $Id$
    15.6 -    Author:     Markus Wenzel, TU Muenchen (Isar document)
    15.7 -                Lawrence C Paulson, Cambridge University Computer Laboratory (original scripts)
    15.8 -*)
    15.9 -
   15.10 -header {* The Mutilated Checker Board Problem *}
   15.11 -
   15.12 -theory MutilatedCheckerboard imports Main begin
   15.13 -
   15.14 -text {*
   15.15 - The Mutilated Checker Board Problem, formalized inductively.  See
   15.16 - \cite{paulson-mutilated-board} and
   15.17 - \url{http://isabelle.in.tum.de/library/HOL/Induct/Mutil.html} for the
   15.18 - original tactic script version.
   15.19 -*}
   15.20 -
   15.21 -subsection {* Tilings *}
   15.22 -
   15.23 -inductive_set
   15.24 -  tiling :: "'a set set => 'a set set"
   15.25 -  for A :: "'a set set"
   15.26 -  where
   15.27 -    empty: "{} : tiling A"
   15.28 -  | Un: "a : A ==> t : tiling A ==> a <= - t ==> a Un t : tiling A"
   15.29 -
   15.30 -
   15.31 -text "The union of two disjoint tilings is a tiling."
   15.32 -
   15.33 -lemma tiling_Un:
   15.34 -  assumes "t : tiling A" and "u : tiling A" and "t Int u = {}"
   15.35 -  shows "t Un u : tiling A"
   15.36 -proof -
   15.37 -  let ?T = "tiling A"
   15.38 -  from `t : ?T` and `t Int u = {}`
   15.39 -  show "t Un u : ?T"
   15.40 -  proof (induct t)
   15.41 -    case empty
   15.42 -    with `u : ?T` show "{} Un u : ?T" by simp
   15.43 -  next
   15.44 -    case (Un a t)
   15.45 -    show "(a Un t) Un u : ?T"
   15.46 -    proof -
   15.47 -      have "a Un (t Un u) : ?T"
   15.48 -	using `a : A`
   15.49 -      proof (rule tiling.Un)
   15.50 -        from `(a Un t) Int u = {}` have "t Int u = {}" by blast
   15.51 -        then show "t Un u: ?T" by (rule Un)
   15.52 -        from `a <= - t` and `(a Un t) Int u = {}`
   15.53 -	show "a <= - (t Un u)" by blast
   15.54 -      qed
   15.55 -      also have "a Un (t Un u) = (a Un t) Un u"
   15.56 -        by (simp only: Un_assoc)
   15.57 -      finally show ?thesis .
   15.58 -    qed
   15.59 -  qed
   15.60 -qed
   15.61 -
   15.62 -
   15.63 -subsection {* Basic properties of ``below'' *}
   15.64 -
   15.65 -constdefs
   15.66 -  below :: "nat => nat set"
   15.67 -  "below n == {i. i < n}"
   15.68 -
   15.69 -lemma below_less_iff [iff]: "(i: below k) = (i < k)"
   15.70 -  by (simp add: below_def)
   15.71 -
   15.72 -lemma below_0: "below 0 = {}"
   15.73 -  by (simp add: below_def)
   15.74 -
   15.75 -lemma Sigma_Suc1:
   15.76 -    "m = n + 1 ==> below m <*> B = ({n} <*> B) Un (below n <*> B)"
   15.77 -  by (simp add: below_def less_Suc_eq) blast
   15.78 -
   15.79 -lemma Sigma_Suc2:
   15.80 -    "m = n + 2 ==> A <*> below m =
   15.81 -      (A <*> {n}) Un (A <*> {n + 1}) Un (A <*> below n)"
   15.82 -  by (auto simp add: below_def)
   15.83 -
   15.84 -lemmas Sigma_Suc = Sigma_Suc1 Sigma_Suc2
   15.85 -
   15.86 -
   15.87 -subsection {* Basic properties of ``evnodd'' *}
   15.88 -
   15.89 -constdefs
   15.90 -  evnodd :: "(nat * nat) set => nat => (nat * nat) set"
   15.91 -  "evnodd A b == A Int {(i, j). (i + j) mod 2 = b}"
   15.92 -
   15.93 -lemma evnodd_iff:
   15.94 -    "(i, j): evnodd A b = ((i, j): A  & (i + j) mod 2 = b)"
   15.95 -  by (simp add: evnodd_def)
   15.96 -
   15.97 -lemma evnodd_subset: "evnodd A b <= A"
   15.98 -  by (unfold evnodd_def, rule Int_lower1)
   15.99 -
  15.100 -lemma evnoddD: "x : evnodd A b ==> x : A"
  15.101 -  by (rule subsetD, rule evnodd_subset)
  15.102 -
  15.103 -lemma evnodd_finite: "finite A ==> finite (evnodd A b)"
  15.104 -  by (rule finite_subset, rule evnodd_subset)
  15.105 -
  15.106 -lemma evnodd_Un: "evnodd (A Un B) b = evnodd A b Un evnodd B b"
  15.107 -  by (unfold evnodd_def) blast
  15.108 -
  15.109 -lemma evnodd_Diff: "evnodd (A - B) b = evnodd A b - evnodd B b"
  15.110 -  by (unfold evnodd_def) blast
  15.111 -
  15.112 -lemma evnodd_empty: "evnodd {} b = {}"
  15.113 -  by (simp add: evnodd_def)
  15.114 -
  15.115 -lemma evnodd_insert: "evnodd (insert (i, j) C) b =
  15.116 -    (if (i + j) mod 2 = b
  15.117 -      then insert (i, j) (evnodd C b) else evnodd C b)"
  15.118 -  by (simp add: evnodd_def) blast
  15.119 -
  15.120 -
  15.121 -subsection {* Dominoes *}
  15.122 -
  15.123 -inductive_set
  15.124 -  domino :: "(nat * nat) set set"
  15.125 -  where
  15.126 -    horiz: "{(i, j), (i, j + 1)} : domino"
  15.127 -  | vertl: "{(i, j), (i + 1, j)} : domino"
  15.128 -
  15.129 -lemma dominoes_tile_row:
  15.130 -  "{i} <*> below (2 * n) : tiling domino"
  15.131 -  (is "?B n : ?T")
  15.132 -proof (induct n)
  15.133 -  case 0
  15.134 -  show ?case by (simp add: below_0 tiling.empty)
  15.135 -next
  15.136 -  case (Suc n)
  15.137 -  let ?a = "{i} <*> {2 * n + 1} Un {i} <*> {2 * n}"
  15.138 -  have "?B (Suc n) = ?a Un ?B n"
  15.139 -    by (auto simp add: Sigma_Suc Un_assoc)
  15.140 -  moreover have "... : ?T"
  15.141 -  proof (rule tiling.Un)
  15.142 -    have "{(i, 2 * n), (i, 2 * n + 1)} : domino"
  15.143 -      by (rule domino.horiz)
  15.144 -    also have "{(i, 2 * n), (i, 2 * n + 1)} = ?a" by blast
  15.145 -    finally show "... : domino" .
  15.146 -    show "?B n : ?T" by (rule Suc)
  15.147 -    show "?a <= - ?B n" by blast
  15.148 -  qed
  15.149 -  ultimately show ?case by simp
  15.150 -qed
  15.151 -
  15.152 -lemma dominoes_tile_matrix:
  15.153 -  "below m <*> below (2 * n) : tiling domino"
  15.154 -  (is "?B m : ?T")
  15.155 -proof (induct m)
  15.156 -  case 0
  15.157 -  show ?case by (simp add: below_0 tiling.empty)
  15.158 -next
  15.159 -  case (Suc m)
  15.160 -  let ?t = "{m} <*> below (2 * n)"
  15.161 -  have "?B (Suc m) = ?t Un ?B m" by (simp add: Sigma_Suc)
  15.162 -  moreover have "... : ?T"
  15.163 -  proof (rule tiling_Un)
  15.164 -    show "?t : ?T" by (rule dominoes_tile_row)
  15.165 -    show "?B m : ?T" by (rule Suc)
  15.166 -    show "?t Int ?B m = {}" by blast
  15.167 -  qed
  15.168 -  ultimately show ?case by simp
  15.169 -qed
  15.170 -
  15.171 -lemma domino_singleton:
  15.172 -  assumes d: "d : domino" and "b < 2"
  15.173 -  shows "EX i j. evnodd d b = {(i, j)}"  (is "?P d")
  15.174 -  using d
  15.175 -proof induct
  15.176 -  from `b < 2` have b_cases: "b = 0 | b = 1" by arith
  15.177 -  fix i j
  15.178 -  note [simp] = evnodd_empty evnodd_insert mod_Suc
  15.179 -  from b_cases show "?P {(i, j), (i, j + 1)}" by rule auto
  15.180 -  from b_cases show "?P {(i, j), (i + 1, j)}" by rule auto
  15.181 -qed
  15.182 -
  15.183 -lemma domino_finite:
  15.184 -  assumes d: "d: domino"
  15.185 -  shows "finite d"
  15.186 -  using d
  15.187 -proof induct
  15.188 -  fix i j :: nat
  15.189 -  show "finite {(i, j), (i, j + 1)}" by (intro finite.intros)
  15.190 -  show "finite {(i, j), (i + 1, j)}" by (intro finite.intros)
  15.191 -qed
  15.192 -
  15.193 -
  15.194 -subsection {* Tilings of dominoes *}
  15.195 -
  15.196 -lemma tiling_domino_finite:
  15.197 -  assumes t: "t : tiling domino"  (is "t : ?T")
  15.198 -  shows "finite t"  (is "?F t")
  15.199 -  using t
  15.200 -proof induct
  15.201 -  show "?F {}" by (rule finite.emptyI)
  15.202 -  fix a t assume "?F t"
  15.203 -  assume "a : domino" then have "?F a" by (rule domino_finite)
  15.204 -  from this and `?F t` show "?F (a Un t)" by (rule finite_UnI)
  15.205 -qed
  15.206 -
  15.207 -lemma tiling_domino_01:
  15.208 -  assumes t: "t : tiling domino"  (is "t : ?T")
  15.209 -  shows "card (evnodd t 0) = card (evnodd t 1)"
  15.210 -  using t
  15.211 -proof induct
  15.212 -  case empty
  15.213 -  show ?case by (simp add: evnodd_def)
  15.214 -next
  15.215 -  case (Un a t)
  15.216 -  let ?e = evnodd
  15.217 -  note hyp = `card (?e t 0) = card (?e t 1)`
  15.218 -    and at = `a <= - t`
  15.219 -  have card_suc:
  15.220 -    "!!b. b < 2 ==> card (?e (a Un t) b) = Suc (card (?e t b))"
  15.221 -  proof -
  15.222 -    fix b :: nat assume "b < 2"
  15.223 -    have "?e (a Un t) b = ?e a b Un ?e t b" by (rule evnodd_Un)
  15.224 -    also obtain i j where e: "?e a b = {(i, j)}"
  15.225 -    proof -
  15.226 -      from `a \<in> domino` and `b < 2`
  15.227 -      have "EX i j. ?e a b = {(i, j)}" by (rule domino_singleton)
  15.228 -      then show ?thesis by (blast intro: that)
  15.229 -    qed
  15.230 -    moreover have "... Un ?e t b = insert (i, j) (?e t b)" by simp
  15.231 -    moreover have "card ... = Suc (card (?e t b))"
  15.232 -    proof (rule card_insert_disjoint)
  15.233 -      from `t \<in> tiling domino` have "finite t"
  15.234 -	by (rule tiling_domino_finite)
  15.235 -      then show "finite (?e t b)"
  15.236 -        by (rule evnodd_finite)
  15.237 -      from e have "(i, j) : ?e a b" by simp
  15.238 -      with at show "(i, j) ~: ?e t b" by (blast dest: evnoddD)
  15.239 -    qed
  15.240 -    ultimately show "?thesis b" by simp
  15.241 -  qed
  15.242 -  then have "card (?e (a Un t) 0) = Suc (card (?e t 0))" by simp
  15.243 -  also from hyp have "card (?e t 0) = card (?e t 1)" .
  15.244 -  also from card_suc have "Suc ... = card (?e (a Un t) 1)"
  15.245 -    by simp
  15.246 -  finally show ?case .
  15.247 -qed
  15.248 -
  15.249 -
  15.250 -subsection {* Main theorem *}
  15.251 -
  15.252 -constdefs
  15.253 -  mutilated_board :: "nat => nat => (nat * nat) set"
  15.254 -  "mutilated_board m n ==
  15.255 -    below (2 * (m + 1)) <*> below (2 * (n + 1))
  15.256 -      - {(0, 0)} - {(2 * m + 1, 2 * n + 1)}"
  15.257 -
  15.258 -theorem mutil_not_tiling: "mutilated_board m n ~: tiling domino"
  15.259 -proof (unfold mutilated_board_def)
  15.260 -  let ?T = "tiling domino"
  15.261 -  let ?t = "below (2 * (m + 1)) <*> below (2 * (n + 1))"
  15.262 -  let ?t' = "?t - {(0, 0)}"
  15.263 -  let ?t'' = "?t' - {(2 * m + 1, 2 * n + 1)}"
  15.264 -
  15.265 -  show "?t'' ~: ?T"
  15.266 -  proof
  15.267 -    have t: "?t : ?T" by (rule dominoes_tile_matrix)
  15.268 -    assume t'': "?t'' : ?T"
  15.269 -
  15.270 -    let ?e = evnodd
  15.271 -    have fin: "finite (?e ?t 0)"
  15.272 -      by (rule evnodd_finite, rule tiling_domino_finite, rule t)
  15.273 -
  15.274 -    note [simp] = evnodd_iff evnodd_empty evnodd_insert evnodd_Diff
  15.275 -    have "card (?e ?t'' 0) < card (?e ?t' 0)"
  15.276 -    proof -
  15.277 -      have "card (?e ?t' 0 - {(2 * m + 1, 2 * n + 1)})
  15.278 -        < card (?e ?t' 0)"
  15.279 -      proof (rule card_Diff1_less)
  15.280 -        from _ fin show "finite (?e ?t' 0)"
  15.281 -          by (rule finite_subset) auto
  15.282 -        show "(2 * m + 1, 2 * n + 1) : ?e ?t' 0" by simp
  15.283 -      qed
  15.284 -      then show ?thesis by simp
  15.285 -    qed
  15.286 -    also have "... < card (?e ?t 0)"
  15.287 -    proof -
  15.288 -      have "(0, 0) : ?e ?t 0" by simp
  15.289 -      with fin have "card (?e ?t 0 - {(0, 0)}) < card (?e ?t 0)"
  15.290 -        by (rule card_Diff1_less)
  15.291 -      then show ?thesis by simp
  15.292 -    qed
  15.293 -    also from t have "... = card (?e ?t 1)"
  15.294 -      by (rule tiling_domino_01)
  15.295 -    also have "?e ?t 1 = ?e ?t'' 1" by simp
  15.296 -    also from t'' have "card ... = card (?e ?t'' 0)"
  15.297 -      by (rule tiling_domino_01 [symmetric])
  15.298 -    finally have "... < ..." . then show False ..
  15.299 -  qed
  15.300 -qed
  15.301 -
  15.302 -end
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/src/HOL/Isar_examples/Mutilated_Checkerboard.thy	Mon Jun 22 23:48:24 2009 +0200
    16.3 @@ -0,0 +1,300 @@
    16.4 +(*  Title:      HOL/Isar_examples/Mutilated_Checkerboard.thy
    16.5 +    Author:     Markus Wenzel, TU Muenchen (Isar document)
    16.6 +    Author:     Lawrence C Paulson, Cambridge University Computer Laboratory (original scripts)
    16.7 +*)
    16.8 +
    16.9 +header {* The Mutilated Checker Board Problem *}
   16.10 +
   16.11 +theory Mutilated_Checkerboard
   16.12 +imports Main
   16.13 +begin
   16.14 +
   16.15 +text {*
   16.16 + The Mutilated Checker Board Problem, formalized inductively.  See
   16.17 + \cite{paulson-mutilated-board} and
   16.18 + \url{http://isabelle.in.tum.de/library/HOL/Induct/Mutil.html} for the
   16.19 + original tactic script version.
   16.20 +*}
   16.21 +
   16.22 +subsection {* Tilings *}
   16.23 +
   16.24 +inductive_set
   16.25 +  tiling :: "'a set set => 'a set set"
   16.26 +  for A :: "'a set set"
   16.27 +  where
   16.28 +    empty: "{} : tiling A"
   16.29 +  | Un: "a : A ==> t : tiling A ==> a <= - t ==> a Un t : tiling A"
   16.30 +
   16.31 +
   16.32 +text "The union of two disjoint tilings is a tiling."
   16.33 +
   16.34 +lemma tiling_Un:
   16.35 +  assumes "t : tiling A" and "u : tiling A" and "t Int u = {}"
   16.36 +  shows "t Un u : tiling A"
   16.37 +proof -
   16.38 +  let ?T = "tiling A"
   16.39 +  from `t : ?T` and `t Int u = {}`
   16.40 +  show "t Un u : ?T"
   16.41 +  proof (induct t)
   16.42 +    case empty
   16.43 +    with `u : ?T` show "{} Un u : ?T" by simp
   16.44 +  next
   16.45 +    case (Un a t)
   16.46 +    show "(a Un t) Un u : ?T"
   16.47 +    proof -
   16.48 +      have "a Un (t Un u) : ?T"
   16.49 +        using `a : A`
   16.50 +      proof (rule tiling.Un)
   16.51 +        from `(a Un t) Int u = {}` have "t Int u = {}" by blast
   16.52 +        then show "t Un u: ?T" by (rule Un)
   16.53 +        from `a <= - t` and `(a Un t) Int u = {}`
   16.54 +        show "a <= - (t Un u)" by blast
   16.55 +      qed
   16.56 +      also have "a Un (t Un u) = (a Un t) Un u"
   16.57 +        by (simp only: Un_assoc)
   16.58 +      finally show ?thesis .
   16.59 +    qed
   16.60 +  qed
   16.61 +qed
   16.62 +
   16.63 +
   16.64 +subsection {* Basic properties of ``below'' *}
   16.65 +
   16.66 +constdefs
   16.67 +  below :: "nat => nat set"
   16.68 +  "below n == {i. i < n}"
   16.69 +
   16.70 +lemma below_less_iff [iff]: "(i: below k) = (i < k)"
   16.71 +  by (simp add: below_def)
   16.72 +
   16.73 +lemma below_0: "below 0 = {}"
   16.74 +  by (simp add: below_def)
   16.75 +
   16.76 +lemma Sigma_Suc1:
   16.77 +    "m = n + 1 ==> below m <*> B = ({n} <*> B) Un (below n <*> B)"
   16.78 +  by (simp add: below_def less_Suc_eq) blast
   16.79 +
   16.80 +lemma Sigma_Suc2:
   16.81 +    "m = n + 2 ==> A <*> below m =
   16.82 +      (A <*> {n}) Un (A <*> {n + 1}) Un (A <*> below n)"
   16.83 +  by (auto simp add: below_def)
   16.84 +
   16.85 +lemmas Sigma_Suc = Sigma_Suc1 Sigma_Suc2
   16.86 +
   16.87 +
   16.88 +subsection {* Basic properties of ``evnodd'' *}
   16.89 +
   16.90 +constdefs
   16.91 +  evnodd :: "(nat * nat) set => nat => (nat * nat) set"
   16.92 +  "evnodd A b == A Int {(i, j). (i + j) mod 2 = b}"
   16.93 +
   16.94 +lemma evnodd_iff:
   16.95 +    "(i, j): evnodd A b = ((i, j): A  & (i + j) mod 2 = b)"
   16.96 +  by (simp add: evnodd_def)
   16.97 +
   16.98 +lemma evnodd_subset: "evnodd A b <= A"
   16.99 +  by (unfold evnodd_def, rule Int_lower1)
  16.100 +
  16.101 +lemma evnoddD: "x : evnodd A b ==> x : A"
  16.102 +  by (rule subsetD, rule evnodd_subset)
  16.103 +
  16.104 +lemma evnodd_finite: "finite A ==> finite (evnodd A b)"
  16.105 +  by (rule finite_subset, rule evnodd_subset)
  16.106 +
  16.107 +lemma evnodd_Un: "evnodd (A Un B) b = evnodd A b Un evnodd B b"
  16.108 +  by (unfold evnodd_def) blast
  16.109 +
  16.110 +lemma evnodd_Diff: "evnodd (A - B) b = evnodd A b - evnodd B b"
  16.111 +  by (unfold evnodd_def) blast
  16.112 +
  16.113 +lemma evnodd_empty: "evnodd {} b = {}"
  16.114 +  by (simp add: evnodd_def)
  16.115 +
  16.116 +lemma evnodd_insert: "evnodd (insert (i, j) C) b =
  16.117 +    (if (i + j) mod 2 = b
  16.118 +      then insert (i, j) (evnodd C b) else evnodd C b)"
  16.119 +  by (simp add: evnodd_def) blast
  16.120 +
  16.121 +
  16.122 +subsection {* Dominoes *}
  16.123 +
  16.124 +inductive_set
  16.125 +  domino :: "(nat * nat) set set"
  16.126 +  where
  16.127 +    horiz: "{(i, j), (i, j + 1)} : domino"
  16.128 +  | vertl: "{(i, j), (i + 1, j)} : domino"
  16.129 +
  16.130 +lemma dominoes_tile_row:
  16.131 +  "{i} <*> below (2 * n) : tiling domino"
  16.132 +  (is "?B n : ?T")
  16.133 +proof (induct n)
  16.134 +  case 0
  16.135 +  show ?case by (simp add: below_0 tiling.empty)
  16.136 +next
  16.137 +  case (Suc n)
  16.138 +  let ?a = "{i} <*> {2 * n + 1} Un {i} <*> {2 * n}"
  16.139 +  have "?B (Suc n) = ?a Un ?B n"
  16.140 +    by (auto simp add: Sigma_Suc Un_assoc)
  16.141 +  moreover have "... : ?T"
  16.142 +  proof (rule tiling.Un)
  16.143 +    have "{(i, 2 * n), (i, 2 * n + 1)} : domino"
  16.144 +      by (rule domino.horiz)
  16.145 +    also have "{(i, 2 * n), (i, 2 * n + 1)} = ?a" by blast
  16.146 +    finally show "... : domino" .
  16.147 +    show "?B n : ?T" by (rule Suc)
  16.148 +    show "?a <= - ?B n" by blast
  16.149 +  qed
  16.150 +  ultimately show ?case by simp
  16.151 +qed
  16.152 +
  16.153 +lemma dominoes_tile_matrix:
  16.154 +  "below m <*> below (2 * n) : tiling domino"
  16.155 +  (is "?B m : ?T")
  16.156 +proof (induct m)
  16.157 +  case 0
  16.158 +  show ?case by (simp add: below_0 tiling.empty)
  16.159 +next
  16.160 +  case (Suc m)
  16.161 +  let ?t = "{m} <*> below (2 * n)"
  16.162 +  have "?B (Suc m) = ?t Un ?B m" by (simp add: Sigma_Suc)
  16.163 +  moreover have "... : ?T"
  16.164 +  proof (rule tiling_Un)
  16.165 +    show "?t : ?T" by (rule dominoes_tile_row)
  16.166 +    show "?B m : ?T" by (rule Suc)
  16.167 +    show "?t Int ?B m = {}" by blast
  16.168 +  qed
  16.169 +  ultimately show ?case by simp
  16.170 +qed
  16.171 +
  16.172 +lemma domino_singleton:
  16.173 +  assumes d: "d : domino" and "b < 2"
  16.174 +  shows "EX i j. evnodd d b = {(i, j)}"  (is "?P d")
  16.175 +  using d
  16.176 +proof induct
  16.177 +  from `b < 2` have b_cases: "b = 0 | b = 1" by arith
  16.178 +  fix i j
  16.179 +  note [simp] = evnodd_empty evnodd_insert mod_Suc
  16.180 +  from b_cases show "?P {(i, j), (i, j + 1)}" by rule auto
  16.181 +  from b_cases show "?P {(i, j), (i + 1, j)}" by rule auto
  16.182 +qed
  16.183 +
  16.184 +lemma domino_finite:
  16.185 +  assumes d: "d: domino"
  16.186 +  shows "finite d"
  16.187 +  using d
  16.188 +proof induct
  16.189 +  fix i j :: nat
  16.190 +  show "finite {(i, j), (i, j + 1)}" by (intro finite.intros)
  16.191 +  show "finite {(i, j), (i + 1, j)}" by (intro finite.intros)
  16.192 +qed
  16.193 +
  16.194 +
  16.195 +subsection {* Tilings of dominoes *}
  16.196 +
  16.197 +lemma tiling_domino_finite:
  16.198 +  assumes t: "t : tiling domino"  (is "t : ?T")
  16.199 +  shows "finite t"  (is "?F t")
  16.200 +  using t
  16.201 +proof induct
  16.202 +  show "?F {}" by (rule finite.emptyI)
  16.203 +  fix a t assume "?F t"
  16.204 +  assume "a : domino" then have "?F a" by (rule domino_finite)
  16.205 +  from this and `?F t` show "?F (a Un t)" by (rule finite_UnI)
  16.206 +qed
  16.207 +
  16.208 +lemma tiling_domino_01:
  16.209 +  assumes t: "t : tiling domino"  (is "t : ?T")
  16.210 +  shows "card (evnodd t 0) = card (evnodd t 1)"
  16.211 +  using t
  16.212 +proof induct
  16.213 +  case empty
  16.214 +  show ?case by (simp add: evnodd_def)
  16.215 +next
  16.216 +  case (Un a t)
  16.217 +  let ?e = evnodd
  16.218 +  note hyp = `card (?e t 0) = card (?e t 1)`
  16.219 +    and at = `a <= - t`
  16.220 +  have card_suc:
  16.221 +    "!!b. b < 2 ==> card (?e (a Un t) b) = Suc (card (?e t b))"
  16.222 +  proof -
  16.223 +    fix b :: nat assume "b < 2"
  16.224 +    have "?e (a Un t) b = ?e a b Un ?e t b" by (rule evnodd_Un)
  16.225 +    also obtain i j where e: "?e a b = {(i, j)}"
  16.226 +    proof -
  16.227 +      from `a \<in> domino` and `b < 2`
  16.228 +      have "EX i j. ?e a b = {(i, j)}" by (rule domino_singleton)
  16.229 +      then show ?thesis by (blast intro: that)
  16.230 +    qed
  16.231 +    moreover have "... Un ?e t b = insert (i, j) (?e t b)" by simp
  16.232 +    moreover have "card ... = Suc (card (?e t b))"
  16.233 +    proof (rule card_insert_disjoint)
  16.234 +      from `t \<in> tiling domino` have "finite t"
  16.235 +        by (rule tiling_domino_finite)
  16.236 +      then show "finite (?e t b)"
  16.237 +        by (rule evnodd_finite)
  16.238 +      from e have "(i, j) : ?e a b" by simp
  16.239 +      with at show "(i, j) ~: ?e t b" by (blast dest: evnoddD)
  16.240 +    qed
  16.241 +    ultimately show "?thesis b" by simp
  16.242 +  qed
  16.243 +  then have "card (?e (a Un t) 0) = Suc (card (?e t 0))" by simp
  16.244 +  also from hyp have "card (?e t 0) = card (?e t 1)" .
  16.245 +  also from card_suc have "Suc ... = card (?e (a Un t) 1)"
  16.246 +    by simp
  16.247 +  finally show ?case .
  16.248 +qed
  16.249 +
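The induction in tiling_domino_01 turns the singleton property into counting: by card_suc, adding one domino increases both colour counts by exactly one, so the counts remain equal for every tiling. Continuing the Python sketches above:

    # Reuses tile_matrix and evnodd from the sketches above.
    t = set().union(*tile_matrix(4, 3))   # a 4 x 6 board, fully tiled
    assert len(evnodd(t, 0)) == len(evnodd(t, 1)) == 12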
  16.250 +
  16.251 +subsection {* Main theorem *}
  16.252 +
  16.253 +constdefs
  16.254 +  mutilated_board :: "nat => nat => (nat * nat) set"
  16.255 +  "mutilated_board m n ==
  16.256 +    below (2 * (m + 1)) <*> below (2 * (n + 1))
  16.257 +      - {(0, 0)} - {(2 * m + 1, 2 * n + 1)}"
  16.258 +
  16.259 +theorem mutil_not_tiling: "mutilated_board m n ~: tiling domino"
  16.260 +proof (unfold mutilated_board_def)
  16.261 +  let ?T = "tiling domino"
  16.262 +  let ?t = "below (2 * (m + 1)) <*> below (2 * (n + 1))"
  16.263 +  let ?t' = "?t - {(0, 0)}"
  16.264 +  let ?t'' = "?t' - {(2 * m + 1, 2 * n + 1)}"
  16.265 +
  16.266 +  show "?t'' ~: ?T"
  16.267 +  proof
  16.268 +    have t: "?t : ?T" by (rule dominoes_tile_matrix)
  16.269 +    assume t'': "?t'' : ?T"
  16.270 +
  16.271 +    let ?e = evnodd
  16.272 +    have fin: "finite (?e ?t 0)"
  16.273 +      by (rule evnodd_finite, rule tiling_domino_finite, rule t)
  16.274 +
  16.275 +    note [simp] = evnodd_iff evnodd_empty evnodd_insert evnodd_Diff
  16.276 +    have "card (?e ?t'' 0) < card (?e ?t' 0)"
  16.277 +    proof -
  16.278 +      have "card (?e ?t' 0 - {(2 * m + 1, 2 * n + 1)})
  16.279 +        < card (?e ?t' 0)"
  16.280 +      proof (rule card_Diff1_less)
  16.281 +        from _ fin show "finite (?e ?t' 0)"
  16.282 +          by (rule finite_subset) auto
  16.283 +        show "(2 * m + 1, 2 * n + 1) : ?e ?t' 0" by simp
  16.284 +      qed
  16.285 +      then show ?thesis by simp
  16.286 +    qed
  16.287 +    also have "... < card (?e ?t 0)"
  16.288 +    proof -
  16.289 +      have "(0, 0) : ?e ?t 0" by simp
  16.290 +      with fin have "card (?e ?t 0 - {(0, 0)}) < card (?e ?t 0)"
  16.291 +        by (rule card_Diff1_less)
  16.292 +      then show ?thesis by simp
  16.293 +    qed
  16.294 +    also from t have "... = card (?e ?t 1)"
  16.295 +      by (rule tiling_domino_01)
  16.296 +    also have "?e ?t 1 = ?e ?t'' 1" by simp
  16.297 +    also from t'' have "card ... = card (?e ?t'' 0)"
  16.298 +      by (rule tiling_domino_01 [symmetric])
  16.299 +    finally have "... < ..." . then show False ..
  16.300 +  qed
  16.301 +qed
  16.302 +
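The closing chain of (in)equalities is pure counting. Both deleted corners, (0, 0) and (2m + 1, 2n + 1), have even coordinate sum, i.e. colour 0, so the mutilated board has exactly two fewer 0-coloured squares than 1-coloured ones, while tiling_domino_01 would force the two counts to be equal; hence no tiling exists. A direct numeric confirmation (Python sketch mirroring mutilated_board):

    def mutilated_board(m, n):
        # below (2 * (m + 1)) x below (2 * (n + 1)) minus two corners.
        full = {(i, j) for i in range(2 * (m + 1))
                       for j in range(2 * (n + 1))}
        return full - {(0, 0), (2 * m + 1, 2 * n + 1)}

    for m in range(3):
        for n in range(3):
            t = mutilated_board(m, n)
            e0 = {(i, j) for (i, j) in t if (i + j) % 2 == 0}
            e1 = {(i, j) for (i, j) in t if (i + j) % 2 == 1}
            assert len(e0) == len(e1) - 2   # equality always fails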
  16.303 +end
    17.1 --- a/src/HOL/Isar_examples/NestedDatatype.thy	Mon Jun 22 22:51:08 2009 +0200
    17.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.3 @@ -1,87 +0,0 @@
    17.4 -
    17.5 -(* $Id$ *)
    17.6 -
    17.7 -header {* Nested datatypes *}
    17.8 -
    17.9 -theory NestedDatatype imports Main begin
   17.10 -
   17.11 -subsection {* Terms and substitution *}
   17.12 -
   17.13 -datatype ('a, 'b) "term" =
   17.14 -    Var 'a
   17.15 -  | App 'b "('a, 'b) term list"
   17.16 -
   17.17 -consts
   17.18 -  subst_term :: "('a => ('a, 'b) term) => ('a, 'b) term => ('a, 'b) term"
   17.19 -  subst_term_list ::
   17.20 -    "('a => ('a, 'b) term) => ('a, 'b) term list => ('a, 'b) term list"
   17.21 -
   17.22 -primrec (subst)
   17.23 -  "subst_term f (Var a) = f a"
   17.24 -  "subst_term f (App b ts) = App b (subst_term_list f ts)"
   17.25 -  "subst_term_list f [] = []"
   17.26 -  "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
   17.27 -
   17.28 -
   17.29 -text {*
   17.30 - \medskip A simple lemma about composition of substitutions.
   17.31 -*}
   17.32 -
   17.33 -lemma "subst_term (subst_term f1 o f2) t =
   17.34 -      subst_term f1 (subst_term f2 t)"
   17.35 -  and "subst_term_list (subst_term f1 o f2) ts =
   17.36 -      subst_term_list f1 (subst_term_list f2 ts)"
   17.37 -  by (induct t and ts) simp_all
   17.38 -
   17.39 -lemma "subst_term (subst_term f1 o f2) t =
   17.40 -  subst_term f1 (subst_term f2 t)"
   17.41 -proof -
   17.42 -  let "?P t" = ?thesis
   17.43 -  let ?Q = "\<lambda>ts. subst_term_list (subst_term f1 o f2) ts =
   17.44 -    subst_term_list f1 (subst_term_list f2 ts)"
   17.45 -  show ?thesis
   17.46 -  proof (induct t)
   17.47 -    fix a show "?P (Var a)" by simp
   17.48 -  next
   17.49 -    fix b ts assume "?Q ts"
   17.50 -    then show "?P (App b ts)"
   17.51 -      by (simp only: subst.simps)
   17.52 -  next
   17.53 -    show "?Q []" by simp
   17.54 -  next
   17.55 -    fix t ts
   17.56 -    assume "?P t" "?Q ts" then show "?Q (t # ts)"
   17.57 -      by (simp only: subst.simps)
   17.58 -  qed
   17.59 -qed
   17.60 -
   17.61 -
   17.62 -subsection {* Alternative induction *}
   17.63 -
   17.64 -theorem term_induct' [case_names Var App]:
   17.65 -  assumes var: "!!a. P (Var a)"
   17.66 -    and app: "!!b ts. list_all P ts ==> P (App b ts)"
   17.67 -  shows "P t"
   17.68 -proof (induct t)
   17.69 -  fix a show "P (Var a)" by (rule var)
   17.70 -next
   17.71 -  fix b t ts assume "list_all P ts"
   17.72 -  then show "P (App b ts)" by (rule app)
   17.73 -next
   17.74 -  show "list_all P []" by simp
   17.75 -next
   17.76 -  fix t ts assume "P t" "list_all P ts"
   17.77 -  then show "list_all P (t # ts)" by simp
   17.78 -qed
   17.79 -
   17.80 -lemma
   17.81 -  "subst_term (subst_term f1 o f2) t = subst_term f1 (subst_term f2 t)"
   17.82 -proof (induct t rule: term_induct')
   17.83 -  case (Var a)
   17.84 -  show ?case by (simp add: o_def)
   17.85 -next
   17.86 -  case (App b ts)
   17.87 -  then show ?case by (induct ts) simp_all
   17.88 -qed
   17.89 -
   17.90 -end
    18.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.2 +++ b/src/HOL/Isar_examples/Nested_Datatype.thy	Mon Jun 22 23:48:24 2009 +0200
    18.3 @@ -0,0 +1,86 @@
    18.4 +header {* Nested datatypes *}
    18.5 +
    18.6 +theory Nested_Datatype
    18.7 +imports Main
    18.8 +begin
    18.9 +
   18.10 +subsection {* Terms and substitution *}
   18.11 +
   18.12 +datatype ('a, 'b) "term" =
   18.13 +    Var 'a
   18.14 +  | App 'b "('a, 'b) term list"
   18.15 +
   18.16 +consts
   18.17 +  subst_term :: "('a => ('a, 'b) term) => ('a, 'b) term => ('a, 'b) term"
   18.18 +  subst_term_list ::
   18.19 +    "('a => ('a, 'b) term) => ('a, 'b) term list => ('a, 'b) term list"
   18.20 +
   18.21 +primrec (subst)
   18.22 +  "subst_term f (Var a) = f a"
   18.23 +  "subst_term f (App b ts) = App b (subst_term_list f ts)"
   18.24 +  "subst_term_list f [] = []"
   18.25 +  "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
   18.26 +
   18.27 +
   18.28 +text {*
   18.29 + \medskip A simple lemma about composition of substitutions.
   18.30 +*}
   18.31 +
   18.32 +lemma "subst_term (subst_term f1 o f2) t =
   18.33 +      subst_term f1 (subst_term f2 t)"
   18.34 +  and "subst_term_list (subst_term f1 o f2) ts =
   18.35 +      subst_term_list f1 (subst_term_list f2 ts)"
   18.36 +  by (induct t and ts) simp_all
   18.37 +
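Since term nests recursively through lists, subst_term and subst_term_list must be defined, and reasoned about, by mutual recursion, which is why the lemma above states the composition property for terms and term lists simultaneously. A small Python model of the two functions (hypothetical sketch; terms are encoded as tagged tuples since Python has no algebraic datatypes):

    # ('Var', a) and ('App', b, ts) stand in for the constructors.
    def subst_term(f, t):
        if t[0] == 'Var':
            return f(t[1])
        return ('App', t[1], subst_term_list(f, t[2]))

    def subst_term_list(f, ts):
        return [subst_term(f, t) for t in ts]

    # The composition lemma, checked on a sample term:
    f1 = lambda a: ('Var', a + 1)
    f2 = lambda a: ('App', 'g', [('Var', a), ('Var', a)])
    t = ('App', 'h', [('Var', 0), ('App', 'k', [('Var', 1)])])
    compose = lambda a: subst_term(f1, f2(a))   # subst_term f1 o f2
    assert subst_term(compose, t) == subst_term(f1, subst_term(f2, t))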
   18.38 +lemma "subst_term (subst_term f1 o f2) t =
   18.39 +  subst_term f1 (subst_term f2 t)"
   18.40 +proof -
   18.41 +  let "?P t" = ?thesis
   18.42 +  let ?Q = "\<lambda>ts. subst_term_list (subst_term f1 o f2) ts =
   18.43 +    subst_term_list f1 (subst_term_list f2 ts)"
   18.44 +  show ?thesis
   18.45 +  proof (induct t)
   18.46 +    fix a show "?P (Var a)" by simp
   18.47 +  next
   18.48 +    fix b ts assume "?Q ts"
   18.49 +    then show "?P (App b ts)"
   18.50 +      by (simp only: subst.simps)
   18.51 +  next
   18.52 +    show "?Q []" by simp
   18.53 +  next
   18.54 +    fix t ts
   18.55 +    assume "?P t" "?Q ts" then show "?Q (t # ts)"
   18.56 +      by (simp only: subst.simps)
   18.57 +  qed
   18.58 +qed
   18.59 +
   18.60 +
   18.61 +subsection {* Alternative induction *}
   18.62 +
   18.63 +theorem term_induct' [case_names Var App]:
   18.64 +  assumes var: "!!a. P (Var a)"
   18.65 +    and app: "!!b ts. list_all P ts ==> P (App b ts)"
   18.66 +  shows "P t"
   18.67 +proof (induct t)
   18.68 +  fix a show "P (Var a)" by (rule var)
   18.69 +next
   18.70 +  fix b t ts assume "list_all P ts"
   18.71 +  then show "P (App b ts)" by (rule app)
   18.72 +next
   18.73 +  show "list_all P []" by simp
   18.74 +next
   18.75 +  fix t ts assume "P t" "list_all P ts"
   18.76 +  then show "list_all P (t # ts)" by simp
   18.77 +qed
   18.78 +
   18.79 +lemma
   18.80 +  "subst_term (subst_term f1 o f2) t = subst_term f1 (subst_term f2 t)"
   18.81 +proof (induct t rule: term_induct')
   18.82 +  case (Var a)
   18.83 +  show ?case by (simp add: o_def)
   18.84 +next
   18.85 +  case (App b ts)
   18.86 +  then show ?case by (induct ts) simp_all
   18.87 +qed
   18.88 +
   18.89 +end
    19.1 --- a/src/HOL/Isar_examples/Peirce.thy	Mon Jun 22 22:51:08 2009 +0200
    19.2 +++ b/src/HOL/Isar_examples/Peirce.thy	Mon Jun 22 23:48:24 2009 +0200
    19.3 @@ -1,11 +1,12 @@
    19.4  (*  Title:      HOL/Isar_examples/Peirce.thy
    19.5 -    ID:         $Id$
    19.6      Author:     Markus Wenzel, TU Muenchen
    19.7  *)
    19.8  
    19.9  header {* Peirce's Law *}
   19.10  
   19.11 -theory Peirce imports Main begin
   19.12 +theory Peirce
   19.13 +imports Main
   19.14 +begin
   19.15  
   19.16  text {*
   19.17   We consider Peirce's Law: $((A \impl B) \impl A) \impl A$.  This is
    20.1 --- a/src/HOL/Isar_examples/Puzzle.thy	Mon Jun 22 22:51:08 2009 +0200
    20.2 +++ b/src/HOL/Isar_examples/Puzzle.thy	Mon Jun 22 23:48:24 2009 +0200
    20.3 @@ -1,7 +1,8 @@
    20.4 -
    20.5  header {* An old chestnut *}
    20.6  
    20.7 -theory Puzzle imports Main begin
    20.8 +theory Puzzle
    20.9 +imports Main
   20.10 +begin
   20.11  
   20.12  text_raw {*
   20.13    \footnote{A question from ``Bundeswettbewerb Mathematik''.  Original
    21.1 --- a/src/HOL/Isar_examples/ROOT.ML	Mon Jun 22 22:51:08 2009 +0200
    21.2 +++ b/src/HOL/Isar_examples/ROOT.ML	Mon Jun 22 23:48:24 2009 +0200
    21.3 @@ -1,5 +1,4 @@
    21.4  (*  Title:      HOL/Isar_examples/ROOT.ML
    21.5 -    ID:         $Id$
    21.6      Author:     Markus Wenzel, TU Muenchen
    21.7  
    21.8  Miscellaneous Isabelle/Isar examples for Higher-Order Logic.
    21.9 @@ -8,16 +7,16 @@
   21.10  no_document use_thys ["../NumberTheory/Primes", "../NumberTheory/Fibonacci"];
   21.11  
   21.12  use_thys [
   21.13 -  "BasicLogic",
   21.14 +  "Basic_Logic",
   21.15    "Cantor",
   21.16    "Peirce",
   21.17    "Drinker",
   21.18 -  "ExprCompiler",
   21.19 +  "Expr_Compiler",
   21.20    "Group",
   21.21    "Summation",
   21.22 -  "KnasterTarski",
   21.23 -  "MutilatedCheckerboard",
   21.24 +  "Knaster_Tarski",
   21.25 +  "Mutilated_Checkerboard",
   21.26    "Puzzle",
   21.27 -  "NestedDatatype",
   21.28 -  "HoareEx"
   21.29 +  "Nested_Datatype",
   21.30 +  "Hoare_Ex"
   21.31  ];
    22.1 --- a/src/HOL/Isar_examples/Summation.thy	Mon Jun 22 22:51:08 2009 +0200
    22.2 +++ b/src/HOL/Isar_examples/Summation.thy	Mon Jun 22 23:48:24 2009 +0200
    22.3 @@ -1,5 +1,4 @@
    22.4  (*  Title:      HOL/Isar_examples/Summation.thy
    22.5 -    ID:         $Id$
    22.6      Author:     Markus Wenzel
    22.7  *)
    22.8  
    23.1 --- a/src/HOL/Isar_examples/document/root.tex	Mon Jun 22 22:51:08 2009 +0200
    23.2 +++ b/src/HOL/Isar_examples/document/root.tex	Mon Jun 22 23:48:24 2009 +0200
    23.3 @@ -1,6 +1,3 @@
    23.4 -
    23.5 -% $Id$
    23.6 -
    23.7  \input{style}
    23.8  
    23.9  \hyphenation{Isabelle}