observe standard theory naming conventions;
author wenzelm
Mon, 22 Jun 2009 23:48:24 +0200
changeset 31758 3edd5f813f01
parent 31757 c1262feb61c7
child 31760 05e3e5980677
observe standard theory naming conventions; modernized headers;
src/HOL/IsaMakefile
src/HOL/Isar_examples/BasicLogic.thy
src/HOL/Isar_examples/Basic_Logic.thy
src/HOL/Isar_examples/Cantor.thy
src/HOL/Isar_examples/Drinker.thy
src/HOL/Isar_examples/ExprCompiler.thy
src/HOL/Isar_examples/Expr_Compiler.thy
src/HOL/Isar_examples/Fibonacci.thy
src/HOL/Isar_examples/Group.thy
src/HOL/Isar_examples/Hoare.thy
src/HOL/Isar_examples/HoareEx.thy
src/HOL/Isar_examples/Hoare_Ex.thy
src/HOL/Isar_examples/KnasterTarski.thy
src/HOL/Isar_examples/Knaster_Tarski.thy
src/HOL/Isar_examples/MutilatedCheckerboard.thy
src/HOL/Isar_examples/Mutilated_Checkerboard.thy
src/HOL/Isar_examples/NestedDatatype.thy
src/HOL/Isar_examples/Nested_Datatype.thy
src/HOL/Isar_examples/Peirce.thy
src/HOL/Isar_examples/Puzzle.thy
src/HOL/Isar_examples/ROOT.ML
src/HOL/Isar_examples/Summation.thy
src/HOL/Isar_examples/document/root.tex
--- a/src/HOL/IsaMakefile	Mon Jun 22 22:51:08 2009 +0200
+++ b/src/HOL/IsaMakefile	Mon Jun 22 23:48:24 2009 +0200
@@ -886,13 +886,13 @@
 
 HOL-Isar_examples: HOL $(LOG)/HOL-Isar_examples.gz
 
-$(LOG)/HOL-Isar_examples.gz: $(OUT)/HOL Isar_examples/BasicLogic.thy	\
+$(LOG)/HOL-Isar_examples.gz: $(OUT)/HOL Isar_examples/Basic_Logic.thy	\
   Isar_examples/Cantor.thy Isar_examples/Drinker.thy			\
-  Isar_examples/ExprCompiler.thy Isar_examples/Fibonacci.thy		\
+  Isar_examples/Expr_Compiler.thy Isar_examples/Fibonacci.thy		\
   Isar_examples/Group.thy Isar_examples/Hoare.thy			\
-  Isar_examples/HoareEx.thy Isar_examples/KnasterTarski.thy		\
-  Isar_examples/MutilatedCheckerboard.thy				\
-  Isar_examples/NestedDatatype.thy Isar_examples/Peirce.thy		\
+  Isar_examples/Hoare_Ex.thy Isar_examples/Knaster_Tarski.thy		\
+  Isar_examples/Mutilated_Checkerboard.thy				\
+  Isar_examples/Nested_Datatype.thy Isar_examples/Peirce.thy		\
   Isar_examples/Puzzle.thy Isar_examples/Summation.thy			\
   Isar_examples/ROOT.ML Isar_examples/document/proof.sty		\
   Isar_examples/document/root.bib Isar_examples/document/root.tex	\
--- a/src/HOL/Isar_examples/BasicLogic.thy	Mon Jun 22 22:51:08 2009 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,447 +0,0 @@
-(*  Title:      HOL/Isar_examples/BasicLogic.thy
-    ID:         $Id$
-    Author:     Markus Wenzel, TU Muenchen
-
-Basic propositional and quantifier reasoning.
-*)
-
-header {* Basic logical reasoning *}
-
-theory BasicLogic imports Main begin
-
-
-subsection {* Pure backward reasoning *}
-
-text {*
-  In order to get a first idea of what Isabelle/Isar proof documents
-  may look like, we consider the propositions @{text I}, @{text K},
-  and @{text S}.  The following (rather explicit) proofs should
-  require little extra explanation.
-*}
-
-lemma I: "A --> A"
-proof
-  assume A
-  show A by fact
-qed
-
-lemma K: "A --> B --> A"
-proof
-  assume A
-  show "B --> A"
-  proof
-    show A by fact
-  qed
-qed
-
-lemma S: "(A --> B --> C) --> (A --> B) --> A --> C"
-proof
-  assume "A --> B --> C"
-  show "(A --> B) --> A --> C"
-  proof
-    assume "A --> B"
-    show "A --> C"
-    proof
-      assume A
-      show C
-      proof (rule mp)
-        show "B --> C" by (rule mp) fact+
-        show B by (rule mp) fact+
-      qed
-    qed
-  qed
-qed
-
-text {*
-  Isar provides several ways to fine-tune the reasoning, avoiding
-  excessive detail.  Several abbreviated language elements are
-  available, enabling the writer to express proofs in a more concise
-  way, even without referring to any automated proof tools yet.
-
-  First of all, proof by assumption may be abbreviated as a single
-  dot.
-*}
-
-lemma "A --> A"
-proof
-  assume A
-  show A .
-qed
-
-text {*
-  In fact, concluding any (sub-)proof already involves solving any
-  remaining goals by assumption\footnote{This is not a completely
-  trivial operation, as proof by assumption may involve full
-  higher-order unification.}.  Thus we may skip the rather vacuous
-  body of the above proof as well.
-*}
-
-lemma "A --> A"
-proof
-qed
-
-text {*
-  Note that the \isacommand{proof} command refers to the @{text rule}
-  method (without arguments) by default.  Thus it implicitly applies a
-  single rule, as determined from the syntactic form of the statements
-  involved.  The \isacommand{by} command abbreviates any proof with
-  empty body, so the proof may be further pruned.
-*}
-
-lemma "A --> A"
-  by rule
-
-text {*
-  Proof by a single rule may be abbreviated as double-dot.
-*}
-
-lemma "A --> A" ..
-
-text {*
-  Thus we have arrived at an adequate representation of the proof of a
-  tautology that holds by a single standard rule.\footnote{Apparently,
-  the rule here is implication introduction.}
-*}
-
-text {*
-  Let us also reconsider @{text K}.  Its statement is composed of
-  iterated connectives.  Basic decomposition is by a single rule at a
-  time, which is why our first version above was by nesting two
-  proofs.
-
-  The @{text intro} proof method repeatedly decomposes a goal's
-  conclusion.\footnote{The dual method is @{text elim}, acting on a
-  goal's premises.}
-*}
-
-lemma "A --> B --> A"
-proof (intro impI)
-  assume A
-  show A by fact
-qed
-
-text {*
-  Again, the body may be collapsed.
-*}
-
-lemma "A --> B --> A"
-  by (intro impI)
-
-text {*
-  Just like @{text rule}, the @{text intro} and @{text elim} proof
-  methods pick standard structural rules, in case no explicit
-  arguments are given.  While implicit rules are usually just fine for
-  single rule application, this may go too far with iteration.  Thus
-  in practice, @{text intro} and @{text elim} would typically be
-  restricted to certain structures by giving a few rules only, e.g.\
-  \isacommand{proof}~@{text "(intro impI allI)"} to strip implications
-  and universal quantifiers.
-
-  Such well-tuned iterated decomposition of certain structures is the
-  prime application of @{text intro} and @{text elim}.  In contrast,
-  terminal steps that solve a goal completely are usually performed by
-  actual automated proof methods (such as \isacommand{by}~@{text
-  blast}).
-*}
-
-
-subsection {* Variations of backward vs.\ forward reasoning *}
-
-text {*
-  Certainly, any proof may be performed in backward-style only.  On
-  the other hand, small steps of reasoning are often more naturally
-  expressed in forward-style.  Isar supports both backward and forward
-  reasoning as a first-class concept.  In order to demonstrate the
-  difference, we consider several proofs of @{text "A \<and> B \<longrightarrow> B \<and> A"}.
-
-  The first version is purely backward.
-*}
-
-lemma "A & B --> B & A"
-proof
-  assume "A & B"
-  show "B & A"
-  proof
-    show B by (rule conjunct2) fact
-    show A by (rule conjunct1) fact
-  qed
-qed
-
-text {*
-  Above, the @{text "conjunct_1/2"} projection rules had to be named
-  explicitly, since the goals @{text B} and @{text A} did not provide
-  any structural clue.  This may be avoided using \isacommand{from} to
-  focus on the @{text "A \<and> B"} assumption as the current facts,
-  enabling the use of double-dot proofs.  Note that \isacommand{from}
-  already does forward-chaining, involving the \name{conjE} rule here.
-*}
-
-lemma "A & B --> B & A"
-proof
-  assume "A & B"
-  show "B & A"
-  proof
-    from `A & B` show B ..
-    from `A & B` show A ..
-  qed
-qed
-
-text {*
-  In the next version, we move the forward step one level upwards.
-  Forward-chaining from the most recent facts is indicated by the
-  \isacommand{then} command.  Thus the proof of @{text "B \<and> A"} from
-  @{text "A \<and> B"} actually becomes an elimination, rather than an
-  introduction.  The resulting proof structure directly corresponds to
-  that of the @{text conjE} rule, including the repeated goal
-  proposition that is abbreviated as @{text ?thesis} below.
-*}
-
-lemma "A & B --> B & A"
-proof
-  assume "A & B"
-  then show "B & A"
-  proof                    -- {* rule @{text conjE} of @{text "A \<and> B"} *}
-    assume B A
-    then show ?thesis ..   -- {* rule @{text conjI} of @{text "B \<and> A"} *}
-  qed
-qed
-
-text {*
-  In the subsequent version we flatten the structure of the main body
-  by doing forward reasoning all the time.  Only the outermost
-  decomposition step is left as backward.
-*}
-
-lemma "A & B --> B & A"
-proof
-  assume "A & B"
-  from `A & B` have A ..
-  from `A & B` have B ..
-  from `B` `A` show "B & A" ..
-qed
-
-text {*
-  We can still push forward-reasoning a bit further, even at the risk
-  of getting ridiculous.  Note that we force the initial proof step to
-  do nothing here, by referring to the ``-'' proof method.
-*}
-
-lemma "A & B --> B & A"
-proof -
-  {
-    assume "A & B"
-    from `A & B` have A ..
-    from `A & B` have B ..
-    from `B` `A` have "B & A" ..
-  }
-  then show ?thesis ..         -- {* rule \name{impI} *}
-qed
-
-text {*
-  \medskip With these examples we have shifted through a whole range
-  from purely backward to purely forward reasoning.  Apparently, in
-  the extreme ends we get slightly ill-structured proofs, which also
-  require much explicit naming of either rules (backward) or local
-  facts (forward).
-
-  The general lesson learned here is that good proof style would
-  achieve just the \emph{right} balance of top-down backward
-  decomposition, and bottom-up forward composition.  In general, there
-  is no single best way to arrange some pieces of formal reasoning, of
-  course.  Depending on the actual applications, the intended audience
-  etc., rules (and methods) on the one hand vs.\ facts on the other
-  hand have to be emphasized in an appropriate way.  This requires the
-  proof writer to develop good taste, and some practice, of course.
-*}
-
-text {*
-  For our example the most appropriate way of reasoning is probably
-  the middle one, with conjunction introduction done after
-  elimination.
-*}
-
-lemma "A & B --> B & A"
-proof
-  assume "A & B"
-  then show "B & A"
-  proof
-    assume B A
-    then show ?thesis ..
-  qed
-qed
-
-
-
-subsection {* A few examples from ``Introduction to Isabelle'' *}
-
-text {*
-  We rephrase some of the basic reasoning examples of
-  \cite{isabelle-intro}, using HOL rather than FOL.
-*}
-
-subsubsection {* A propositional proof *}
-
-text {*
-  We consider the proposition @{text "P \<or> P \<longrightarrow> P"}.  The proof below
-  involves forward-chaining from @{text "P \<or> P"}, followed by an
-  explicit case-analysis on the two \emph{identical} cases.
-*}
-
-lemma "P | P --> P"
-proof
-  assume "P | P"
-  then show P
-  proof                    -- {*
-    rule @{text disjE}: \smash{$\infer{C}{A \disj B & \infer*{C}{[A]} & \infer*{C}{[B]}}$}
-  *}
-    assume P show P by fact
-  next
-    assume P show P by fact
-  qed
-qed
-
-text {*
-  Case splits are \emph{not} hardwired into the Isar language as a
-  special feature.  The \isacommand{next} command used to separate the
-  cases above is just a short form of managing block structure.
-
-  \medskip In general, applying proof methods may split up a goal into
-  separate ``cases'', i.e.\ new subgoals with individual local
-  assumptions.  The corresponding proof text typically mimics this by
-  establishing results in appropriate contexts, separated by blocks.
-
-  In order to avoid too many explicit parentheses, the Isar system
-  implicitly opens an additional block for any new goal; the
-  \isacommand{next} statement then closes one block level, opening a
-  new one.  The resulting behavior is what one would expect from
-  separating cases, except that it is more flexible.  E.g.\ an induction
-  base case (which does not introduce local assumptions) would
-  \emph{not} require \isacommand{next} to separate the subsequent step
-  case.
-
-  \medskip In our example the situation is even simpler, since the two
-  cases actually coincide.  Consequently the proof may be rephrased as
-  follows.
-*}
-
-lemma "P | P --> P"
-proof
-  assume "P | P"
-  then show P
-  proof
-    assume P
-    show P by fact
-    show P by fact
-  qed
-qed
-
-text {*
-  Again, the rather vacuous body of the proof may be collapsed.  Thus
-  the case analysis degenerates into two assumption steps, which are
-  implicitly performed when concluding the single rule step of the
-  double-dot proof as follows.
-*}
-
-lemma "P | P --> P"
-proof
-  assume "P | P"
-  then show P ..
-qed
-
-
-subsubsection {* A quantifier proof *}
-
-text {*
-  To illustrate quantifier reasoning, let us prove @{text "(\<exists>x. P (f
-  x)) \<longrightarrow> (\<exists>y. P y)"}.  Informally, this holds because any @{text a}
-  with @{text "P (f a)"} may be taken as a witness for the second
-  existential statement.
-
-  The first proof is rather verbose, exhibiting quite a lot of
-  (redundant) detail.  It gives explicit rules, even with some
-  instantiation.  Furthermore, we encounter two new language elements:
-  the \isacommand{fix} command augments the context by some new
-  ``arbitrary, but fixed'' element; the \isacommand{is} annotation
-  binds term abbreviations by higher-order pattern matching.
-*}
-
-lemma "(EX x. P (f x)) --> (EX y. P y)"
-proof
-  assume "EX x. P (f x)"
-  then show "EX y. P y"
-  proof (rule exE)             -- {*
-    rule \name{exE}: \smash{$\infer{B}{\ex x A(x) & \infer*{B}{[A(x)]_x}}$}
-  *}
-    fix a
-    assume "P (f a)" (is "P ?witness")
-    then show ?thesis by (rule exI [of P ?witness])
-  qed
-qed
-
-text {*
-  While explicit rule instantiation may occasionally improve
-  readability of certain aspects of reasoning, it is usually quite
-  redundant.  Above, the basic proof outline gives already enough
-  structural clues for the system to infer both the rules and their
-  instances (by higher-order unification).  Thus we may as well prune
-  the text as follows.
-*}
-
-lemma "(EX x. P (f x)) --> (EX y. P y)"
-proof
-  assume "EX x. P (f x)"
-  then show "EX y. P y"
-  proof
-    fix a
-    assume "P (f a)"
-    then show ?thesis ..
-  qed
-qed
-
-text {*
-  Explicit @{text \<exists>}-elimination as seen above can become quite
-  cumbersome in practice.  The derived Isar language element
-  ``\isakeyword{obtain}'' provides a more handsome way to do
-  generalized existence reasoning.
-*}
-
-lemma "(EX x. P (f x)) --> (EX y. P y)"
-proof
-  assume "EX x. P (f x)"
-  then obtain a where "P (f a)" ..
-  then show "EX y. P y" ..
-qed
-
-text {*
-  Technically, \isakeyword{obtain} is similar to \isakeyword{fix} and
-  \isakeyword{assume} together with a soundness proof of the
-  elimination involved.  Thus it behaves similarly to any other forward
-  proof element.  Also note that due to the nature of general
-  existence reasoning involved here, any result exported from the
-  context of an \isakeyword{obtain} statement may \emph{not} refer to
-  the parameters introduced there.
-*}
-
-
-
-subsubsection {* Deriving rules in Isabelle *}
-
-text {*
-  We derive the conjunction elimination rule from the corresponding
-  projections.  The proof is quite straightforward, since
-  Isabelle/Isar supports non-atomic goals and assumptions fully
-  transparently.
-*}
-
-theorem conjE: "A & B ==> (A ==> B ==> C) ==> C"
-proof -
-  assume "A & B"
-  assume r: "A ==> B ==> C"
-  show C
-  proof (rule r)
-    show A by (rule conjunct1) fact
-    show B by (rule conjunct2) fact
-  qed
-qed
-
-end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Isar_examples/Basic_Logic.thy	Mon Jun 22 23:48:24 2009 +0200
@@ -0,0 +1,448 @@
+(*  Title:      HOL/Isar_examples/Basic_Logic.thy
+    Author:     Markus Wenzel, TU Muenchen
+
+Basic propositional and quantifier reasoning.
+*)
+
+header {* Basic logical reasoning *}
+
+theory Basic_Logic
+imports Main
+begin
+
+
+subsection {* Pure backward reasoning *}
+
+text {*
+  In order to get a first idea of what Isabelle/Isar proof documents
+  may look like, we consider the propositions @{text I}, @{text K},
+  and @{text S}.  The following (rather explicit) proofs should
+  require little extra explanation.
+*}
+
+lemma I: "A --> A"
+proof
+  assume A
+  show A by fact
+qed
+
+lemma K: "A --> B --> A"
+proof
+  assume A
+  show "B --> A"
+  proof
+    show A by fact
+  qed
+qed
+
+lemma S: "(A --> B --> C) --> (A --> B) --> A --> C"
+proof
+  assume "A --> B --> C"
+  show "(A --> B) --> A --> C"
+  proof
+    assume "A --> B"
+    show "A --> C"
+    proof
+      assume A
+      show C
+      proof (rule mp)
+        show "B --> C" by (rule mp) fact+
+        show B by (rule mp) fact+
+      qed
+    qed
+  qed
+qed
+
+text {*
+  Isar provides several ways to fine-tune the reasoning, avoiding
+  excessive detail.  Several abbreviated language elements are
+  available, enabling the writer to express proofs in a more concise
+  way, even without referring to any automated proof tools yet.
+
+  First of all, proof by assumption may be abbreviated as a single
+  dot.
+*}
+
+lemma "A --> A"
+proof
+  assume A
+  show A .
+qed
+
+text {*
+  In fact, concluding any (sub-)proof already involves solving any
+  remaining goals by assumption\footnote{This is not a completely
+  trivial operation, as proof by assumption may involve full
+  higher-order unification.}.  Thus we may skip the rather vacuous
+  body of the above proof as well.
+*}
+
+lemma "A --> A"
+proof
+qed
+
+text {*
+  Note that the \isacommand{proof} command refers to the @{text rule}
+  method (without arguments) by default.  Thus it implicitly applies a
+  single rule, as determined from the syntactic form of the statements
+  involved.  The \isacommand{by} command abbreviates any proof with
+  empty body, so the proof may be further pruned.
+*}
+
+lemma "A --> A"
+  by rule
+
+text {*
+  Proof by a single rule may be abbreviated as double-dot.
+*}
+
+lemma "A --> A" ..
+
+text {*
+  Thus we have arrived at an adequate representation of the proof of a
+  tautology that holds by a single standard rule.\footnote{Apparently,
+  the rule here is implication introduction.}
+*}
+
+text {*
+  Let us also reconsider @{text K}.  Its statement is composed of
+  iterated connectives.  Basic decomposition is by a single rule at a
+  time, which is why our first version above was by nesting two
+  proofs.
+
+  The @{text intro} proof method repeatedly decomposes a goal's
+  conclusion.\footnote{The dual method is @{text elim}, acting on a
+  goal's premises.}
+*}
+
+lemma "A --> B --> A"
+proof (intro impI)
+  assume A
+  show A by fact
+qed
+
+text {*
+  Again, the body may be collapsed.
+*}
+
+lemma "A --> B --> A"
+  by (intro impI)
+
+text {*
+  Just like @{text rule}, the @{text intro} and @{text elim} proof
+  methods pick standard structural rules, in case no explicit
+  arguments are given.  While implicit rules are usually just fine for
+  single rule application, this may go too far with iteration.  Thus
+  in practice, @{text intro} and @{text elim} would typically be
+  restricted to certain structures by giving a few rules only, e.g.\
+  \isacommand{proof}~@{text "(intro impI allI)"} to strip implications
+  and universal quantifiers.
+
+  Such well-tuned iterated decomposition of certain structures is the
+  prime application of @{text intro} and @{text elim}.  In contrast,
+  terminal steps that solve a goal completely are usually performed by
+  actual automated proof methods (such as \isacommand{by}~@{text
+  blast}).
+*}
+
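+text {*
+  For instance, a statement that combines universal quantifiers and
+  implications may be decomposed in a single initial step.  The
+  following small example (over a unary predicate @{text P}) is given
+  here merely to illustrate that idiom.
+*}
+
+lemma "ALL x. P x --> Q --> P x"
+proof (intro allI impI)
+  fix x
+  assume "P x"
+  show "P x" by fact
+qed
+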
+
+subsection {* Variations of backward vs.\ forward reasoning *}
+
+text {*
+  Certainly, any proof may be performed in backward-style only.  On
+  the other hand, small steps of reasoning are often more naturally
+  expressed in forward-style.  Isar supports both backward and forward
+  reasoning as a first-class concept.  In order to demonstrate the
+  difference, we consider several proofs of @{text "A \<and> B \<longrightarrow> B \<and> A"}.
+
+  The first version is purely backward.
+*}
+
+lemma "A & B --> B & A"
+proof
+  assume "A & B"
+  show "B & A"
+  proof
+    show B by (rule conjunct2) fact
+    show A by (rule conjunct1) fact
+  qed
+qed
+
+text {*
+  Above, the @{text "conjunct_1/2"} projection rules had to be named
+  explicitly, since the goals @{text B} and @{text A} did not provide
+  any structural clue.  This may be avoided using \isacommand{from} to
+  focus on the @{text "A \<and> B"} assumption as the current facts,
+  enabling the use of double-dot proofs.  Note that \isacommand{from}
+  already does forward-chaining, involving the \name{conjE} rule here.
+*}
+
+lemma "A & B --> B & A"
+proof
+  assume "A & B"
+  show "B & A"
+  proof
+    from `A & B` show B ..
+    from `A & B` show A ..
+  qed
+qed
+
+text {*
+  In the next version, we move the forward step one level upwards.
+  Forward-chaining from the most recent facts is indicated by the
+  \isacommand{then} command.  Thus the proof of @{text "B \<and> A"} from
+  @{text "A \<and> B"} actually becomes an elimination, rather than an
+  introduction.  The resulting proof structure directly corresponds to
+  that of the @{text conjE} rule, including the repeated goal
+  proposition that is abbreviated as @{text ?thesis} below.
+*}
+
+lemma "A & B --> B & A"
+proof
+  assume "A & B"
+  then show "B & A"
+  proof                    -- {* rule @{text conjE} of @{text "A \<and> B"} *}
+    assume B A
+    then show ?thesis ..   -- {* rule @{text conjI} of @{text "B \<and> A"} *}
+  qed
+qed
+
+text {*
+  In the subsequent version we flatten the structure of the main body
+  by doing forward reasoning all the time.  Only the outermost
+  decomposition step is left as backward.
+*}
+
+lemma "A & B --> B & A"
+proof
+  assume "A & B"
+  from `A & B` have A ..
+  from `A & B` have B ..
+  from `B` `A` show "B & A" ..
+qed
+
+text {*
+  We can still push forward-reasoning a bit further, even at the risk
+  of getting ridiculous.  Note that we force the initial proof step to
+  do nothing here, by referring to the ``-'' proof method.
+*}
+
+lemma "A & B --> B & A"
+proof -
+  {
+    assume "A & B"
+    from `A & B` have A ..
+    from `A & B` have B ..
+    from `B` `A` have "B & A" ..
+  }
+  then show ?thesis ..         -- {* rule \name{impI} *}
+qed
+
+text {*
+  \medskip With these examples we have shifted through a whole range
+  from purely backward to purely forward reasoning.  Apparently, in
+  the extreme ends we get slightly ill-structured proofs, which also
+  require much explicit naming of either rules (backward) or local
+  facts (forward).
+
+  The general lesson learned here is that good proof style would
+  achieve just the \emph{right} balance of top-down backward
+  decomposition, and bottom-up forward composition.  In general, there
+  is no single best way to arrange some pieces of formal reasoning, of
+  course.  Depending on the actual applications, the intended audience
+  etc., rules (and methods) on the one hand vs.\ facts on the other
+  hand have to be emphasized in an appropriate way.  This requires the
+  proof writer to develop good taste, and some practice, of course.
+*}
+
+text {*
+  For our example the most appropriate way of reasoning is probably
+  the middle one, with conjunction introduction done after
+  elimination.
+*}
+
+lemma "A & B --> B & A"
+proof
+  assume "A & B"
+  then show "B & A"
+  proof
+    assume B A
+    then show ?thesis ..
+  qed
+qed
+
+
+
+subsection {* A few examples from ``Introduction to Isabelle'' *}
+
+text {*
+  We rephrase some of the basic reasoning examples of
+  \cite{isabelle-intro}, using HOL rather than FOL.
+*}
+
+subsubsection {* A propositional proof *}
+
+text {*
+  We consider the proposition @{text "P \<or> P \<longrightarrow> P"}.  The proof below
+  involves forward-chaining from @{text "P \<or> P"}, followed by an
+  explicit case-analysis on the two \emph{identical} cases.
+*}
+
+lemma "P | P --> P"
+proof
+  assume "P | P"
+  then show P
+  proof                    -- {*
+    rule @{text disjE}: \smash{$\infer{C}{A \disj B & \infer*{C}{[A]} & \infer*{C}{[B]}}$}
+  *}
+    assume P show P by fact
+  next
+    assume P show P by fact
+  qed
+qed
+
+text {*
+  Case splits are \emph{not} hardwired into the Isar language as a
+  special feature.  The \isacommand{next} command used to separate the
+  cases above is just a short form of managing block structure.
+
+  \medskip In general, applying proof methods may split up a goal into
+  separate ``cases'', i.e.\ new subgoals with individual local
+  assumptions.  The corresponding proof text typically mimics this by
+  establishing results in appropriate contexts, separated by blocks.
+
+  In order to avoid too many explicit parentheses, the Isar system
+  implicitly opens an additional block for any new goal; the
+  \isacommand{next} statement then closes one block level, opening a
+  new one.  The resulting behavior is what one would expect from
+  separating cases, except that it is more flexible.  E.g.\ an induction
+  base case (which does not introduce local assumptions) would
+  \emph{not} require \isacommand{next} to separate the subsequent step
+  case.
+
+  \medskip In our example the situation is even simpler, since the two
+  cases actually coincide.  Consequently the proof may be rephrased as
+  follows.
+*}
+
+lemma "P | P --> P"
+proof
+  assume "P | P"
+  then show P
+  proof
+    assume P
+    show P by fact
+    show P by fact
+  qed
+qed
+
+text {*
+  Again, the rather vacuous body of the proof may be collapsed.  Thus
+  the case analysis degenerates into two assumption steps, which are
+  implicitly performed when concluding the single rule step of the
+  double-dot proof as follows.
+*}
+
+lemma "P | P --> P"
+proof
+  assume "P | P"
+  then show P ..
+qed
+
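+text {*
+  Dually, a conjunction of two identical parts is established by a
+  single introduction step, with the assumption supplied twice as a
+  fact.  This small additional example merely complements the
+  disjunctive case above.
+*}
+
+lemma "P --> P & P"
+proof
+  assume P
+  from `P` `P` show "P & P" ..
+qed
+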
+
+subsubsection {* A quantifier proof *}
+
+text {*
+  To illustrate quantifier reasoning, let us prove @{text "(\<exists>x. P (f
+  x)) \<longrightarrow> (\<exists>y. P y)"}.  Informally, this holds because any @{text a}
+  with @{text "P (f a)"} may be taken as a witness for the second
+  existential statement.
+
+  The first proof is rather verbose, exhibiting quite a lot of
+  (redundant) detail.  It gives explicit rules, even with some
+  instantiation.  Furthermore, we encounter two new language elements:
+  the \isacommand{fix} command augments the context by some new
+  ``arbitrary, but fixed'' element; the \isacommand{is} annotation
+  binds term abbreviations by higher-order pattern matching.
+*}
+
+lemma "(EX x. P (f x)) --> (EX y. P y)"
+proof
+  assume "EX x. P (f x)"
+  then show "EX y. P y"
+  proof (rule exE)             -- {*
+    rule \name{exE}: \smash{$\infer{B}{\ex x A(x) & \infer*{B}{[A(x)]_x}}$}
+  *}
+    fix a
+    assume "P (f a)" (is "P ?witness")
+    then show ?thesis by (rule exI [of P ?witness])
+  qed
+qed
+
+text {*
+  While explicit rule instantiation may occasionally improve
+  readability of certain aspects of reasoning, it is usually quite
+  redundant.  Above, the basic proof outline gives already enough
+  structural clues for the system to infer both the rules and their
+  instances (by higher-order unification).  Thus we may as well prune
+  the text as follows.
+*}
+
+lemma "(EX x. P (f x)) --> (EX y. P y)"
+proof
+  assume "EX x. P (f x)"
+  then show "EX y. P y"
+  proof
+    fix a
+    assume "P (f a)"
+    then show ?thesis ..
+  qed
+qed
+
+text {*
+  Explicit @{text \<exists>}-elimination as seen above can become quite
+  cumbersome in practice.  The derived Isar language element
+  ``\isakeyword{obtain}'' provides a more handsome way to do
+  generalized existence reasoning.
+*}
+
+lemma "(EX x. P (f x)) --> (EX y. P y)"
+proof
+  assume "EX x. P (f x)"
+  then obtain a where "P (f a)" ..
+  then show "EX y. P y" ..
+qed
+
+text {*
+  Technically, \isakeyword{obtain} is similar to \isakeyword{fix} and
+  \isakeyword{assume} together with a soundness proof of the
+  elimination involved.  Thus it behaves similarly to any other forward
+  proof element.  Also note that due to the nature of general
+  existence reasoning involved here, any result exported from the
+  context of an \isakeyword{obtain} statement may \emph{not} refer to
+  the parameters introduced there.
+*}
+
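+text {*
+  The same idiom still applies when the obtained witness requires
+  further processing.  In the following small variation, the
+  conjunction is split only after the \isakeyword{obtain} step.
+*}
+
+lemma "(EX x. P x & Q x) --> (EX y. Q y)"
+proof
+  assume "EX x. P x & Q x"
+  then obtain a where "P a & Q a" ..
+  then have "Q a" ..
+  then show "EX y. Q y" ..
+qed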
+
+
+subsubsection {* Deriving rules in Isabelle *}
+
+text {*
+  We derive the conjunction elimination rule from the corresponding
+  projections.  The proof is quite straightforward, since
+  Isabelle/Isar supports non-atomic goals and assumptions fully
+  transparently.
+*}
+
+theorem conjE: "A & B ==> (A ==> B ==> C) ==> C"
+proof -
+  assume "A & B"
+  assume r: "A ==> B ==> C"
+  show C
+  proof (rule r)
+    show A by (rule conjunct1) fact
+    show B by (rule conjunct2) fact
+  qed
+qed
+
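+text {*
+  The same style also yields a (derived) rule of modus ponens; this
+  further small exercise again relies on non-atomic assumptions.
+*}
+
+theorem impE': "A --> B ==> A ==> B"
+proof -
+  assume "A --> B"
+  assume A
+  from `A --> B` `A` show B by (rule mp)
+qed
+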
+end
--- a/src/HOL/Isar_examples/Cantor.thy	Mon Jun 22 22:51:08 2009 +0200
+++ b/src/HOL/Isar_examples/Cantor.thy	Mon Jun 22 23:48:24 2009 +0200
@@ -1,11 +1,12 @@
 (*  Title:      HOL/Isar_examples/Cantor.thy
-    ID:         $Id$
     Author:     Markus Wenzel, TU Muenchen
 *)
 
 header {* Cantor's Theorem *}
 
-theory Cantor imports Main begin
+theory Cantor
+imports Main
+begin
 
 text_raw {*
   \footnote{This is an Isar version of the final example of the
--- a/src/HOL/Isar_examples/Drinker.thy	Mon Jun 22 22:51:08 2009 +0200
+++ b/src/HOL/Isar_examples/Drinker.thy	Mon Jun 22 23:48:24 2009 +0200
@@ -1,11 +1,12 @@
 (*  Title:      HOL/Isar_examples/Drinker.thy
-    ID:         $Id$
     Author:     Makarius
 *)
 
 header {* The Drinker's Principle *}
 
-theory Drinker imports Main begin
+theory Drinker
+imports Main
+begin
 
 text {*
   Here is another example of classical reasoning: the Drinker's
--- a/src/HOL/Isar_examples/ExprCompiler.thy	Mon Jun 22 22:51:08 2009 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,230 +0,0 @@
-(*  Title:      HOL/Isar_examples/ExprCompiler.thy
-    ID:         $Id$
-    Author:     Markus Wenzel, TU Muenchen
-
-Correctness of a simple expression/stack-machine compiler.
-*)
-
-header {* Correctness of a simple expression compiler *}
-
-theory ExprCompiler imports Main begin
-
-text {*
- This is a (rather trivial) example of program verification.  We model
- a compiler for translating expressions to stack machine instructions,
- and prove its correctness wrt.\ some evaluation semantics.
-*}
-
-
-subsection {* Binary operations *}
-
-text {*
- Binary operations are just functions over some type of values.  This
- is both for abstract syntax and semantics, i.e.\ we use a ``shallow
- embedding'' here.
-*}
-
-types
-  'val binop = "'val => 'val => 'val"
-
-
-subsection {* Expressions *}
-
-text {*
- The language of expressions is defined as an inductive type,
- consisting of variables, constants, and binary operations on
- expressions.
-*}
-
-datatype ('adr, 'val) expr =
-  Variable 'adr |
-  Constant 'val |
-  Binop "'val binop" "('adr, 'val) expr" "('adr, 'val) expr"
-
-text {*
- Evaluation (wrt.\ some environment of variable assignments) is
- defined by primitive recursion over the structure of expressions.
-*}
-
-consts
-  eval :: "('adr, 'val) expr => ('adr => 'val) => 'val"
-
-primrec
-  "eval (Variable x) env = env x"
-  "eval (Constant c) env = c"
-  "eval (Binop f e1 e2) env = f (eval e1 env) (eval e2 env)"
-
-
-subsection {* Machine *}
-
-text {*
- Next we model a simple stack machine, with three instructions.
-*}
-
-datatype ('adr, 'val) instr =
-  Const 'val |
-  Load 'adr |
-  Apply "'val binop"
-
-text {*
- Execution of a list of stack machine instructions is easily defined
- as follows.
-*}
-
-consts
-  exec :: "(('adr, 'val) instr) list
-    => 'val list => ('adr => 'val) => 'val list"
-
-primrec
-  "exec [] stack env = stack"
-  "exec (instr # instrs) stack env =
-    (case instr of
-      Const c => exec instrs (c # stack) env
-    | Load x => exec instrs (env x # stack) env
-    | Apply f => exec instrs (f (hd stack) (hd (tl stack))
-                   # (tl (tl stack))) env)"
-
-constdefs
-  execute :: "(('adr, 'val) instr) list => ('adr => 'val) => 'val"
-  "execute instrs env == hd (exec instrs [] env)"
-
-
-subsection {* Compiler *}
-
-text {*
- We are ready to define the compilation function of expressions to
- lists of stack machine instructions.
-*}
-
-consts
-  compile :: "('adr, 'val) expr => (('adr, 'val) instr) list"
-
-primrec
-  "compile (Variable x) = [Load x]"
-  "compile (Constant c) = [Const c]"
-  "compile (Binop f e1 e2) = compile e2 @ compile e1 @ [Apply f]"
-
-
-text {*
- The main result of this development is the correctness theorem for
- $\idt{compile}$.  We first establish a lemma about $\idt{exec}$ and
- list append.
-*}
-
-lemma exec_append:
-  "exec (xs @ ys) stack env =
-    exec ys (exec xs stack env) env"
-proof (induct xs arbitrary: stack)
-  case Nil
-  show ?case by simp
-next
-  case (Cons x xs)
-  show ?case
-  proof (induct x)
-    case Const
-    from Cons show ?case by simp
-  next
-    case Load
-    from Cons show ?case by simp
-  next
-    case Apply
-    from Cons show ?case by simp
-  qed
-qed
-
-theorem correctness: "execute (compile e) env = eval e env"
-proof -
-  have "\<And>stack. exec (compile e) stack env = eval e env # stack"
-  proof (induct e)
-    case Variable show ?case by simp
-  next
-    case Constant show ?case by simp
-  next
-    case Binop then show ?case by (simp add: exec_append)
-  qed
-  then show ?thesis by (simp add: execute_def)
-qed
-
-
-text {*
- \bigskip In the proofs above, the \name{simp} method does quite a lot
- of work behind the scenes (mostly ``functional program execution'').
- Subsequently, the same reasoning is elaborated in detail --- at most
- one recursive function definition is used at a time.  Thus we get a
- better idea of what is actually going on.
-*}
-
-lemma exec_append':
-  "exec (xs @ ys) stack env = exec ys (exec xs stack env) env"
-proof (induct xs arbitrary: stack)
-  case (Nil s)
-  have "exec ([] @ ys) s env = exec ys s env" by simp
-  also have "... = exec ys (exec [] s env) env" by simp
-  finally show ?case .
-next
-  case (Cons x xs s)
-  show ?case
-  proof (induct x)
-    case (Const val)
-    have "exec ((Const val # xs) @ ys) s env = exec (Const val # xs @ ys) s env"
-      by simp
-    also have "... = exec (xs @ ys) (val # s) env" by simp
-    also from Cons have "... = exec ys (exec xs (val # s) env) env" .
-    also have "... = exec ys (exec (Const val # xs) s env) env" by simp
-    finally show ?case .
-  next
-    case (Load adr)
-    from Cons show ?case by simp -- {* same as above *}
-  next
-    case (Apply fn)
-    have "exec ((Apply fn # xs) @ ys) s env =
-        exec (Apply fn # xs @ ys) s env" by simp
-    also have "... =
-        exec (xs @ ys) (fn (hd s) (hd (tl s)) # (tl (tl s))) env" by simp
-    also from Cons have "... =
-        exec ys (exec xs (fn (hd s) (hd (tl s)) # tl (tl s)) env) env" .
-    also have "... = exec ys (exec (Apply fn # xs) s env) env" by simp
-    finally show ?case .
-  qed
-qed
-
-theorem correctness': "execute (compile e) env = eval e env"
-proof -
-  have exec_compile: "\<And>stack. exec (compile e) stack env = eval e env # stack"
-  proof (induct e)
-    case (Variable adr s)
-    have "exec (compile (Variable adr)) s env = exec [Load adr] s env"
-      by simp
-    also have "... = env adr # s" by simp
-    also have "env adr = eval (Variable adr) env" by simp
-    finally show ?case .
-  next
-    case (Constant val s)
-    show ?case by simp -- {* same as above *}
-  next
-    case (Binop fn e1 e2 s)
-    have "exec (compile (Binop fn e1 e2)) s env =
-        exec (compile e2 @ compile e1 @ [Apply fn]) s env" by simp
-    also have "... = exec [Apply fn]
-        (exec (compile e1) (exec (compile e2) s env) env) env"
-      by (simp only: exec_append)
-    also have "exec (compile e2) s env = eval e2 env # s" by fact
-    also have "exec (compile e1) ... env = eval e1 env # ..." by fact
-    also have "exec [Apply fn] ... env =
-        fn (hd ...) (hd (tl ...)) # (tl (tl ...))" by simp
-    also have "... = fn (eval e1 env) (eval e2 env) # s" by simp
-    also have "fn (eval e1 env) (eval e2 env) =
-        eval (Binop fn e1 e2) env"
-      by simp
-    finally show ?case .
-  qed
-
-  have "execute (compile e) env = hd (exec (compile e) [] env)"
-    by (simp add: execute_def)
-  also from exec_compile
-    have "exec (compile e) [] env = [eval e env]" .
-  also have "hd ... = eval e env" by simp
-  finally show ?thesis .
-qed
-
-end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Isar_examples/Expr_Compiler.thy	Mon Jun 22 23:48:24 2009 +0200
@@ -0,0 +1,231 @@
+(*  Title:      HOL/Isar_examples/Expr_Compiler.thy
+    Author:     Markus Wenzel, TU Muenchen
+
+Correctness of a simple expression/stack-machine compiler.
+*)
+
+header {* Correctness of a simple expression compiler *}
+
+theory Expr_Compiler
+imports Main
+begin
+
+text {*
+ This is a (rather trivial) example of program verification.  We model
+ a compiler for translating expressions to stack machine instructions,
+ and prove its correctness wrt.\ some evaluation semantics.
+*}
+
+
+subsection {* Binary operations *}
+
+text {*
+ Binary operations are just functions over some type of values.  This
+ is both for abstract syntax and semantics, i.e.\ we use a ``shallow
+ embedding'' here.
+*}
+
+types
+  'val binop = "'val => 'val => 'val"
+
+
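+text {*
+ For instance, addition of natural numbers is a binary operation in
+ this sense.  The following definition merely provides a running
+ example for the functions defined below; it is not needed anywhere
+ else.
+*}
+
+definition
+  add_op :: "nat binop" where
+  "add_op x y = x + y"
+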
+subsection {* Expressions *}
+
+text {*
+ The language of expressions is defined as an inductive type,
+ consisting of variables, constants, and binary operations on
+ expressions.
+*}
+
+datatype ('adr, 'val) expr =
+  Variable 'adr |
+  Constant 'val |
+  Binop "'val binop" "('adr, 'val) expr" "('adr, 'val) expr"
+
+text {*
+ Evaluation (wrt.\ some environment of variable assignments) is
+ defined by primitive recursion over the structure of expressions.
+*}
+
+consts
+  eval :: "('adr, 'val) expr => ('adr => 'val) => 'val"
+
+primrec
+  "eval (Variable x) env = env x"
+  "eval (Constant c) env = c"
+  "eval (Binop f e1 e2) env = f (eval e1 env) (eval e2 env)"
+
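+text {*
+ As a quick check, the expression @{text "x + 1"} evaluates as
+ expected, using the illustrative operation @{text add_op} from
+ above.
+*}
+
+lemma "eval (Binop add_op (Variable x) (Constant 1)) env = env x + 1"
+  by (simp add: add_op_def)
+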
+
+subsection {* Machine *}
+
+text {*
+ Next we model a simple stack machine, with three instructions.
+*}
+
+datatype ('adr, 'val) instr =
+  Const 'val |
+  Load 'adr |
+  Apply "'val binop"
+
+text {*
+ Execution of a list of stack machine instructions is easily defined
+ as follows.
+*}
+
+consts
+  exec :: "(('adr, 'val) instr) list
+    => 'val list => ('adr => 'val) => 'val list"
+
+primrec
+  "exec [] stack env = stack"
+  "exec (instr # instrs) stack env =
+    (case instr of
+      Const c => exec instrs (c # stack) env
+    | Load x => exec instrs (env x # stack) env
+    | Apply f => exec instrs (f (hd stack) (hd (tl stack))
+                   # (tl (tl stack))) env)"
+
+constdefs
+  execute :: "(('adr, 'val) instr) list => ('adr => 'val) => 'val"
+  "execute instrs env == hd (exec instrs [] env)"
+
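+text {*
+ As a small check, pushing a constant, loading a variable, and
+ applying the illustrative operation @{text add_op} leaves exactly
+ the combined value on the stack.
+*}
+
+lemma "exec [Const 1, Load x, Apply add_op] [] env = [add_op (env x) 1]"
+  by simp
+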
+
+subsection {* Compiler *}
+
+text {*
+ We are ready to define the compilation function of expressions to
+ lists of stack machine instructions.
+*}
+
+consts
+  compile :: "('adr, 'val) expr => (('adr, 'val) instr) list"
+
+primrec
+  "compile (Variable x) = [Load x]"
+  "compile (Constant c) = [Const c]"
+  "compile (Binop f e1 e2) = compile e2 @ compile e1 @ [Apply f]"
+
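+text {*
+ Compiling the expression from the evaluation example above yields
+ the corresponding instruction sequence, as one would expect.
+*}
+
+lemma "compile (Binop add_op (Variable x) (Constant 1)) =
+    [Const 1, Load x, Apply add_op]"
+  by simp
+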
+
+text {*
+ The main result of this development is the correctness theorem for
+ $\idt{compile}$.  We first establish a lemma about $\idt{exec}$ and
+ list append.
+*}
+
+lemma exec_append:
+  "exec (xs @ ys) stack env =
+    exec ys (exec xs stack env) env"
+proof (induct xs arbitrary: stack)
+  case Nil
+  show ?case by simp
+next
+  case (Cons x xs)
+  show ?case
+  proof (induct x)
+    case Const
+    from Cons show ?case by simp
+  next
+    case Load
+    from Cons show ?case by simp
+  next
+    case Apply
+    from Cons show ?case by simp
+  qed
+qed
+
+theorem correctness: "execute (compile e) env = eval e env"
+proof -
+  have "\<And>stack. exec (compile e) stack env = eval e env # stack"
+  proof (induct e)
+    case Variable show ?case by simp
+  next
+    case Constant show ?case by simp
+  next
+    case Binop then show ?case by (simp add: exec_append)
+  qed
+  then show ?thesis by (simp add: execute_def)
+qed
+
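+text {*
+ In particular, the correctness theorem covers the small concrete
+ example used above.
+*}
+
+lemma "execute (compile (Binop add_op (Variable x) (Constant 1))) env =
+    env x + 1"
+  by (simp add: correctness add_op_def)
+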
+
+text {*
+ \bigskip In the proofs above, the \name{simp} method does quite a lot
+ of work behind the scenes (mostly ``functional program execution'').
+ Subsequently, the same reasoning is elaborated in detail --- at most
+ one recursive function definition is used at a time.  Thus we get a
+ better idea of what is actually going on.
+*}
+
+lemma exec_append':
+  "exec (xs @ ys) stack env = exec ys (exec xs stack env) env"
+proof (induct xs arbitrary: stack)
+  case (Nil s)
+  have "exec ([] @ ys) s env = exec ys s env" by simp
+  also have "... = exec ys (exec [] s env) env" by simp
+  finally show ?case .
+next
+  case (Cons x xs s)
+  show ?case
+  proof (induct x)
+    case (Const val)
+    have "exec ((Const val # xs) @ ys) s env = exec (Const val # xs @ ys) s env"
+      by simp
+    also have "... = exec (xs @ ys) (val # s) env" by simp
+    also from Cons have "... = exec ys (exec xs (val # s) env) env" .
+    also have "... = exec ys (exec (Const val # xs) s env) env" by simp
+    finally show ?case .
+  next
+    case (Load adr)
+    from Cons show ?case by simp -- {* same as above *}
+  next
+    case (Apply fn)
+    have "exec ((Apply fn # xs) @ ys) s env =
+        exec (Apply fn # xs @ ys) s env" by simp
+    also have "... =
+        exec (xs @ ys) (fn (hd s) (hd (tl s)) # (tl (tl s))) env" by simp
+    also from Cons have "... =
+        exec ys (exec xs (fn (hd s) (hd (tl s)) # tl (tl s)) env) env" .
+    also have "... = exec ys (exec (Apply fn # xs) s env) env" by simp
+    finally show ?case .
+  qed
+qed
+
+theorem correctness': "execute (compile e) env = eval e env"
+proof -
+  have exec_compile: "\<And>stack. exec (compile e) stack env = eval e env # stack"
+  proof (induct e)
+    case (Variable adr s)
+    have "exec (compile (Variable adr)) s env = exec [Load adr] s env"
+      by simp
+    also have "... = env adr # s" by simp
+    also have "env adr = eval (Variable adr) env" by simp
+    finally show ?case .
+  next
+    case (Constant val s)
+    show ?case by simp -- {* same as above *}
+  next
+    case (Binop fn e1 e2 s)
+    have "exec (compile (Binop fn e1 e2)) s env =
+        exec (compile e2 @ compile e1 @ [Apply fn]) s env" by simp
+    also have "... = exec [Apply fn]
+        (exec (compile e1) (exec (compile e2) s env) env) env"
+      by (simp only: exec_append)
+    also have "exec (compile e2) s env = eval e2 env # s" by fact
+    also have "exec (compile e1) ... env = eval e1 env # ..." by fact
+    also have "exec [Apply fn] ... env =
+        fn (hd ...) (hd (tl ...)) # (tl (tl ...))" by simp
+    also have "... = fn (eval e1 env) (eval e2 env) # s" by simp
+    also have "fn (eval e1 env) (eval e2 env) =
+        eval (Binop fn e1 e2) env"
+      by simp
+    finally show ?case .
+  qed
+
+  have "execute (compile e) env = hd (exec (compile e) [] env)"
+    by (simp add: execute_def)
+  also from exec_compile
+    have "exec (compile e) [] env = [eval e env]" .
+  also have "hd ... = eval e env" by simp
+  finally show ?thesis .
+qed
+
+end
--- a/src/HOL/Isar_examples/Fibonacci.thy	Mon Jun 22 22:51:08 2009 +0200
+++ b/src/HOL/Isar_examples/Fibonacci.thy	Mon Jun 22 23:48:24 2009 +0200
@@ -1,5 +1,4 @@
 (*  Title:      HOL/Isar_examples/Fibonacci.thy
-    ID:         $Id$
     Author:     Gertrud Bauer
     Copyright   1999 Technische Universitaet Muenchen
 
--- a/src/HOL/Isar_examples/Group.thy	Mon Jun 22 22:51:08 2009 +0200
+++ b/src/HOL/Isar_examples/Group.thy	Mon Jun 22 23:48:24 2009 +0200
@@ -1,11 +1,12 @@
 (*  Title:      HOL/Isar_examples/Group.thy
-    ID:         $Id$
     Author:     Markus Wenzel, TU Muenchen
 *)
 
 header {* Basic group theory *}
 
-theory Group imports Main begin
+theory Group
+imports Main
+begin
 
 subsection {* Groups and calculational reasoning *} 
 
--- a/src/HOL/Isar_examples/Hoare.thy	Mon Jun 22 22:51:08 2009 +0200
+++ b/src/HOL/Isar_examples/Hoare.thy	Mon Jun 22 23:48:24 2009 +0200
@@ -1,5 +1,4 @@
 (*  Title:      HOL/Isar_examples/Hoare.thy
-    ID:         $Id$
     Author:     Markus Wenzel, TU Muenchen
 
 A formulation of Hoare logic suitable for Isar.
@@ -7,8 +6,10 @@
 
 header {* Hoare Logic *}
 
-theory Hoare imports Main
-uses ("~~/src/HOL/Hoare/hoare_tac.ML") begin
+theory Hoare
+imports Main
+uses ("~~/src/HOL/Hoare/hoare_tac.ML")
+begin
 
 subsection {* Abstract syntax and semantics *}
 
--- a/src/HOL/Isar_examples/HoareEx.thy	Mon Jun 22 22:51:08 2009 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,328 +0,0 @@
-
-header {* Using Hoare Logic *}
-
-theory HoareEx imports Hoare begin
-
-subsection {* State spaces *}
-
-text {*
- First of all we provide a store of program variables that
- occur in any of the programs considered later.  Slightly unexpected
- things may happen when attempting to work with undeclared variables.
-*}
-
-record vars =
-  I :: nat
-  M :: nat
-  N :: nat
-  S :: nat
-
-text {*
- While all of our variables happen to have the same type, nothing
- would prevent us from working with many-sorted programs as well, or
- even polymorphic ones.  Also note that Isabelle/HOL's extensible
- record types even provide simple means to extend the state space
- later.
-*}
-
-
-subsection {* Basic examples *}
-
-text {*
- We look at a few trivialities involving assignment and sequential
- composition, in order to get an idea of how to work with our
- formulation of Hoare Logic.
-*}
-
-text {*
- Using the basic \name{assign} rule directly is a bit cumbersome.
-*}
-
-lemma
-  "|- .{\<acute>(N_update (\<lambda>_. (2 * \<acute>N))) : .{\<acute>N = 10}.}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
-  by (rule assign)
-
-text {*
- Certainly we want the state modification already done, e.g.\ by
- simplification.  The \name{hoare} method performs the basic state
- update for us; we may apply the Simplifier afterwards to achieve
- ``obvious'' consequences as well.
-*}
-
-lemma "|- .{True}. \<acute>N := 10 .{\<acute>N = 10}."
-  by hoare
-
-lemma "|- .{2 * \<acute>N = 10}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
-  by hoare
-
-lemma "|- .{\<acute>N = 5}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
-  by hoare simp
-
-lemma "|- .{\<acute>N + 1 = a + 1}. \<acute>N := \<acute>N + 1 .{\<acute>N = a + 1}."
-  by hoare
-
-lemma "|- .{\<acute>N = a}. \<acute>N := \<acute>N + 1 .{\<acute>N = a + 1}."
-  by hoare simp
-
-lemma "|- .{a = a & b = b}. \<acute>M := a; \<acute>N := b .{\<acute>M = a & \<acute>N = b}."
-  by hoare
-
-lemma "|- .{True}. \<acute>M := a; \<acute>N := b .{\<acute>M = a & \<acute>N = b}."
-  by hoare simp
-
-lemma
-"|- .{\<acute>M = a & \<acute>N = b}.
-    \<acute>I := \<acute>M; \<acute>M := \<acute>N; \<acute>N := \<acute>I
-    .{\<acute>M = b & \<acute>N = a}."
-  by hoare simp
-
-text {*
- It is important to note that statements like the following one can
- only be proven for each individual program variable.  Due to the
- extra-logical nature of record fields, we cannot formulate a theorem
- relating record selectors and updates schematically.
-*}
-
-lemma "|- .{\<acute>N = a}. \<acute>N := \<acute>N .{\<acute>N = a}."
-  by hoare
-
-lemma "|- .{\<acute>x = a}. \<acute>x := \<acute>x .{\<acute>x = a}."
-  oops
-
-lemma
-  "Valid {s. x s = a} (Basic (\<lambda>s. x_update (x s) s)) {s. x s = n}"
-  -- {* same statement without concrete syntax *}
-  oops
-
-
-text {*
- In the following assignments we make use of the consequence rule in
- order to achieve the intended precondition.  Certainly, the
- \name{hoare} method is able to handle this case, too.
-*}
-
-lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
-proof -
-  have ".{\<acute>M = \<acute>N}. <= .{\<acute>M + 1 ~= \<acute>N}."
-    by auto
-  also have "|- ... \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
-    by hoare
-  finally show ?thesis .
-qed
-
-lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
-proof -
-  have "!!m n::nat. m = n --> m + 1 ~= n"
-      -- {* inclusion of assertions expressed in ``pure'' logic, *}
-      -- {* without mentioning the state space *}
-    by simp
-  also have "|- .{\<acute>M + 1 ~= \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
-    by hoare
-  finally show ?thesis .
-qed
-
-lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
-  by hoare simp
-
-
-subsection {* Multiplication by addition *}
-
-text {*
- We now do some basic examples of actual \texttt{WHILE} programs.
- This one is a loop for calculating the product of two natural
- numbers, by iterated addition.  We first give detailed structured
- proof based on single-step Hoare rules.
-*}
-
-lemma
-  "|- .{\<acute>M = 0 & \<acute>S = 0}.
-      WHILE \<acute>M ~= a
-      DO \<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1 OD
-      .{\<acute>S = a * b}."
-proof -
-  let "|- _ ?while _" = ?thesis
-  let ".{\<acute>?inv}." = ".{\<acute>S = \<acute>M * b}."
-
-  have ".{\<acute>M = 0 & \<acute>S = 0}. <= .{\<acute>?inv}." by auto
-  also have "|- ... ?while .{\<acute>?inv & ~ (\<acute>M ~= a)}."
-  proof
-    let ?c = "\<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1"
-    have ".{\<acute>?inv & \<acute>M ~= a}. <= .{\<acute>S + b = (\<acute>M + 1) * b}."
-      by auto
-    also have "|- ... ?c .{\<acute>?inv}." by hoare
-    finally show "|- .{\<acute>?inv & \<acute>M ~= a}. ?c .{\<acute>?inv}." .
-  qed
-  also have "... <= .{\<acute>S = a * b}." by auto
-  finally show ?thesis .
-qed
-
-text {*
- The subsequent version of the proof applies the \name{hoare} method
- to reduce the Hoare statement to a purely logical problem that can be
- solved fully automatically.  Note that we have to specify the
- \texttt{WHILE} loop invariant in the original statement.
-*}
-
-lemma
-  "|- .{\<acute>M = 0 & \<acute>S = 0}.
-      WHILE \<acute>M ~= a
-      INV .{\<acute>S = \<acute>M * b}.
-      DO \<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1 OD
-      .{\<acute>S = a * b}."
-  by hoare auto
-
-
-subsection {* Summing natural numbers *}
-
-text {*
- We verify an imperative program to sum natural numbers up to a given
- limit.  First some functional definition for proper specification of
- the problem.
-*}
-
-text {*
- The following proof is quite explicit in the individual steps taken,
- with the \name{hoare} method only applied locally to take care of
- assignment and sequential composition.  Note that we express
- intermediate proof obligations in pure logic, without referring to the
- state space.
-*}
-
-declare atLeast0LessThan[symmetric,simp]
-
-theorem
-  "|- .{True}.
-      \<acute>S := 0; \<acute>I := 1;
-      WHILE \<acute>I ~= n
-      DO
-        \<acute>S := \<acute>S + \<acute>I;
-        \<acute>I := \<acute>I + 1
-      OD
-      .{\<acute>S = (SUM j<n. j)}."
-  (is "|- _ (_; ?while) _")
-proof -
-  let ?sum = "\<lambda>k::nat. SUM j<k. j"
-  let ?inv = "\<lambda>s i::nat. s = ?sum i"
-
-  have "|- .{True}. \<acute>S := 0; \<acute>I := 1 .{?inv \<acute>S \<acute>I}."
-  proof -
-    have "True --> 0 = ?sum 1"
-      by simp
-    also have "|- .{...}. \<acute>S := 0; \<acute>I := 1 .{?inv \<acute>S \<acute>I}."
-      by hoare
-    finally show ?thesis .
-  qed
-  also have "|- ... ?while .{?inv \<acute>S \<acute>I & ~ \<acute>I ~= n}."
-  proof
-    let ?body = "\<acute>S := \<acute>S + \<acute>I; \<acute>I := \<acute>I + 1"
-    have "!!s i. ?inv s i & i ~= n -->  ?inv (s + i) (i + 1)"
-      by simp
-    also have "|- .{\<acute>S + \<acute>I = ?sum (\<acute>I + 1)}. ?body .{?inv \<acute>S \<acute>I}."
-      by hoare
-    finally show "|- .{?inv \<acute>S \<acute>I & \<acute>I ~= n}. ?body .{?inv \<acute>S \<acute>I}." .
-  qed
-  also have "!!s i. s = ?sum i & ~ i ~= n --> s = ?sum n"
-    by simp
-  finally show ?thesis .
-qed
-
-text {*
- The next version uses the \name{hoare} method, while still explaining
- the resulting proof obligations in an abstract, structured manner.
-*}
-
-theorem
-  "|- .{True}.
-      \<acute>S := 0; \<acute>I := 1;
-      WHILE \<acute>I ~= n
-      INV .{\<acute>S = (SUM j<\<acute>I. j)}.
-      DO
-        \<acute>S := \<acute>S + \<acute>I;
-        \<acute>I := \<acute>I + 1
-      OD
-      .{\<acute>S = (SUM j<n. j)}."
-proof -
-  let ?sum = "\<lambda>k::nat. SUM j<k. j"
-  let ?inv = "\<lambda>s i::nat. s = ?sum i"
-
-  show ?thesis
-  proof hoare
-    show "?inv 0 1" by simp
-  next
-    fix s i assume "?inv s i & i ~= n"
-    thus "?inv (s + i) (i + 1)" by simp
-  next
-    fix s i assume "?inv s i & ~ i ~= n"
-    thus "s = ?sum n" by simp
-  qed
-qed
-
-text {*
- Certainly, this proof may be done fully automatically as well, provided
- that the invariant is given beforehand.
-*}
-
-theorem
-  "|- .{True}.
-      \<acute>S := 0; \<acute>I := 1;
-      WHILE \<acute>I ~= n
-      INV .{\<acute>S = (SUM j<\<acute>I. j)}.
-      DO
-        \<acute>S := \<acute>S + \<acute>I;
-        \<acute>I := \<acute>I + 1
-      OD
-      .{\<acute>S = (SUM j<n. j)}."
-  by hoare auto
-
-
-subsection {* Time *}
-
-text {*
-  A simple embedding of time in Hoare logic: function @{text timeit}
-  inserts an extra variable to keep track of the elapsed time.
-*}
-
-record tstate = time :: nat
-
-types 'a time = "\<lparr>time :: nat, \<dots> :: 'a\<rparr>"
-
-consts timeit :: "'a time com \<Rightarrow> 'a time com"
-primrec
-  "timeit (Basic f) = (Basic f; Basic(\<lambda>s. s\<lparr>time := Suc (time s)\<rparr>))"
-  "timeit (c1; c2) = (timeit c1; timeit c2)"
-  "timeit (Cond b c1 c2) = Cond b (timeit c1) (timeit c2)"
-  "timeit (While b iv c) = While b iv (timeit c)"
-
-record tvars = tstate +
-  I :: nat
-  J :: nat
-
-lemma lem: "(0::nat) < n \<Longrightarrow> n + n \<le> Suc (n * n)"
-  by (induct n) simp_all
-
-lemma "|- .{i = \<acute>I & \<acute>time = 0}.
- timeit(
- WHILE \<acute>I \<noteq> 0
- INV .{2*\<acute>time + \<acute>I*\<acute>I + 5*\<acute>I = i*i + 5*i}.
- DO
-   \<acute>J := \<acute>I;
-   WHILE \<acute>J \<noteq> 0
-   INV .{0 < \<acute>I & 2*\<acute>time + \<acute>I*\<acute>I + 3*\<acute>I + 2*\<acute>J - 2 = i*i + 5*i}.
-   DO \<acute>J := \<acute>J - 1 OD;
-   \<acute>I := \<acute>I - 1
- OD
- ) .{2*\<acute>time = i*i + 5*i}."
-  apply simp
-  apply hoare
-      apply simp
-     apply clarsimp
-    apply clarsimp
-   apply arith
-   prefer 2
-   apply clarsimp
-  apply (clarsimp simp: nat_distrib)
-  apply (frule lem)
-  apply arith
-  done
-
-end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Isar_examples/Hoare_Ex.thy	Mon Jun 22 23:48:24 2009 +0200
@@ -0,0 +1,329 @@
+header {* Using Hoare Logic *}
+
+theory Hoare_Ex
+imports Hoare
+begin
+
+subsection {* State spaces *}
+
+text {*
+ First of all we provide a store of program variables that
+ occur in any of the programs considered later.  Slightly unexpected
+ things may happen when attempting to work with undeclared variables.
+*}
+
+record vars =
+  I :: nat
+  M :: nat
+  N :: nat
+  S :: nat
+
+text {*
+ While all of our variables happen to have the same type, nothing
+ would prevent us from working with many-sorted programs as well, or
+ even polymorphic ones.  Also note that Isabelle/HOL's extensible
+ record types even provide simple means to extend the state space
+ later.
+*}
+
+
+subsection {* Basic examples *}
+
+text {*
+ We look at a few trivialities involving assignment and sequential
+ composition, in order to get an idea of how to work with our
+ formulation of Hoare Logic.
+*}
+
+text {*
+ Using the basic \name{assign} rule directly is a bit cumbersome.
+*}
+
+lemma
+  "|- .{\<acute>(N_update (\<lambda>_. (2 * \<acute>N))) : .{\<acute>N = 10}.}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
+  by (rule assign)
+
+text {*
+ Certainly we want the state modification already done, e.g.\ by
+ simplification.  The \name{hoare} method performs the basic state
+ update for us; we may apply the Simplifier afterwards to achieve
+ ``obvious'' consequences as well.
+*}
+
+lemma "|- .{True}. \<acute>N := 10 .{\<acute>N = 10}."
+  by hoare
+
+lemma "|- .{2 * \<acute>N = 10}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
+  by hoare
+
+lemma "|- .{\<acute>N = 5}. \<acute>N := 2 * \<acute>N .{\<acute>N = 10}."
+  by hoare simp
+
+lemma "|- .{\<acute>N + 1 = a + 1}. \<acute>N := \<acute>N + 1 .{\<acute>N = a + 1}."
+  by hoare
+
+lemma "|- .{\<acute>N = a}. \<acute>N := \<acute>N + 1 .{\<acute>N = a + 1}."
+  by hoare simp
+
+lemma "|- .{a = a & b = b}. \<acute>M := a; \<acute>N := b .{\<acute>M = a & \<acute>N = b}."
+  by hoare
+
+lemma "|- .{True}. \<acute>M := a; \<acute>N := b .{\<acute>M = a & \<acute>N = b}."
+  by hoare simp
+
+lemma
+"|- .{\<acute>M = a & \<acute>N = b}.
+    \<acute>I := \<acute>M; \<acute>M := \<acute>N; \<acute>N := \<acute>I
+    .{\<acute>M = b & \<acute>N = a}."
+  by hoare simp
+
+text {*
+ It is important to note that statements like the following one can
+ only be proven for each individual program variable.  Due to the
+ extra-logical nature of record fields, we cannot formulate a theorem
+ relating record selectors and updates schematically.
+*}
+
+lemma "|- .{\<acute>N = a}. \<acute>N := \<acute>N .{\<acute>N = a}."
+  by hoare
+
+lemma "|- .{\<acute>x = a}. \<acute>x := \<acute>x .{\<acute>x = a}."
+  oops
+
+lemma
+  "Valid {s. x s = a} (Basic (\<lambda>s. x_update (x s) s)) {s. x s = n}"
+  -- {* same statement without concrete syntax *}
+  oops
+
+
+text {*
+ In the following assignments we make use of the consequence rule in
+ order to achieve the intended precondition.  Certainly, the
+ \name{hoare} method is able to handle this case, too.
+*}
+
+lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
+proof -
+  have ".{\<acute>M = \<acute>N}. <= .{\<acute>M + 1 ~= \<acute>N}."
+    by auto
+  also have "|- ... \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
+    by hoare
+  finally show ?thesis .
+qed
+
+lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
+proof -
+  have "!!m n::nat. m = n --> m + 1 ~= n"
+      -- {* inclusion of assertions expressed in ``pure'' logic, *}
+      -- {* without mentioning the state space *}
+    by simp
+  also have "|- .{\<acute>M + 1 ~= \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
+    by hoare
+  finally show ?thesis .
+qed
+
+lemma "|- .{\<acute>M = \<acute>N}. \<acute>M := \<acute>M + 1 .{\<acute>M ~= \<acute>N}."
+  by hoare simp
+
+
+subsection {* Multiplication by addition *}
+
+text {*
+ We now do some basic examples of actual \texttt{WHILE} programs.
+ This one is a loop for calculating the product of two natural
+ numbers by iterated addition.  We first give a detailed structured
+ proof based on single-step Hoare rules.
+*}
+
+lemma
+  "|- .{\<acute>M = 0 & \<acute>S = 0}.
+      WHILE \<acute>M ~= a
+      DO \<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1 OD
+      .{\<acute>S = a * b}."
+proof -
+  let "|- _ ?while _" = ?thesis
+  let ".{\<acute>?inv}." = ".{\<acute>S = \<acute>M * b}."
+
+  have ".{\<acute>M = 0 & \<acute>S = 0}. <= .{\<acute>?inv}." by auto
+  also have "|- ... ?while .{\<acute>?inv & ~ (\<acute>M ~= a)}."
+  proof
+    let ?c = "\<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1"
+    have ".{\<acute>?inv & \<acute>M ~= a}. <= .{\<acute>S + b = (\<acute>M + 1) * b}."
+      by auto
+    also have "|- ... ?c .{\<acute>?inv}." by hoare
+    finally show "|- .{\<acute>?inv & \<acute>M ~= a}. ?c .{\<acute>?inv}." .
+  qed
+  also have "... <= .{\<acute>S = a * b}." by auto
+  finally show ?thesis .
+qed
+
+text {*
+ The subsequent version of the proof applies the \name{hoare} method
+ to reduce the Hoare statement to a purely logical problem that can be
+ solved fully automatically.  Note that we have to specify the
+ \texttt{WHILE} loop invariant in the original statement.
+*}
+
+lemma
+  "|- .{\<acute>M = 0 & \<acute>S = 0}.
+      WHILE \<acute>M ~= a
+      INV .{\<acute>S = \<acute>M * b}.
+      DO \<acute>S := \<acute>S + b; \<acute>M := \<acute>M + 1 OD
+      .{\<acute>S = a * b}."
+  by hoare auto
+
+
+subsection {* Summing natural numbers *}
+
+text {*
+ We verify an imperative program that sums the natural numbers up to
+ a given limit.  The functional specification of the problem is
+ phrased in terms of the summation operator @{text SUM} provided by
+ the library.
+*}
+
+text {*
+ The following proof is quite explicit in the individual steps taken,
+ with the \name{hoare} method only applied locally to take care of
+ assignment and sequential composition.  Note that we express
+ intermediate proof obligations in pure logic, without referring to the
+ state space.
+*}
+
+declare atLeast0LessThan[symmetric,simp]
+
+theorem
+  "|- .{True}.
+      \<acute>S := 0; \<acute>I := 1;
+      WHILE \<acute>I ~= n
+      DO
+        \<acute>S := \<acute>S + \<acute>I;
+        \<acute>I := \<acute>I + 1
+      OD
+      .{\<acute>S = (SUM j<n. j)}."
+  (is "|- _ (_; ?while) _")
+proof -
+  let ?sum = "\<lambda>k::nat. SUM j<k. j"
+  let ?inv = "\<lambda>s i::nat. s = ?sum i"
+
+  have "|- .{True}. \<acute>S := 0; \<acute>I := 1 .{?inv \<acute>S \<acute>I}."
+  proof -
+    have "True --> 0 = ?sum 1"
+      by simp
+    also have "|- .{...}. \<acute>S := 0; \<acute>I := 1 .{?inv \<acute>S \<acute>I}."
+      by hoare
+    finally show ?thesis .
+  qed
+  also have "|- ... ?while .{?inv \<acute>S \<acute>I & ~ \<acute>I ~= n}."
+  proof
+    let ?body = "\<acute>S := \<acute>S + \<acute>I; \<acute>I := \<acute>I + 1"
+    have "!!s i. ?inv s i & i ~= n -->  ?inv (s + i) (i + 1)"
+      by simp
+    also have "|- .{\<acute>S + \<acute>I = ?sum (\<acute>I + 1)}. ?body .{?inv \<acute>S \<acute>I}."
+      by hoare
+    finally show "|- .{?inv \<acute>S \<acute>I & \<acute>I ~= n}. ?body .{?inv \<acute>S \<acute>I}." .
+  qed
+  also have "!!s i. s = ?sum i & ~ i ~= n --> s = ?sum n"
+    by simp
+  finally show ?thesis .
+qed
+
+text {*
+ The next version uses the \name{hoare} method, while still explaining
+ the resulting proof obligations in an abstract, structured manner.
+*}
+
+theorem
+  "|- .{True}.
+      \<acute>S := 0; \<acute>I := 1;
+      WHILE \<acute>I ~= n
+      INV .{\<acute>S = (SUM j<\<acute>I. j)}.
+      DO
+        \<acute>S := \<acute>S + \<acute>I;
+        \<acute>I := \<acute>I + 1
+      OD
+      .{\<acute>S = (SUM j<n. j)}."
+proof -
+  let ?sum = "\<lambda>k::nat. SUM j<k. j"
+  let ?inv = "\<lambda>s i::nat. s = ?sum i"
+
+  show ?thesis
+  proof hoare
+    show "?inv 0 1" by simp
+  next
+    fix s i assume "?inv s i & i ~= n"
+    thus "?inv (s + i) (i + 1)" by simp
+  next
+    fix s i assume "?inv s i & ~ i ~= n"
+    thus "s = ?sum n" by simp
+  qed
+qed
+
+text {*
+ Certainly, this proof may be done fully automatically as well,
+ provided that the invariant is given beforehand.
+*}
+
+theorem
+  "|- .{True}.
+      \<acute>S := 0; \<acute>I := 1;
+      WHILE \<acute>I ~= n
+      INV .{\<acute>S = (SUM j<\<acute>I. j)}.
+      DO
+        \<acute>S := \<acute>S + \<acute>I;
+        \<acute>I := \<acute>I + 1
+      OD
+      .{\<acute>S = (SUM j<n. j)}."
+  by hoare auto
+
+
+subsection {* Time *}
+
+text {*
+  A simple embedding of time in Hoare logic: function @{text timeit}
+  inserts an extra variable to keep track of the elapsed time.
+*}
+
+record tstate = time :: nat
+
+types 'a time = "\<lparr>time :: nat, \<dots> :: 'a\<rparr>"
+
+consts timeit :: "'a time com \<Rightarrow> 'a time com"
+primrec
+  "timeit (Basic f) = (Basic f; Basic(\<lambda>s. s\<lparr>time := Suc (time s)\<rparr>))"
+  "timeit (c1; c2) = (timeit c1; timeit c2)"
+  "timeit (Cond b c1 c2) = Cond b (timeit c1) (timeit c2)"
+  "timeit (While b iv c) = While b iv (timeit c)"
+
+record tvars = tstate +
+  I :: nat
+  J :: nat
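+
+text {*
+ For instance, @{text "timeit (\<acute>J := \<acute>J - 1)"} first performs the
+ assignment and then increments @{text time}, so each basic command of
+ the original program contributes exactly one time unit to the total.
+*}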
+
+lemma lem: "(0::nat) < n \<Longrightarrow> n + n \<le> Suc (n * n)"
+  by (induct n) simp_all
+
+lemma "|- .{i = \<acute>I & \<acute>time = 0}.
+ timeit(
+ WHILE \<acute>I \<noteq> 0
+ INV .{2*\<acute>time + \<acute>I*\<acute>I + 5*\<acute>I = i*i + 5*i}.
+ DO
+   \<acute>J := \<acute>I;
+   WHILE \<acute>J \<noteq> 0
+   INV .{0 < \<acute>I & 2*\<acute>time + \<acute>I*\<acute>I + 3*\<acute>I + 2*\<acute>J - 2 = i*i + 5*i}.
+   DO \<acute>J := \<acute>J - 1 OD;
+   \<acute>I := \<acute>I - 1
+ OD
+ ) .{2*\<acute>time = i*i + 5*i}."
+  apply simp
+  apply hoare
+      apply simp
+     apply clarsimp
+    apply clarsimp
+   apply arith
+   prefer 2
+   apply clarsimp
+  apply (clarsimp simp: nat_distrib)
+  apply (frule lem)
+  apply arith
+  done
+
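+text {*
+ The postcondition can be checked by a short informal calculation: an
+ outer iteration entered with current value @{text I} performs
+ @{text I} inner decrements plus the two assignments to @{text J} and
+ @{text I}, i.e.\ @{text "I + 2"} basic steps.  Summing over
+ @{text "I = i, \<dots>, 1"} yields @{text "i * (i + 1) div 2 + 2 * i"}
+ steps in total, which amounts to @{text "2 * time = i * i + 5 * i"}.
+*}
+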
+end
--- a/src/HOL/Isar_examples/KnasterTarski.thy	Mon Jun 22 22:51:08 2009 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,111 +0,0 @@
-(*  Title:      HOL/Isar_examples/KnasterTarski.thy
-    Author:     Markus Wenzel, TU Muenchen
-
-Typical textbook proof example.
-*)
-
-header {* Textbook-style reasoning: the Knaster-Tarski Theorem *}
-
-theory KnasterTarski
-imports Main Lattice_Syntax
-begin
-
-
-subsection {* Prose version *}
-
-text {*
-  According to the textbook \cite[pages 93--94]{davey-priestley}, the
-  Knaster-Tarski fixpoint theorem is as follows.\footnote{We have
-  dualized the argument, and tuned the notation a little bit.}
-
-  \textbf{The Knaster-Tarski Fixpoint Theorem.}  Let @{text L} be a
-  complete lattice and @{text "f: L \<rightarrow> L"} an order-preserving map.
-  Then @{text "\<Sqinter>{x \<in> L | f(x) \<le> x}"} is a fixpoint of @{text f}.
-
-  \textbf{Proof.} Let @{text "H = {x \<in> L | f(x) \<le> x}"} and @{text "a =
-  \<Sqinter>H"}.  For all @{text "x \<in> H"} we have @{text "a \<le> x"}, so @{text
-  "f(a) \<le> f(x) \<le> x"}.  Thus @{text "f(a)"} is a lower bound of @{text
-  H}, whence @{text "f(a) \<le> a"}.  We now use this inequality to prove
-  the reverse one (!) and thereby complete the proof that @{text a} is
-  a fixpoint.  Since @{text f} is order-preserving, @{text "f(f(a)) \<le>
-  f(a)"}.  This says @{text "f(a) \<in> H"}, so @{text "a \<le> f(a)"}.
-*}
-
-
-subsection {* Formal versions *}
-
-text {*
-  The Isar proof below closely follows the original presentation.
-  Virtually all of the prose narration has been rephrased in terms of
-  formal Isar language elements.  Just as many textbook-style proofs,
-  there is a strong bias towards forward proof, and several bends in
-  the course of reasoning.
-*}
-
-theorem Knaster_Tarski:
-  fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
-  assumes "mono f"
-  shows "\<exists>a. f a = a"
-proof
-  let ?H = "{u. f u \<le> u}"
-  let ?a = "\<Sqinter>?H"
-  show "f ?a = ?a"
-  proof -
-    {
-      fix x
-      assume "x \<in> ?H"
-      then have "?a \<le> x" by (rule Inf_lower)
-      with `mono f` have "f ?a \<le> f x" ..
-      also from `x \<in> ?H` have "\<dots> \<le> x" ..
-      finally have "f ?a \<le> x" .
-    }
-    then have "f ?a \<le> ?a" by (rule Inf_greatest)
-    {
-      also presume "\<dots> \<le> f ?a"
-      finally (order_antisym) show ?thesis .
-    }
-    from `mono f` and `f ?a \<le> ?a` have "f (f ?a) \<le> f ?a" ..
-    then have "f ?a \<in> ?H" ..
-    then show "?a \<le> f ?a" by (rule Inf_lower)
-  qed
-qed
-
-text {*
-  Above we have used several advanced Isar language elements, such as
-  explicit block structure and weak assumptions.  Thus we have
-  mimicked the particular way of reasoning of the original text.
-
-  In the subsequent version the order of reasoning is changed to
-  achieve structured top-down decomposition of the problem at the
-  outer level, while only the inner steps of reasoning are done in a
-  forward manner.  We are certainly more at ease here, requiring only
-  the most basic features of the Isar language.
-*}
-
-theorem Knaster_Tarski':
-  fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
-  assumes "mono f"
-  shows "\<exists>a. f a = a"
-proof
-  let ?H = "{u. f u \<le> u}"
-  let ?a = "\<Sqinter>?H"
-  show "f ?a = ?a"
-  proof (rule order_antisym)
-    show "f ?a \<le> ?a"
-    proof (rule Inf_greatest)
-      fix x
-      assume "x \<in> ?H"
-      then have "?a \<le> x" by (rule Inf_lower)
-      with `mono f` have "f ?a \<le> f x" ..
-      also from `x \<in> ?H` have "\<dots> \<le> x" ..
-      finally show "f ?a \<le> x" .
-    qed
-    show "?a \<le> f ?a"
-    proof (rule Inf_lower)
-      from `mono f` and `f ?a \<le> ?a` have "f (f ?a) \<le> f ?a" ..
-      then show "f ?a \<in> ?H" ..
-    qed
-  qed
-qed
-
-end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Isar_examples/Knaster_Tarski.thy	Mon Jun 22 23:48:24 2009 +0200
@@ -0,0 +1,111 @@
+(*  Title:      HOL/Isar_examples/Knaster_Tarski.thy
+    Author:     Markus Wenzel, TU Muenchen
+
+Typical textbook proof example.
+*)
+
+header {* Textbook-style reasoning: the Knaster-Tarski Theorem *}
+
+theory Knaster_Tarski
+imports Main Lattice_Syntax
+begin
+
+
+subsection {* Prose version *}
+
+text {*
+  According to the textbook \cite[pages 93--94]{davey-priestley}, the
+  Knaster-Tarski fixpoint theorem is as follows.\footnote{We have
+  dualized the argument, and tuned the notation a little bit.}
+
+  \textbf{The Knaster-Tarski Fixpoint Theorem.}  Let @{text L} be a
+  complete lattice and @{text "f: L \<rightarrow> L"} an order-preserving map.
+  Then @{text "\<Sqinter>{x \<in> L | f(x) \<le> x}"} is a fixpoint of @{text f}.
+
+  \textbf{Proof.} Let @{text "H = {x \<in> L | f(x) \<le> x}"} and @{text "a =
+  \<Sqinter>H"}.  For all @{text "x \<in> H"} we have @{text "a \<le> x"}, so @{text
+  "f(a) \<le> f(x) \<le> x"}.  Thus @{text "f(a)"} is a lower bound of @{text
+  H}, whence @{text "f(a) \<le> a"}.  We now use this inequality to prove
+  the reverse one (!) and thereby complete the proof that @{text a} is
+  a fixpoint.  Since @{text f} is order-preserving, @{text "f(f(a)) \<le>
+  f(a)"}.  This says @{text "f(a) \<in> H"}, so @{text "a \<le> f(a)"}.
+*}
+
+
+subsection {* Formal versions *}
+
+text {*
+  The Isar proof below closely follows the original presentation.
+  Virtually all of the prose narration has been rephrased in terms of
+  formal Isar language elements.  As in many textbook-style proofs,
+  there is a strong bias towards forward proof, and several bends in
+  the course of reasoning.
+*}
+
+theorem Knaster_Tarski:
+  fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
+  assumes "mono f"
+  shows "\<exists>a. f a = a"
+proof
+  let ?H = "{u. f u \<le> u}"
+  let ?a = "\<Sqinter>?H"
+  show "f ?a = ?a"
+  proof -
+    {
+      fix x
+      assume "x \<in> ?H"
+      then have "?a \<le> x" by (rule Inf_lower)
+      with `mono f` have "f ?a \<le> f x" ..
+      also from `x \<in> ?H` have "\<dots> \<le> x" ..
+      finally have "f ?a \<le> x" .
+    }
+    then have "f ?a \<le> ?a" by (rule Inf_greatest)
+    {
+      also presume "\<dots> \<le> f ?a"
+      finally (order_antisym) show ?thesis .
+    }
+    from `mono f` and `f ?a \<le> ?a` have "f (f ?a) \<le> f ?a" ..
+    then have "f ?a \<in> ?H" ..
+    then show "?a \<le> f ?a" by (rule Inf_lower)
+  qed
+qed
+
+text {*
+  Above we have used several advanced Isar language elements, such as
+  explicit block structure and weak assumptions.  Thus we have
+  mimicked the particular way of reasoning of the original text.
+
+  In the subsequent version the order of reasoning is changed to
+  achieve structured top-down decomposition of the problem at the
+  outer level, while only the inner steps of reasoning are done in a
+  forward manner.  We are certainly more at ease here, requiring only
+  the most basic features of the Isar language.
+*}
+
+theorem Knaster_Tarski':
+  fixes f :: "'a::complete_lattice \<Rightarrow> 'a"
+  assumes "mono f"
+  shows "\<exists>a. f a = a"
+proof
+  let ?H = "{u. f u \<le> u}"
+  let ?a = "\<Sqinter>?H"
+  show "f ?a = ?a"
+  proof (rule order_antisym)
+    show "f ?a \<le> ?a"
+    proof (rule Inf_greatest)
+      fix x
+      assume "x \<in> ?H"
+      then have "?a \<le> x" by (rule Inf_lower)
+      with `mono f` have "f ?a \<le> f x" ..
+      also from `x \<in> ?H` have "\<dots> \<le> x" ..
+      finally show "f ?a \<le> x" .
+    qed
+    show "?a \<le> f ?a"
+    proof (rule Inf_lower)
+      from `mono f` and `f ?a \<le> ?a` have "f (f ?a) \<le> f ?a" ..
+      then show "f ?a \<in> ?H" ..
+    qed
+  qed
+qed
+
+end
--- a/src/HOL/Isar_examples/MutilatedCheckerboard.thy	Mon Jun 22 22:51:08 2009 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,299 +0,0 @@
-(*  Title:      HOL/Isar_examples/MutilatedCheckerboard.thy
-    ID:         $Id$
-    Author:     Markus Wenzel, TU Muenchen (Isar document)
-                Lawrence C Paulson, Cambridge University Computer Laboratory (original scripts)
-*)
-
-header {* The Mutilated Checker Board Problem *}
-
-theory MutilatedCheckerboard imports Main begin
-
-text {*
- The Mutilated Checker Board Problem, formalized inductively.  See
- \cite{paulson-mutilated-board} and
- \url{http://isabelle.in.tum.de/library/HOL/Induct/Mutil.html} for the
- original tactic script version.
-*}
-
-subsection {* Tilings *}
-
-inductive_set
-  tiling :: "'a set set => 'a set set"
-  for A :: "'a set set"
-  where
-    empty: "{} : tiling A"
-  | Un: "a : A ==> t : tiling A ==> a <= - t ==> a Un t : tiling A"
-
-
-text "The union of two disjoint tilings is a tiling."
-
-lemma tiling_Un:
-  assumes "t : tiling A" and "u : tiling A" and "t Int u = {}"
-  shows "t Un u : tiling A"
-proof -
-  let ?T = "tiling A"
-  from `t : ?T` and `t Int u = {}`
-  show "t Un u : ?T"
-  proof (induct t)
-    case empty
-    with `u : ?T` show "{} Un u : ?T" by simp
-  next
-    case (Un a t)
-    show "(a Un t) Un u : ?T"
-    proof -
-      have "a Un (t Un u) : ?T"
-	using `a : A`
-      proof (rule tiling.Un)
-        from `(a Un t) Int u = {}` have "t Int u = {}" by blast
-        then show "t Un u: ?T" by (rule Un)
-        from `a <= - t` and `(a Un t) Int u = {}`
-	show "a <= - (t Un u)" by blast
-      qed
-      also have "a Un (t Un u) = (a Un t) Un u"
-        by (simp only: Un_assoc)
-      finally show ?thesis .
-    qed
-  qed
-qed
-
-
-subsection {* Basic properties of ``below'' *}
-
-constdefs
-  below :: "nat => nat set"
-  "below n == {i. i < n}"
-
-lemma below_less_iff [iff]: "(i: below k) = (i < k)"
-  by (simp add: below_def)
-
-lemma below_0: "below 0 = {}"
-  by (simp add: below_def)
-
-lemma Sigma_Suc1:
-    "m = n + 1 ==> below m <*> B = ({n} <*> B) Un (below n <*> B)"
-  by (simp add: below_def less_Suc_eq) blast
-
-lemma Sigma_Suc2:
-    "m = n + 2 ==> A <*> below m =
-      (A <*> {n}) Un (A <*> {n + 1}) Un (A <*> below n)"
-  by (auto simp add: below_def)
-
-lemmas Sigma_Suc = Sigma_Suc1 Sigma_Suc2
-
-
-subsection {* Basic properties of ``evnodd'' *}
-
-constdefs
-  evnodd :: "(nat * nat) set => nat => (nat * nat) set"
-  "evnodd A b == A Int {(i, j). (i + j) mod 2 = b}"
-
-lemma evnodd_iff:
-    "(i, j): evnodd A b = ((i, j): A  & (i + j) mod 2 = b)"
-  by (simp add: evnodd_def)
-
-lemma evnodd_subset: "evnodd A b <= A"
-  by (unfold evnodd_def, rule Int_lower1)
-
-lemma evnoddD: "x : evnodd A b ==> x : A"
-  by (rule subsetD, rule evnodd_subset)
-
-lemma evnodd_finite: "finite A ==> finite (evnodd A b)"
-  by (rule finite_subset, rule evnodd_subset)
-
-lemma evnodd_Un: "evnodd (A Un B) b = evnodd A b Un evnodd B b"
-  by (unfold evnodd_def) blast
-
-lemma evnodd_Diff: "evnodd (A - B) b = evnodd A b - evnodd B b"
-  by (unfold evnodd_def) blast
-
-lemma evnodd_empty: "evnodd {} b = {}"
-  by (simp add: evnodd_def)
-
-lemma evnodd_insert: "evnodd (insert (i, j) C) b =
-    (if (i + j) mod 2 = b
-      then insert (i, j) (evnodd C b) else evnodd C b)"
-  by (simp add: evnodd_def) blast
-
-
-subsection {* Dominoes *}
-
-inductive_set
-  domino :: "(nat * nat) set set"
-  where
-    horiz: "{(i, j), (i, j + 1)} : domino"
-  | vertl: "{(i, j), (i + 1, j)} : domino"
-
-lemma dominoes_tile_row:
-  "{i} <*> below (2 * n) : tiling domino"
-  (is "?B n : ?T")
-proof (induct n)
-  case 0
-  show ?case by (simp add: below_0 tiling.empty)
-next
-  case (Suc n)
-  let ?a = "{i} <*> {2 * n + 1} Un {i} <*> {2 * n}"
-  have "?B (Suc n) = ?a Un ?B n"
-    by (auto simp add: Sigma_Suc Un_assoc)
-  moreover have "... : ?T"
-  proof (rule tiling.Un)
-    have "{(i, 2 * n), (i, 2 * n + 1)} : domino"
-      by (rule domino.horiz)
-    also have "{(i, 2 * n), (i, 2 * n + 1)} = ?a" by blast
-    finally show "... : domino" .
-    show "?B n : ?T" by (rule Suc)
-    show "?a <= - ?B n" by blast
-  qed
-  ultimately show ?case by simp
-qed
-
-lemma dominoes_tile_matrix:
-  "below m <*> below (2 * n) : tiling domino"
-  (is "?B m : ?T")
-proof (induct m)
-  case 0
-  show ?case by (simp add: below_0 tiling.empty)
-next
-  case (Suc m)
-  let ?t = "{m} <*> below (2 * n)"
-  have "?B (Suc m) = ?t Un ?B m" by (simp add: Sigma_Suc)
-  moreover have "... : ?T"
-  proof (rule tiling_Un)
-    show "?t : ?T" by (rule dominoes_tile_row)
-    show "?B m : ?T" by (rule Suc)
-    show "?t Int ?B m = {}" by blast
-  qed
-  ultimately show ?case by simp
-qed
-
-lemma domino_singleton:
-  assumes d: "d : domino" and "b < 2"
-  shows "EX i j. evnodd d b = {(i, j)}"  (is "?P d")
-  using d
-proof induct
-  from `b < 2` have b_cases: "b = 0 | b = 1" by arith
-  fix i j
-  note [simp] = evnodd_empty evnodd_insert mod_Suc
-  from b_cases show "?P {(i, j), (i, j + 1)}" by rule auto
-  from b_cases show "?P {(i, j), (i + 1, j)}" by rule auto
-qed
-
-lemma domino_finite:
-  assumes d: "d: domino"
-  shows "finite d"
-  using d
-proof induct
-  fix i j :: nat
-  show "finite {(i, j), (i, j + 1)}" by (intro finite.intros)
-  show "finite {(i, j), (i + 1, j)}" by (intro finite.intros)
-qed
-
-
-subsection {* Tilings of dominoes *}
-
-lemma tiling_domino_finite:
-  assumes t: "t : tiling domino"  (is "t : ?T")
-  shows "finite t"  (is "?F t")
-  using t
-proof induct
-  show "?F {}" by (rule finite.emptyI)
-  fix a t assume "?F t"
-  assume "a : domino" then have "?F a" by (rule domino_finite)
-  from this and `?F t` show "?F (a Un t)" by (rule finite_UnI)
-qed
-
-lemma tiling_domino_01:
-  assumes t: "t : tiling domino"  (is "t : ?T")
-  shows "card (evnodd t 0) = card (evnodd t 1)"
-  using t
-proof induct
-  case empty
-  show ?case by (simp add: evnodd_def)
-next
-  case (Un a t)
-  let ?e = evnodd
-  note hyp = `card (?e t 0) = card (?e t 1)`
-    and at = `a <= - t`
-  have card_suc:
-    "!!b. b < 2 ==> card (?e (a Un t) b) = Suc (card (?e t b))"
-  proof -
-    fix b :: nat assume "b < 2"
-    have "?e (a Un t) b = ?e a b Un ?e t b" by (rule evnodd_Un)
-    also obtain i j where e: "?e a b = {(i, j)}"
-    proof -
-      from `a \<in> domino` and `b < 2`
-      have "EX i j. ?e a b = {(i, j)}" by (rule domino_singleton)
-      then show ?thesis by (blast intro: that)
-    qed
-    moreover have "... Un ?e t b = insert (i, j) (?e t b)" by simp
-    moreover have "card ... = Suc (card (?e t b))"
-    proof (rule card_insert_disjoint)
-      from `t \<in> tiling domino` have "finite t"
-	by (rule tiling_domino_finite)
-      then show "finite (?e t b)"
-        by (rule evnodd_finite)
-      from e have "(i, j) : ?e a b" by simp
-      with at show "(i, j) ~: ?e t b" by (blast dest: evnoddD)
-    qed
-    ultimately show "?thesis b" by simp
-  qed
-  then have "card (?e (a Un t) 0) = Suc (card (?e t 0))" by simp
-  also from hyp have "card (?e t 0) = card (?e t 1)" .
-  also from card_suc have "Suc ... = card (?e (a Un t) 1)"
-    by simp
-  finally show ?case .
-qed
-
-
-subsection {* Main theorem *}
-
-constdefs
-  mutilated_board :: "nat => nat => (nat * nat) set"
-  "mutilated_board m n ==
-    below (2 * (m + 1)) <*> below (2 * (n + 1))
-      - {(0, 0)} - {(2 * m + 1, 2 * n + 1)}"
-
-theorem mutil_not_tiling: "mutilated_board m n ~: tiling domino"
-proof (unfold mutilated_board_def)
-  let ?T = "tiling domino"
-  let ?t = "below (2 * (m + 1)) <*> below (2 * (n + 1))"
-  let ?t' = "?t - {(0, 0)}"
-  let ?t'' = "?t' - {(2 * m + 1, 2 * n + 1)}"
-
-  show "?t'' ~: ?T"
-  proof
-    have t: "?t : ?T" by (rule dominoes_tile_matrix)
-    assume t'': "?t'' : ?T"
-
-    let ?e = evnodd
-    have fin: "finite (?e ?t 0)"
-      by (rule evnodd_finite, rule tiling_domino_finite, rule t)
-
-    note [simp] = evnodd_iff evnodd_empty evnodd_insert evnodd_Diff
-    have "card (?e ?t'' 0) < card (?e ?t' 0)"
-    proof -
-      have "card (?e ?t' 0 - {(2 * m + 1, 2 * n + 1)})
-        < card (?e ?t' 0)"
-      proof (rule card_Diff1_less)
-        from _ fin show "finite (?e ?t' 0)"
-          by (rule finite_subset) auto
-        show "(2 * m + 1, 2 * n + 1) : ?e ?t' 0" by simp
-      qed
-      then show ?thesis by simp
-    qed
-    also have "... < card (?e ?t 0)"
-    proof -
-      have "(0, 0) : ?e ?t 0" by simp
-      with fin have "card (?e ?t 0 - {(0, 0)}) < card (?e ?t 0)"
-        by (rule card_Diff1_less)
-      then show ?thesis by simp
-    qed
-    also from t have "... = card (?e ?t 1)"
-      by (rule tiling_domino_01)
-    also have "?e ?t 1 = ?e ?t'' 1" by simp
-    also from t'' have "card ... = card (?e ?t'' 0)"
-      by (rule tiling_domino_01 [symmetric])
-    finally have "... < ..." . then show False ..
-  qed
-qed
-
-end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Isar_examples/Mutilated_Checkerboard.thy	Mon Jun 22 23:48:24 2009 +0200
@@ -0,0 +1,300 @@
+(*  Title:      HOL/Isar_examples/Mutilated_Checkerboard.thy
+    Author:     Markus Wenzel, TU Muenchen (Isar document)
+    Author:     Lawrence C Paulson, Cambridge University Computer Laboratory (original scripts)
+*)
+
+header {* The Mutilated Checker Board Problem *}
+
+theory Mutilated_Checkerboard
+imports Main
+begin
+
+text {*
+ The Mutilated Checker Board Problem, formalized inductively.  See
+ \cite{paulson-mutilated-board} and
+ \url{http://isabelle.in.tum.de/library/HOL/Induct/Mutil.html} for the
+ original tactic script version.
+*}
+
+subsection {* Tilings *}
+
+inductive_set
+  tiling :: "'a set set => 'a set set"
+  for A :: "'a set set"
+  where
+    empty: "{} : tiling A"
+  | Un: "a : A ==> t : tiling A ==> a <= - t ==> a Un t : tiling A"
+
+
+text "The union of two disjoint tilings is a tiling."
+
+lemma tiling_Un:
+  assumes "t : tiling A" and "u : tiling A" and "t Int u = {}"
+  shows "t Un u : tiling A"
+proof -
+  let ?T = "tiling A"
+  from `t : ?T` and `t Int u = {}`
+  show "t Un u : ?T"
+  proof (induct t)
+    case empty
+    with `u : ?T` show "{} Un u : ?T" by simp
+  next
+    case (Un a t)
+    show "(a Un t) Un u : ?T"
+    proof -
+      have "a Un (t Un u) : ?T"
+	using `a : A`
+      proof (rule tiling.Un)
+        from `(a Un t) Int u = {}` have "t Int u = {}" by blast
+        then show "t Un u: ?T" by (rule Un)
+        from `a <= - t` and `(a Un t) Int u = {}`
+	show "a <= - (t Un u)" by blast
+      qed
+      also have "a Un (t Un u) = (a Un t) Un u"
+        by (simp only: Un_assoc)
+      finally show ?thesis .
+    qed
+  qed
+qed
+
+
+subsection {* Basic properties of ``below'' *}
+
+constdefs
+  below :: "nat => nat set"
+  "below n == {i. i < n}"
+
+lemma below_less_iff [iff]: "(i: below k) = (i < k)"
+  by (simp add: below_def)
+
+lemma below_0: "below 0 = {}"
+  by (simp add: below_def)
+
+lemma Sigma_Suc1:
+    "m = n + 1 ==> below m <*> B = ({n} <*> B) Un (below n <*> B)"
+  by (simp add: below_def less_Suc_eq) blast
+
+lemma Sigma_Suc2:
+    "m = n + 2 ==> A <*> below m =
+      (A <*> {n}) Un (A <*> {n + 1}) Un (A <*> below n)"
+  by (auto simp add: below_def)
+
+lemmas Sigma_Suc = Sigma_Suc1 Sigma_Suc2
+
+
+subsection {* Basic properties of ``evnodd'' *}
+
+constdefs
+  evnodd :: "(nat * nat) set => nat => (nat * nat) set"
+  "evnodd A b == A Int {(i, j). (i + j) mod 2 = b}"
+
+lemma evnodd_iff:
+    "(i, j): evnodd A b = ((i, j): A  & (i + j) mod 2 = b)"
+  by (simp add: evnodd_def)
+
+lemma evnodd_subset: "evnodd A b <= A"
+  by (unfold evnodd_def, rule Int_lower1)
+
+lemma evnoddD: "x : evnodd A b ==> x : A"
+  by (rule subsetD, rule evnodd_subset)
+
+lemma evnodd_finite: "finite A ==> finite (evnodd A b)"
+  by (rule finite_subset, rule evnodd_subset)
+
+lemma evnodd_Un: "evnodd (A Un B) b = evnodd A b Un evnodd B b"
+  by (unfold evnodd_def) blast
+
+lemma evnodd_Diff: "evnodd (A - B) b = evnodd A b - evnodd B b"
+  by (unfold evnodd_def) blast
+
+lemma evnodd_empty: "evnodd {} b = {}"
+  by (simp add: evnodd_def)
+
+lemma evnodd_insert: "evnodd (insert (i, j) C) b =
+    (if (i + j) mod 2 = b
+      then insert (i, j) (evnodd C b) else evnodd C b)"
+  by (simp add: evnodd_def) blast
+
+
+subsection {* Dominoes *}
+
+inductive_set
+  domino :: "(nat * nat) set set"
+  where
+    horiz: "{(i, j), (i, j + 1)} : domino"
+  | vertl: "{(i, j), (i + 1, j)} : domino"
+
+lemma dominoes_tile_row:
+  "{i} <*> below (2 * n) : tiling domino"
+  (is "?B n : ?T")
+proof (induct n)
+  case 0
+  show ?case by (simp add: below_0 tiling.empty)
+next
+  case (Suc n)
+  let ?a = "{i} <*> {2 * n + 1} Un {i} <*> {2 * n}"
+  have "?B (Suc n) = ?a Un ?B n"
+    by (auto simp add: Sigma_Suc Un_assoc)
+  moreover have "... : ?T"
+  proof (rule tiling.Un)
+    have "{(i, 2 * n), (i, 2 * n + 1)} : domino"
+      by (rule domino.horiz)
+    also have "{(i, 2 * n), (i, 2 * n + 1)} = ?a" by blast
+    finally show "... : domino" .
+    show "?B n : ?T" by (rule Suc)
+    show "?a <= - ?B n" by blast
+  qed
+  ultimately show ?case by simp
+qed
+
+lemma dominoes_tile_matrix:
+  "below m <*> below (2 * n) : tiling domino"
+  (is "?B m : ?T")
+proof (induct m)
+  case 0
+  show ?case by (simp add: below_0 tiling.empty)
+next
+  case (Suc m)
+  let ?t = "{m} <*> below (2 * n)"
+  have "?B (Suc m) = ?t Un ?B m" by (simp add: Sigma_Suc)
+  moreover have "... : ?T"
+  proof (rule tiling_Un)
+    show "?t : ?T" by (rule dominoes_tile_row)
+    show "?B m : ?T" by (rule Suc)
+    show "?t Int ?B m = {}" by blast
+  qed
+  ultimately show ?case by simp
+qed
+
+lemma domino_singleton:
+  assumes d: "d : domino" and "b < 2"
+  shows "EX i j. evnodd d b = {(i, j)}"  (is "?P d")
+  using d
+proof induct
+  from `b < 2` have b_cases: "b = 0 | b = 1" by arith
+  fix i j
+  note [simp] = evnodd_empty evnodd_insert mod_Suc
+  from b_cases show "?P {(i, j), (i, j + 1)}" by rule auto
+  from b_cases show "?P {(i, j), (i + 1, j)}" by rule auto
+qed
+
+lemma domino_finite:
+  assumes d: "d: domino"
+  shows "finite d"
+  using d
+proof induct
+  fix i j :: nat
+  show "finite {(i, j), (i, j + 1)}" by (intro finite.intros)
+  show "finite {(i, j), (i + 1, j)}" by (intro finite.intros)
+qed
+
+
+subsection {* Tilings of dominoes *}
+
+lemma tiling_domino_finite:
+  assumes t: "t : tiling domino"  (is "t : ?T")
+  shows "finite t"  (is "?F t")
+  using t
+proof induct
+  show "?F {}" by (rule finite.emptyI)
+  fix a t assume "?F t"
+  assume "a : domino" then have "?F a" by (rule domino_finite)
+  from this and `?F t` show "?F (a Un t)" by (rule finite_UnI)
+qed
+
+lemma tiling_domino_01:
+  assumes t: "t : tiling domino"  (is "t : ?T")
+  shows "card (evnodd t 0) = card (evnodd t 1)"
+  using t
+proof induct
+  case empty
+  show ?case by (simp add: evnodd_def)
+next
+  case (Un a t)
+  let ?e = evnodd
+  note hyp = `card (?e t 0) = card (?e t 1)`
+    and at = `a <= - t`
+  have card_suc:
+    "!!b. b < 2 ==> card (?e (a Un t) b) = Suc (card (?e t b))"
+  proof -
+    fix b :: nat assume "b < 2"
+    have "?e (a Un t) b = ?e a b Un ?e t b" by (rule evnodd_Un)
+    also obtain i j where e: "?e a b = {(i, j)}"
+    proof -
+      from `a \<in> domino` and `b < 2`
+      have "EX i j. ?e a b = {(i, j)}" by (rule domino_singleton)
+      then show ?thesis by (blast intro: that)
+    qed
+    moreover have "... Un ?e t b = insert (i, j) (?e t b)" by simp
+    moreover have "card ... = Suc (card (?e t b))"
+    proof (rule card_insert_disjoint)
+      from `t \<in> tiling domino` have "finite t"
+	by (rule tiling_domino_finite)
+      then show "finite (?e t b)"
+        by (rule evnodd_finite)
+      from e have "(i, j) : ?e a b" by simp
+      with at show "(i, j) ~: ?e t b" by (blast dest: evnoddD)
+    qed
+    ultimately show "?thesis b" by simp
+  qed
+  then have "card (?e (a Un t) 0) = Suc (card (?e t 0))" by simp
+  also from hyp have "card (?e t 0) = card (?e t 1)" .
+  also from card_suc have "Suc ... = card (?e (a Un t) 1)"
+    by simp
+  finally show ?case .
+qed
+
+
+subsection {* Main theorem *}
+
+constdefs
+  mutilated_board :: "nat => nat => (nat * nat) set"
+  "mutilated_board m n ==
+    below (2 * (m + 1)) <*> below (2 * (n + 1))
+      - {(0, 0)} - {(2 * m + 1, 2 * n + 1)}"
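+
+text {*
+ The mutilated board is thus the full board of @{text "2 * (m + 1)"}
+ rows and @{text "2 * (n + 1)"} columns with two opposite corner
+ squares removed.  Both removed squares have an even coordinate sum,
+ i.e.\ the same colour, which is the imbalance exploited below.
+*}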
+
+theorem mutil_not_tiling: "mutilated_board m n ~: tiling domino"
+proof (unfold mutilated_board_def)
+  let ?T = "tiling domino"
+  let ?t = "below (2 * (m + 1)) <*> below (2 * (n + 1))"
+  let ?t' = "?t - {(0, 0)}"
+  let ?t'' = "?t' - {(2 * m + 1, 2 * n + 1)}"
+
+  show "?t'' ~: ?T"
+  proof
+    have t: "?t : ?T" by (rule dominoes_tile_matrix)
+    assume t'': "?t'' : ?T"
+
+    let ?e = evnodd
+    have fin: "finite (?e ?t 0)"
+      by (rule evnodd_finite, rule tiling_domino_finite, rule t)
+
+    note [simp] = evnodd_iff evnodd_empty evnodd_insert evnodd_Diff
+    have "card (?e ?t'' 0) < card (?e ?t' 0)"
+    proof -
+      have "card (?e ?t' 0 - {(2 * m + 1, 2 * n + 1)})
+        < card (?e ?t' 0)"
+      proof (rule card_Diff1_less)
+        from _ fin show "finite (?e ?t' 0)"
+          by (rule finite_subset) auto
+        show "(2 * m + 1, 2 * n + 1) : ?e ?t' 0" by simp
+      qed
+      then show ?thesis by simp
+    qed
+    also have "... < card (?e ?t 0)"
+    proof -
+      have "(0, 0) : ?e ?t 0" by simp
+      with fin have "card (?e ?t 0 - {(0, 0)}) < card (?e ?t 0)"
+        by (rule card_Diff1_less)
+      then show ?thesis by simp
+    qed
+    also from t have "... = card (?e ?t 1)"
+      by (rule tiling_domino_01)
+    also have "?e ?t 1 = ?e ?t'' 1" by simp
+    also from t'' have "card ... = card (?e ?t'' 0)"
+      by (rule tiling_domino_01 [symmetric])
+    finally have "... < ..." . then show False ..
+  qed
+qed
+
+end
--- a/src/HOL/Isar_examples/NestedDatatype.thy	Mon Jun 22 22:51:08 2009 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,87 +0,0 @@
-
-(* $Id$ *)
-
-header {* Nested datatypes *}
-
-theory NestedDatatype imports Main begin
-
-subsection {* Terms and substitution *}
-
-datatype ('a, 'b) "term" =
-    Var 'a
-  | App 'b "('a, 'b) term list"
-
-consts
-  subst_term :: "('a => ('a, 'b) term) => ('a, 'b) term => ('a, 'b) term"
-  subst_term_list ::
-    "('a => ('a, 'b) term) => ('a, 'b) term list => ('a, 'b) term list"
-
-primrec (subst)
-  "subst_term f (Var a) = f a"
-  "subst_term f (App b ts) = App b (subst_term_list f ts)"
-  "subst_term_list f [] = []"
-  "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
-
-
-text {*
- \medskip A simple lemma about composition of substitutions.
-*}
-
-lemma "subst_term (subst_term f1 o f2) t =
-      subst_term f1 (subst_term f2 t)"
-  and "subst_term_list (subst_term f1 o f2) ts =
-      subst_term_list f1 (subst_term_list f2 ts)"
-  by (induct t and ts) simp_all
-
-lemma "subst_term (subst_term f1 o f2) t =
-  subst_term f1 (subst_term f2 t)"
-proof -
-  let "?P t" = ?thesis
-  let ?Q = "\<lambda>ts. subst_term_list (subst_term f1 o f2) ts =
-    subst_term_list f1 (subst_term_list f2 ts)"
-  show ?thesis
-  proof (induct t)
-    fix a show "?P (Var a)" by simp
-  next
-    fix b ts assume "?Q ts"
-    then show "?P (App b ts)"
-      by (simp only: subst.simps)
-  next
-    show "?Q []" by simp
-  next
-    fix t ts
-    assume "?P t" "?Q ts" then show "?Q (t # ts)"
-      by (simp only: subst.simps)
-  qed
-qed
-
-
-subsection {* Alternative induction *}
-
-theorem term_induct' [case_names Var App]:
-  assumes var: "!!a. P (Var a)"
-    and app: "!!b ts. list_all P ts ==> P (App b ts)"
-  shows "P t"
-proof (induct t)
-  fix a show "P (Var a)" by (rule var)
-next
-  fix b t ts assume "list_all P ts"
-  then show "P (App b ts)" by (rule app)
-next
-  show "list_all P []" by simp
-next
-  fix t ts assume "P t" "list_all P ts"
-  then show "list_all P (t # ts)" by simp
-qed
-
-lemma
-  "subst_term (subst_term f1 o f2) t = subst_term f1 (subst_term f2 t)"
-proof (induct t rule: term_induct')
-  case (Var a)
-  show ?case by (simp add: o_def)
-next
-  case (App b ts)
-  then show ?case by (induct ts) simp_all
-qed
-
-end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Isar_examples/Nested_Datatype.thy	Mon Jun 22 23:48:24 2009 +0200
@@ -0,0 +1,86 @@
+header {* Nested datatypes *}
+
+theory Nested_Datatype
+imports Main
+begin
+
+subsection {* Terms and substitution *}
+
+datatype ('a, 'b) "term" =
+    Var 'a
+  | App 'b "('a, 'b) term list"
+
+consts
+  subst_term :: "('a => ('a, 'b) term) => ('a, 'b) term => ('a, 'b) term"
+  subst_term_list ::
+    "('a => ('a, 'b) term) => ('a, 'b) term list => ('a, 'b) term list"
+
+primrec (subst)
+  "subst_term f (Var a) = f a"
+  "subst_term f (App b ts) = App b (subst_term_list f ts)"
+  "subst_term_list f [] = []"
+  "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
+
+
+text {*
+ \medskip A simple lemma about composition of substitutions.
+*}
+
+lemma "subst_term (subst_term f1 o f2) t =
+      subst_term f1 (subst_term f2 t)"
+  and "subst_term_list (subst_term f1 o f2) ts =
+      subst_term_list f1 (subst_term_list f2 ts)"
+  by (induct t and ts) simp_all
+
+lemma "subst_term (subst_term f1 o f2) t =
+  subst_term f1 (subst_term f2 t)"
+proof -
+  let "?P t" = ?thesis
+  let ?Q = "\<lambda>ts. subst_term_list (subst_term f1 o f2) ts =
+    subst_term_list f1 (subst_term_list f2 ts)"
+  show ?thesis
+  proof (induct t)
+    fix a show "?P (Var a)" by simp
+  next
+    fix b ts assume "?Q ts"
+    then show "?P (App b ts)"
+      by (simp only: subst.simps)
+  next
+    show "?Q []" by simp
+  next
+    fix t ts
+    assume "?P t" "?Q ts" then show "?Q (t # ts)"
+      by (simp only: subst.simps)
+  qed
+qed
+
+
+subsection {* Alternative induction *}
+
+theorem term_induct' [case_names Var App]:
+  assumes var: "!!a. P (Var a)"
+    and app: "!!b ts. list_all P ts ==> P (App b ts)"
+  shows "P t"
+proof (induct t)
+  fix a show "P (Var a)" by (rule var)
+next
+  fix b t ts assume "list_all P ts"
+  then show "P (App b ts)" by (rule app)
+next
+  show "list_all P []" by simp
+next
+  fix t ts assume "P t" "list_all P ts"
+  then show "list_all P (t # ts)" by simp
+qed
+
+lemma
+  "subst_term (subst_term f1 o f2) t = subst_term f1 (subst_term f2 t)"
+proof (induct t rule: term_induct')
+  case (Var a)
+  show ?case by (simp add: o_def)
+next
+  case (App b ts)
+  then show ?case by (induct ts) simp_all
+qed
+
+end
--- a/src/HOL/Isar_examples/Peirce.thy	Mon Jun 22 22:51:08 2009 +0200
+++ b/src/HOL/Isar_examples/Peirce.thy	Mon Jun 22 23:48:24 2009 +0200
@@ -1,11 +1,12 @@
 (*  Title:      HOL/Isar_examples/Peirce.thy
-    ID:         $Id$
     Author:     Markus Wenzel, TU Muenchen
 *)
 
 header {* Peirce's Law *}
 
-theory Peirce imports Main begin
+theory Peirce
+imports Main
+begin
 
 text {*
  We consider Peirce's Law: $((A \impl B) \impl A) \impl A$.  This is
--- a/src/HOL/Isar_examples/Puzzle.thy	Mon Jun 22 22:51:08 2009 +0200
+++ b/src/HOL/Isar_examples/Puzzle.thy	Mon Jun 22 23:48:24 2009 +0200
@@ -1,7 +1,8 @@
-
 header {* An old chestnut *}
 
-theory Puzzle imports Main begin
+theory Puzzle
+imports Main
+begin
 
 text_raw {*
   \footnote{A question from ``Bundeswettbewerb Mathematik''.  Original
--- a/src/HOL/Isar_examples/ROOT.ML	Mon Jun 22 22:51:08 2009 +0200
+++ b/src/HOL/Isar_examples/ROOT.ML	Mon Jun 22 23:48:24 2009 +0200
@@ -1,5 +1,4 @@
 (*  Title:      HOL/Isar_examples/ROOT.ML
-    ID:         $Id$
     Author:     Markus Wenzel, TU Muenchen
 
 Miscellaneous Isabelle/Isar examples for Higher-Order Logic.
@@ -8,16 +7,16 @@
 no_document use_thys ["../NumberTheory/Primes", "../NumberTheory/Fibonacci"];
 
 use_thys [
-  "BasicLogic",
+  "Basic_Logic",
   "Cantor",
   "Peirce",
   "Drinker",
-  "ExprCompiler",
+  "Expr_Compiler",
   "Group",
   "Summation",
-  "KnasterTarski",
-  "MutilatedCheckerboard",
+  "Knaster_Tarski",
+  "Mutilated_Checkerboard",
   "Puzzle",
-  "NestedDatatype",
-  "HoareEx"
+  "Nested_Datatype",
+  "Hoare_Ex"
 ];
--- a/src/HOL/Isar_examples/Summation.thy	Mon Jun 22 22:51:08 2009 +0200
+++ b/src/HOL/Isar_examples/Summation.thy	Mon Jun 22 23:48:24 2009 +0200
@@ -1,5 +1,4 @@
 (*  Title:      HOL/Isar_examples/Summation.thy
-    ID:         $Id$
     Author:     Markus Wenzel
 *)
 
--- a/src/HOL/Isar_examples/document/root.tex	Mon Jun 22 22:51:08 2009 +0200
+++ b/src/HOL/Isar_examples/document/root.tex	Mon Jun 22 23:48:24 2009 +0200
@@ -1,6 +1,3 @@
-
-% $Id$
-
 \input{style}
 
 \hyphenation{Isabelle}