--- a/doc-src/TutorialI/IsarOverview/IsaMakefile Tue Oct 01 15:03:28 2002 +0200
+++ b/doc-src/TutorialI/IsarOverview/IsaMakefile Tue Oct 01 20:54:17 2002 +0200
@@ -14,7 +14,8 @@
Isar: $(LOG)/HOL-Isar.gz
-$(LOG)/HOL-Isar.gz: Isar/ROOT.ML Isar/document/root.tex Isar/document/root.bib Isar/*.thy
+$(LOG)/HOL-Isar.gz: Isar/ROOT.ML Isar/document/intro.tex \
+ Isar/document/root.tex Isar/document/root.bib Isar/*.thy
@$(USEDIR) HOL Isar
--- a/doc-src/TutorialI/IsarOverview/Isar/Induction.thy Tue Oct 01 15:03:28 2002 +0200
+++ b/doc-src/TutorialI/IsarOverview/Isar/Induction.thy Tue Oct 01 20:54:17 2002 +0200
@@ -2,6 +2,11 @@
section{*Case distinction and induction \label{sec:Induct}*}
+text{* Computer science applications abound with inductively defined
+structures, which is why we treat them in more detail. HOL already
+comes with a datatype of lists with the two constructors @{text Nil}
+and @{text Cons}. @{text Nil} is written @{term"[]"} and @{text"Cons x
+xs"} is written @{term"x # xs"}. *}
subsection{*Case distinction*}
@@ -16,7 +21,7 @@
text{*\noindent Note that the two cases must come in this order.
This form is appropriate if @{term P} is textually small. However, if
-@{term P} is large, we don't want to repeat it. This can be avoided by
+@{term P} is large, we do not want to repeat it. This can be avoided by
the following idiom *}
lemma "P \<or> \<not> P"
@@ -25,10 +30,12 @@
next
case False thus ?thesis ..
qed
+
text{*\noindent which is simply a shorthand for the previous
-proof. More precisely, the phrases \isakeyword{case}~@{prop
-True}/@{prop False} abbreviate the corresponding assumptions @{prop P}
-and @{prop"\<not>P"}. In contrast to the previous proof we can prove the cases
+proof. More precisely, the phrase `\isakeyword{case}~@{text True}'
+abbreviates `\isakeyword{assume}~@{text"True: P"}'
+and analogously for @{text"False"} and @{prop"\<not>P"}.
+In contrast to the previous proof we can prove the cases
in arbitrary order.
The same game can be played with other datatypes, for example lists:
@@ -40,18 +47,19 @@
next
case Cons thus ?thesis by simp
qed
-text{*\noindent Here \isakeyword{case}~@{text Nil} abbreviates
-\isakeyword{assume}~@{prop"xs = []"} and \isakeyword{case}~@{text Cons}
-abbreviates \isakeyword{fix}~@{text"? ??"}
-\isakeyword{assume}~@{text"xs = ? # ??"} where @{text"?"} and @{text"??"}
+text{*\noindent Here `\isakeyword{case}~@{text Nil}' abbreviates
+`\isakeyword{assume}~@{text"Nil:"}~@{prop"xs = []"}' and
+`\isakeyword{case}~@{text Cons}'
+abbreviates `\isakeyword{fix}~@{text"? ??"}
+\isakeyword{assume}~@{text"Cons:"}~@{text"xs = ? # ??"}'
+where @{text"?"} and @{text"??"}
stand for variable names that have been chosen by the system.
-Therefore we cannot
-refer to them (this would lead to obscure proofs) and have not even shown
-them. Luckily, this proof is simple enough we do not need to refer to them.
+Therefore we cannot refer to them.
+Luckily, this proof is simple enough that we do not need to refer to them.
However, sometimes one may have to. Hence Isar offers a simple scheme for
-naming those variables: replace the anonymous @{text Cons} by, for example,
-@{text"(Cons y ys)"}, which abbreviates \isakeyword{fix}~@{text"y ys"}
-\isakeyword{assume}~@{text"xs = Cons y ys"}, i.e.\ @{prop"xs = y # ys"}. Here
+naming those variables: replace the anonymous @{text Cons} by
+@{text"(Cons y ys)"}, which abbreviates `\isakeyword{fix}~@{text"y ys"}
+\isakeyword{assume}~@{text"Cons:"}~@{text"xs = y # ys"}'. Here
is a (somewhat contrived) example: *}
lemma "length(tl xs) = length xs - 1"
@@ -64,6 +72,10 @@
thus ?thesis by simp
qed
+text{* Note that in each \isakeyword{case} the assumption can be
+referred to inside the proof by the name of the constructor. In
+Section~\ref{sec:full-Ind} below we will come across an example
+of this. *}
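+
+text{* As a small foretaste, here is another (purely illustrative)
+example in which the assumption of the @{text Cons} case is referred
+to by its name: *}
+lemma "xs = [] \<or> xs = hd xs # tl xs"
+proof (cases xs)
+  case Nil thus ?thesis by simp
+next
+  case (Cons y ys)
+  from Cons show ?thesis by simp (* Cons names the assumption "xs = y # ys" *)
+qed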
subsection{*Structural induction*}
@@ -111,23 +123,21 @@
\isakeyword{thus}~@{text ?case} instead of giving the term explicitly
but we wanted to use @{term i} somewhere. *}
-subsection{*Induction formulae involving @{text"\<And>"} or @{text"\<Longrightarrow>"}*}
+subsection{*Induction formulae involving @{text"\<And>"} or @{text"\<Longrightarrow>"}\label{sec:full-Ind}*}
text{* Let us now consider the situation where the goal to be proved contains
@{text"\<And>"} or @{text"\<Longrightarrow>"}, say @{prop"\<And>x. P x \<Longrightarrow> Q x"} --- motivation and a
real example follow shortly. This means that in each case of the induction,
-@{text ?case} would be of the same form @{prop"\<And>x. P' x \<Longrightarrow> Q' x"}. Thus the
+@{text ?case} would be of the form @{prop"\<And>x. P' x \<Longrightarrow> Q' x"}. Thus the
first proof steps will be the canonical ones, fixing @{text x} and assuming
@{prop"P' x"}. To avoid this tedium, induction performs these steps
automatically: for example in case @{text"(Suc n)"}, @{text ?case} is only
-@{prop"Q' x"} whereas the assumptions (named @{term Suc}) contain both the
-usual induction hypothesis \emph{and} @{prop"P' x"}. To fix the name of the
-bound variable @{term x} you may write @{text"(Suc n x)"}, thus abbreviating
-\isakeyword{fix}~@{term x}.
-It should be clear how this example generalizes to more complex formulae.
+@{prop"Q' x"} whereas the assumptions (named @{term Suc}!) contain both the
+usual induction hypothesis \emph{and} @{prop"P' x"}.
+It should be clear how this generalizes to more complex formulae.
As a concrete example we will now prove complete induction via
-structural induction: *}
+structural induction. *}
lemma assumes A: "(\<And>n. (\<And>m. m < n \<Longrightarrow> P m) \<Longrightarrow> P n)"
shows "P(n::nat)"
@@ -136,11 +146,11 @@
proof (induct n)
case 0 thus ?case by simp
next
- case (Suc n m) -- {*@{text"?m < n \<Longrightarrow> P ?m"} and @{prop"m < Suc n"}*}
- show ?case -- {*@{term ?case}*}
+ case (Suc n) -- {*\isakeyword{fix} @{term m} \isakeyword{assume} @{text Suc}: @{text[source]"?m < n \<Longrightarrow> P ?m"} @{prop[source]"m < Suc n"}*}
+ show ?case -- {*@{term ?case}*}
proof cases
assume eq: "m = n"
- from Suc A have "P n" by blast
+ from Suc and A have "P n" by blast
with eq show "P m" by simp
next
assume "m \<noteq> n"
@@ -149,8 +159,9 @@
qed
qed
qed
-text{* \noindent Given the explanations above and the comments in
-the proof text, the proof should be quite readable.
+text{* \noindent Given the explanations above and the comments in the
+proof text (only necessary for novices), the proof should be quite
+readable.
The statement of the lemma is interesting because it deviates from the style in
the Tutorial~\cite{LNCS2283}, which suggests to introduce @{text"\<forall>"} or
@@ -162,15 +173,21 @@
subsection{*Rule induction*}
-text{* We define our own version of reflexive transitive closure of a
-relation *}
+text{* HOL also supports inductively defined sets. See \cite{LNCS2283}
+for details. As an example we define our own version of reflexive
+transitive closure of a relation --- HOL provides a predefined one as well.*}
consts rtc :: "('a \<times> 'a)set \<Rightarrow> ('a \<times> 'a)set" ("_*" [1000] 999)
inductive "r*"
intros
refl: "(x,x) \<in> r*"
step: "\<lbrakk> (x,y) \<in> r; (y,z) \<in> r* \<rbrakk> \<Longrightarrow> (x,z) \<in> r*"
-text{* \noindent and prove that @{term"r*"} is indeed transitive: *}
+text{* \noindent
+First the constant is declared as a function on binary
+relations (with concrete syntax @{term"r*"} instead of @{text"rtc
+r"}), then the defining clauses are given. We will now prove that
+@{term"r*"} is indeed transitive: *}
+
lemma assumes A: "(x,y) \<in> r*" shows "(y,z) \<in> r* \<Longrightarrow> (x,z) \<in> r*"
using A
proof induct
@@ -208,22 +225,37 @@
?thesis} but @{text"B \<Longrightarrow> ?thesis"}, just as in the previous proof. The
base case is trivial. In the assumptions for the induction step we can
see very clearly how things fit together and permit ourselves the
-obvious forward step @{text"IH[OF B]"}. *}
+obvious forward step @{text"IH[OF B]"}.
+
+The notation \isakeyword{case}~\isa{(constructor} \emph{vars}\isa{)}
+is also supported for inductive definitions. The constructor is (the
+name of) the rule and the \emph{vars} fix the free variables in the
+rule; the order of the \emph{vars} must correspond to the
+\emph{alphabetical order} of the variables as they appear in the rule.
+For example, we could start the above detailed proof of the induction
+with \isakeyword{case}~\isa{(step x' x y)}. However, we can then only
+refer to the assumptions named \isa{step} collectively and not
+individually, as the above proof requires. *}
subsection{*More induction*}
-text{* We close the section by demonstrating how arbitrary induction rules
-are applied. As a simple example we have chosen recursion induction, i.e.\
-induction based on a recursive function definition. The example is an unusual
-definition of rotation: *}
+text{* We close the section by demonstrating how arbitrary induction
+rules are applied. As a simple example we have chosen recursion
+induction, i.e.\ induction based on a recursive function
+definition. However, most of what we show works for induction in
+general.
+
+The example is an unusual definition of rotation: *}
consts rot :: "'a list \<Rightarrow> 'a list"
recdef rot "measure length"
"rot [] = []"
"rot [x] = [x]"
"rot (x#y#zs) = y # rot(x#zs)"
+text{*\noindent This yields, among other things, the induction rule
+@{thm[source]rot.induct}: @{thm[display]rot.induct[no_vars]}
-text{* In the first proof we set up each case explicitly, merely using
+In the first proof we set up each case explicitly, merely using
pattern matching to abbreviate the statement: *}
lemma "length(rot xs) = length xs" (is "?P xs")
@@ -235,10 +267,7 @@
fix x y zs assume "?P (x#zs)"
thus "?P (x#y#zs)" by simp
qed
-text{*\noindent
-This proof skeleton works for arbitrary induction rules, not just
-@{thm[source]rot.induct}.
-
+text{*
In the following proof we rely on a default naming scheme for cases: they are
called 1, 2, etc, unless they have been named explicitly. The latter happens
only with datatypes and inductively defined sets, but not with recursive
@@ -250,13 +279,29 @@
next
case 2 show ?case by simp
next
- case 3 thus ?case by simp
+ case (3 a b cs)
+ have "rot (a # b # cs) = b # rot(a # cs)" by simp
+ also have "\<dots> = b # tl(a # cs) @ [hd(a # cs)]" by(simp add:3)
+ also have "\<dots> = tl (a # b # cs) @ [hd (a # b # cs)]" by simp
+ finally show ?case .
qed
-text{*\noindent Why can case 2 get away with \isakeyword{show} instead of
-\isakeyword{thus}?
+text{*\noindent Why can case 2 get away with \isakeyword{show} instead
+of \isakeyword{thus}?
+
+The third case is only shown in gory detail (see \cite{BauerW-TPHOLs01}
+for how to reason with chains of equations) to demonstrate that the
+\isakeyword{case}~\isa{(constructor} \emph{vars}\isa{)} notation also
+works for arbitrary induction theorems with numbered cases. The order
+of the \emph{vars} corresponds to the order of the
+@{text"\<And>"}-quantified variables in each case of the induction
+theorem. For induction theorems produced by \isakeyword{recdef} it is
+the order in which the variables appear on the left-hand side of the
+equation.
Of course both proofs are so simple that they can be condensed to*}
(*<*)lemma "xs \<noteq> [] \<Longrightarrow> rot xs = tl xs @ [hd xs]"(*>*)
by (induct xs rule: rot.induct, simp_all)
+
+
(*<*)end(*>*)
--- a/doc-src/TutorialI/IsarOverview/Isar/Logic.thy Tue Oct 01 15:03:28 2002 +0200
+++ b/doc-src/TutorialI/IsarOverview/Isar/Logic.thy Tue Oct 01 20:54:17 2002 +0200
@@ -16,7 +16,8 @@
text{*\noindent
The operational reading: the \isakeyword{assume}-\isakeyword{show} block
proves @{prop"A \<Longrightarrow> A"},
-which rule @{thm[source]impI} turns into the desired @{prop"A \<longrightarrow> A"}.
+which rule @{thm[source]impI} (@{thm impI})
+turns into the desired @{prop"A \<longrightarrow> A"}.
However, this text is much too detailed for comfort. Therefore Isar
implements the following principle:
\begin{quote}\em
@@ -49,7 +50,8 @@
assume "A"
show "A \<and> A" by(rule conjI)
qed
-text{*\noindent A drawback of these implicit proofs by assumption is that it
+text{*\noindent Rule @{thm[source]conjI} is of course @{thm conjI}.
+A drawback of these implicit proofs by assumption is that it
is no longer obvious where an assumption is used.
Proofs of the form \isakeyword{by}@{text"(rule"}~\emph{name}@{text")"} can be
@@ -70,8 +72,8 @@
subsubsection{*Elimination rules*}
text{*A typical elimination rule is @{thm[source]conjE}, $\land$-elimination:
-@{thm[display,indent=5]conjE[no_vars]} In the following proof it is applied
-by hand, after its first (``\emph{major}'') premise has been eliminated via
+@{thm[display,indent=5]conjE} In the following proof it is applied
+by hand, after its first (\emph{major}) premise has been eliminated via
@{text"[OF AB]"}: *}
lemma "A \<and> B \<longrightarrow> B \<and> A"
proof
@@ -306,11 +308,12 @@
text{*\noindent Command \isakeyword{using} can appear before a proof
and adds further facts to those piped into the proof. Here @{text AB}
is the only such fact and it triggers $\land$-elimination. Another
-frequent usage is as follows:
+frequent idiom is as follows:
\begin{center}
-\isakeyword{from} \emph{important facts}
-\isakeyword{show} \emph{something}
-\isakeyword{using} \emph{minor facts}
+\isakeyword{from} \emph{major facts}~
+\isakeyword{show} \emph{proposition}~
+\isakeyword{using} \emph{minor facts}~
+\emph{proof}
\end{center}
\medskip
@@ -458,8 +461,74 @@
by best
text{*\noindent Of course this only works in the context of HOL's carefully
constructed introduction and elimination rules for set theory.
+*}
-Finally, whole ``scripts'' (unstructured proofs in the style of
+subsection{*Further refinements*}
+
+text{* This subsection discusses some further tricks that can make
+life easier, although they are not essential. We start with some small
+syntactic items.*}
+
+subsubsection{*\isakeyword{and}*}
+
+text{* Propositions (following \isakeyword{assume} etc) may but need not be
+separated by \isakeyword{and}. This is not just for readability
+(\isakeyword{from} \isa{A} \isakeyword{and} \isa{B} looks nicer than
+\isakeyword{from} \isa{A} \isa{B}) but for structuring lists of propositions
+into possibly named blocks. For example in
+\begin{center}
+\isakeyword{assume} \isa{A:} $A_1$ $A_2$ \isakeyword{and} \isa{B:} $A_3$
+\isakeyword{and} $A_4$
+\end{center}
+label \isa{A} refers to the list of propositions $A_1$ $A_2$ and
+label \isa{B} to $A_3$. *}
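+
+text{* For instance (a small illustrative lemma of our own, with
+arbitrary propositions @{text P}, @{text Q} and @{text R}): *}
+lemma assumes A: "P" "Q" and B: "R" shows "P \<and> R"
+proof -
+  from A and B show ?thesis by blast (* label A stands for both "P" and "Q" *)
+qed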
+
+subsubsection{*\isakeyword{fixes}*}
+
+text{* Sometimes it is necessary to decorate a proposition with type
+constraints, as in Cantor's theorem above. These type constraints tend
+to make the theorem less readable. The situation can be improved a
+little by combining the type constraint with an outer @{text"\<And>"}: *}
+
+theorem "\<And>f :: 'a \<Rightarrow> 'a set. \<exists>S. S \<notin> range f"
+(*<*)oops(*>*)
+text{*\noindent However, now @{term f} is bound and we need a
+\isakeyword{fix}~\isa{f} in the proof before we can refer to @{term f}.
+This is avoided by \isakeyword{fixes}: *}
+
+theorem fixes f :: "'a \<Rightarrow> 'a set" shows "\<exists>S. S \<notin> range f"
+(*<*)oops(*>*)
+text{* \noindent
+But the real strength of \isakeyword{fixes} lies in the possibility of
+introducing concrete syntax locally:*}
+
+lemma comm_mono:
+ fixes r :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix ">" 60) and
+ f :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "++" 70)
+ assumes comm: "\<And>x y::'a. x ++ y = y ++ x" and
+ mono: "\<And>x y z::'a. x > y \<Longrightarrow> x ++ z > y ++ z"
+ shows "x > y \<Longrightarrow> z ++ x > z ++ y"
+by(simp add: comm mono)
+
+text{*\noindent The concrete syntax is dropped at the end of the proof and the
+theorem becomes @{thm[display,margin=50]comm_mono} *}
+
+subsubsection{*\isakeyword{obtain}*}
+
+text{* The \isakeyword{obtain} construct can introduce multiple
+witnesses and propositions as in the following proof fragment:*}
+
+lemma assumes A: "\<exists>x y. P x y \<and> Q x y" shows "R"
+proof -
+ from A obtain x y where P: "P x y" and Q: "Q x y" by blast
+(*<*)oops(*>*)
+text{* Remember also that one does not even need to start with a formula
+containing @{text"\<exists>"}, as we saw in the proof of Cantor's theorem.
+*}
+
+subsubsection{*Combining proof styles*}
+
+text{* Finally, whole ``scripts'' (tactic-based proofs in the style of
\cite{LNCS2283}) may appear in the leaves of the proof tree, although this is
best avoided. Here is a contrived example: *}
@@ -473,7 +542,12 @@
apply assumption
done
qed
-text{*\noindent You may need to resort to this technique if an automatic step
-fails to prove the desired proposition. *}
+
+text{*\noindent You may need to resort to this technique if an
+automatic step fails to prove the desired proposition.
+
+When converting a proof from tactic style into Isar you can proceed
+in a top-down manner: parts of the proof can be left in script form
+while the outer structure is already expressed in Isar. *}
(*<*)end(*>*)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/TutorialI/IsarOverview/Isar/document/intro.tex Tue Oct 01 20:54:17 2002 +0200
@@ -0,0 +1,154 @@
+\section{Introduction}
+
+Isar is an extension of Isabelle with structured proofs in a stylized
+language of mathematics. These proofs are readable for both a human
+and a machine. This document is a very compact introduction to
+structured proofs in Isar/HOL, an extension of
+Isabelle/HOL~\cite{LNCS2283}. We intentionally do not present the full
+language but concentrate on the essentials. Neither do we give a
+formal semantics of Isar. Instead we introduce Isar by example. Again
+this is intentional: we believe that the language ``speaks for
+itself'' in the same way that traditional mathematical proofs do, which
+are also introduced by example rather than by teaching students logic
+first. A detailed exposition of Isar can be found in Markus Wenzel's
+PhD thesis~\cite{Wenzel-PhD} and the Isar reference
+manual~\cite{Isar-Ref-Man}.
+
+\subsection{Background}
+
+Interactive theorem proving has been dominated by a model of proof
+that goes back to the LCF system~\cite{LCF}: a proof is a more or less
+structured sequence of commands that manipulate an implicit proof
+state. Thus the proof text is only suitable for the machine; for a
+human, the proof only comes alive when he can see the state changes
+caused by the stepwise execution of the commands. Such proofs are like
+uncommented assembly language programs. We call them
+\emph{tactic-style} proofs because LCF proof commands were called
+tactics.
+
+A radically different approach was taken by the Mizar
+system
+%~\cite{Mizar}
+where proofs are written in a stylized language akin to that used in
+ordinary mathematics texts. The most important argument in favour of a
+mathematics-like proof language is communication: as soon as not just
+the theorem but also the proof becomes an object of interest, it
+should be readable. From a system development point of view there is
+a second important argument against tactic-style proofs: they are much
+harder to maintain when the system is modified. The reason is as
+follows. Since it is usually quite unclear what exactly is proved at
+some point in the middle of a command sequence, updating a failed
+proof often requires executing the proof up to the point of failure
+using the old version of the system. In contrast, mathematics-like
+proofs contain enough information to tell you what is proved at any
+point.
+
+For these reasons the Isabelle system, originally firmly in the
+LCF-tradition, was extended with a language for writing structured
+proofs in a mathematics-like style. As the name already indicates,
+Isar was certainly inspired by Mizar. However, there are very many
+differences. For a start, Isar is generic: only a few of the language
+constructs described below are specific to HOL; many are generic and
+thus available for any logic defined in Isabelle, e.g.\ ZF.
+Furthermore, we have Isabelle's powerful automatic proof procedures at
+our disposal. A closer comparison of Isar and Mizar can be found
+elsewhere~\cite{WenzelW-JAR}.
+
+Finally, a word of warning for potential writers of Isar proofs. It
+has always been easier to write obscure rather than readable texts.
+Similarly, tactic-style proofs are often (though by no means always!)
+easier to write than readable ones: structure does not emerge
+automatically but needs to be understood and imposed. If the precise
+structure of the proof is not clear from the beginning, it can be
+useful to start in a tactic-based style for exploratory purposes until
+one has found a proof which can then be converted into a structured
+text in a second step. Top down conversion is possible because Isar
+allows tactic-style proofs as components of structured ones.
+
+\subsection{A first glimpse of Isar}
+
+Below you find a simplified grammar for Isar proofs.
+Parentheses are used for grouping and $^?$ indicates an optional item:
+\begin{center}
+\begin{tabular}{lrl}
+\emph{proof} & ::= & \isakeyword{proof} \emph{method}$^?$
+ \emph{statement}*
+ \isakeyword{qed} \\
+ &$\mid$& \isakeyword{by} \emph{method}\\[1ex]
+\emph{statement} &::= & \isakeyword{fix} \emph{variables} \\
+ &$\mid$& \isakeyword{assume} \emph{propositions} \\
+ &$\mid$& (\isakeyword{from} \emph{facts})$^?$
+ (\isakeyword{show} $\mid$ \isakeyword{have})
+ \emph{propositions} \emph{proof} \\[1ex]
+\emph{proposition} &::=& (\emph{label}{\bf:})$^?$ \emph{string} \\[1ex]
+\emph{fact} &::=& \emph{label}
+\end{tabular}
+\end{center}
+A proof can be either compound (\isakeyword{proof} --
+\isakeyword{qed}) or atomic (\isakeyword{by}). A \emph{method} is a
+proof method offered by the underlying theorem prover, for example
+\isa{rule} or \isa{simp} in Isabelle. Thus this grammar is
+generic both w.r.t.\ the logic and the theorem prover.
+
+This is a typical proof skeleton:
+\begin{center}
+\begin{tabular}{@{}ll}
+\isakeyword{proof}\\
+\hspace*{3ex}\isakeyword{assume} \isa{"}\emph{the-assm}\isa{"}\\
+\hspace*{3ex}\isakeyword{have} \isa{"}\dots\isa{"} & --- intermediate result\\
+\hspace*{3ex}\vdots\\
+\hspace*{3ex}\isakeyword{have} \isa{"}\dots\isa{"} & --- intermediate result\\
+\hspace*{3ex}\isakeyword{show} \isa{"}\emph{the-concl}\isa{"}\\
+\isakeyword{qed}
+\end{tabular}
+\end{center}
+It proves \emph{the-assm}~$\Longrightarrow$~\emph{the-concl}. Text starting with
+``---'' is a comment. The intermediate \isakeyword{have}s are only
+there to bridge the gap between the assumption and the conclusion and
+do not contribute to the theorem being proved. In contrast,
+\isakeyword{show} establishes the conclusion of the theorem.
+
+\subsection{Bits of Isabelle}
+
+In order to make this paper self-contained we recall some basic
+notions and notation from Isabelle~\cite{LNCS2283}. Isabelle's
+meta-logic offers an implication $\Longrightarrow$ and a universal quantifier $\bigwedge$
+for expressing inference rules and generality. Iterated implications
+$A_1 \Longrightarrow \dots \Longrightarrow A_n \Longrightarrow A$ may be abbreviated to $[\!| A_1; \dots; A_n
+|\!] \Longrightarrow A$. Applying a theorem $A \Longrightarrow B$ (named \isa{T}) to a theorem
+$A$ (named \isa{U}) is written \isa{T[OF U]} and yields theorem $B$.
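+For example, if \isa{T} is the theorem $[\!| A_1; A_2 |\!] \Longrightarrow A$ and
+\isa{U} is a theorem $A_1$, then \isa{T[OF U]} denotes the theorem
+$A_2 \Longrightarrow A$.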
+
+Isabelle terms are simply typed. Function types are
+written $\tau_1 \Rightarrow \tau_2$.
+
+Free variables that may be instantiated (``logical variables'' in
+Prolog parlance) are prefixed with a \isa{?}. Typically, theorems are
+stated with ordinary free variables but after the proof those are
+replaced by \isa{?}-variables. Thus the theorem can be used with
+arbitrary instances of its free variables.
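+For example, a lemma proved in the form $A \land B \Longrightarrow A$ is stored as
+$?A \land ?B \Longrightarrow ?A$, which can be used for any instance of $A$ and $B$.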
+
+Isabelle/HOL offers all the usual logical symbols like $\longrightarrow$, $\land$,
+$\forall$ etc. Proof methods include \isa{rule} (which performs a backwards
+step with a given rule, unifying the conclusion of the rule with the
+current subgoal and replacing the subgoal by the premises of the
+rule), \isa{simp} (for simplification) and \isa{blast} (for predicate
+calculus reasoning).
+
+\subsection{Overview of the paper}
+
+The rest of the paper is divided into two parts.
+Section~\ref{sec:Logic} introduces proofs in pure logic based on
+natural deduction. Section~\ref{sec:Induct} is dedicated to induction,
+the key reasoning principle for computer science applications.
+
+There are at least two further areas where Isar provides specific
+support, but which we do not document here: reasoning by chains of
+(in)equations is described elsewhere~\cite{BauerW-TPHOLs01}, whereas
+reasoning about axiomatically defined structures by means of so called
+``locales'' \cite{KWP-TPHOLs99} is only described in a very early
+form and has evolved much since then.
+
+Do not be misled by the simplicity of the formulae being proved,
+especially in the beginning. Isar has been used very successfully in
+large applications, for example the formalization of Java, in
+particular the verification of the bytecode verifier~\cite{KleinN-TCS}.
--- a/doc-src/TutorialI/IsarOverview/Isar/document/root.bib Tue Oct 01 15:03:28 2002 +0200
+++ b/doc-src/TutorialI/IsarOverview/Isar/document/root.bib Tue Oct 01 20:54:17 2002 +0200
@@ -1,10 +1,24 @@
@string{LNCS="Lect.\ Notes in Comp.\ Sci."}
@string{Springer="Springer-Verlag"}
+@InProceedings{BauerW-TPHOLs01,
+author={Gertrud Bauer and Markus Wenzel},
+title={Calculational Reasoning Revisited --- an {Isabelle/Isar} Experience},
+booktitle={Theorem Proving in Higher Order Logics, TPHOLs 2001},
+editor={R. Boulton and P. Jackson},
+year=2001,publisher=Springer,series=LNCS,volume=2152}
+
@book{LCF,author="M.C.J. Gordon and Robin Milner and C.P. Wadsworth",
title="Edinburgh {LCF}: a Mechanised Logic of Computation",
publisher=Springer,series=LNCS,volume=78,year=1979}
+@InProceedings{KWP-TPHOLs99,
+author={Florian Kamm{\"u}ller and Markus Wenzel and Lawrence C. Paulson},
+title={Locales: A Sectioning Concept for {Isabelle}},
+booktitle={Theorem Proving in Higher Order Logics, TPHOLs'99},
+editor={Y. Bertot and G. Dowek and A. Hirschowitz and C. Paulin and L. Thery},
+year=1999,publisher=Springer,series=LNCS,volume=1690,pages="149--165"}
+
@book{LNCS2283,author={Tobias Nipkow and Lawrence Paulson and Markus Wenzel},
title="Isabelle/HOL --- A Proof Assistant for Higher-Order Logic",
publisher=Springer,series=LNCS,volume=2283,year=2002,
@@ -20,3 +34,7 @@
school={Institut f{\"u}r Informatik, Technische Universit{\"a}t M{\"u}nchen},
year=2002,
note={\url{http://tumb1.biblio.tu-muenchen.de/publ/diss/in/2002/wenzel.html}}}
+
+@unpublished{WenzelW-JAR,author={Markus Wenzel and Freek Wiedijk},
+title={A comparison of the mathematical proof languages {Mizar} and {Isar}},
+note={Submitted for publication},year=2002}
--- a/doc-src/TutorialI/IsarOverview/Isar/document/root.tex Tue Oct 01 15:03:28 2002 +0200
+++ b/doc-src/TutorialI/IsarOverview/Isar/document/root.tex Tue Oct 01 20:54:17 2002 +0200
@@ -24,146 +24,7 @@
behind the language and its constructs.
\end{abstract}
-\section{Introduction}
-
-Isar is an extension of Isabelle with structured proofs in a stylized
-language of mathematics. These proofs are readable for both a human
-and a machine. This document is a very compact introduction to
-structured proofs in Isar/HOL, an extension of
-Isabelle/HOL~\cite{LNCS2283}. We intentionally do not present the full
-language but concentrate on the essentials. Neither do we give a
-formal semantics of Isar. Instead we introduce Isar by example. Again
-this is intentional: we believe that the language ``speaks for
-itself'' in the same way that traditional mathamtical proofs do, which
-are also introduced by example rather than by teaching students logic
-first. A detailed exposition of Isar can be found in Markus Wenzel's
-PhD thesis~\cite{Wenzel-PhD} and the Isar reference
-manual~\cite{Isar-Ref-Man}.
-
-\subsection{Background}
-
-Interactive theorem proving has been dominated by a model of proof
-that goes back to the LCF system~\cite{LCF}: a proof is a more or less
-structured sequence of commands that manipulate an implicit proof
-state. Thus the proof proof text is only suitable for the machine; for
-a human, the proof only comes alive when he can see the state changes
-caused by the stepwise execution of the commands. Such a proof is like
-an uncommented assembly language program. We call them
-\emph{tactic-style} proofs because LCF proof-commands were called
-tactics.
-
-A radically different approach was taken by the Mizar
-system~\cite{Mizar} where proofs are written a stylized language akin
-to that used in ordinary mathematics texts. The most important
-argument in favour of a mathematics-like proof language is
-communication: as soon as not just the theorem but also the proof
-becomes an object of interest, it should be readable as it is. From a
-system development point of view there is a second important argument
-against tactic-style proofs: they are much harder to maintain when the
-system is modified. The reason is as follows. Since it is usually
-quite unclear what exactly is proved at some point in the middle of a
-command sequence, updating a failed proof often requires executing the
-proof up to the point of failure using the old version of the system.
-In contrast, mathematics-like proofs contain enough information
-to tell you what is proved at any point.
-
-For these reasons the Isabelle system, originally firmly in the
-LCF-tradition, was extended with a language for writing structured
-proofs in a mathematics-like style. As the name already indicates,
-Isar was certainly inspired by Mizar. However, there are very many
-differences. For a start, Isar is generic: only a few of the language
-constructs described below are specific to HOL; many are generic and
-thus available for any logic defined in Isabelle, e.g.\ ZF.
-Furthermore, we have Isabelle's powerful automatic proof procedures
-(e.g.\ simplification and a tableau-style prover) at our disposal.
-A closer comparison of Isar and Mizar can be found
-elsewhere~\cite{Isar-Mizar}.
-
-Finally, a word of warning for potential writers of structured Isar
-proofs. It has always been easier to write obscure rather than
-readable texts. Similarly, tactic-based proofs are often (though by no
-means always!) easier to write than readable ones: structure does not
-emerge automatically but needs to be understood and imposed. If the
-precise structure of the proof is not clear from the beginning, it can
-be useful to start in a tactic-based style for exploratory purposes
-until one has found a proof which can then be converted into a
-structured text in a second step. Top down conversion is possible
-because Isar allows tactic-based proofs as components of structured
-ones.
-
-\subsection{An overview of the Isar syntax}
-
-We begin by looking at a simplified grammar for Isar proofs
-where parentheses are used for grouping and $^?$ indicates an optional item:
-\begin{center}
-\begin{tabular}{lrl}
-\emph{proof} & ::= & \isakeyword{proof} \emph{method}$^?$
- \emph{statement}*
- \isakeyword{qed} \\
- &$\mid$& \isakeyword{by} \emph{method}\\[1ex]
-\emph{statement} &::= & \isakeyword{fix} \emph{variables} \\
- &$\mid$& \isakeyword{assume} \emph{propositions} \\
- &$\mid$& (\isakeyword{from} \emph{facts})$^?$
- (\isakeyword{show} $\mid$ \isakeyword{have})
- \emph{propositions} \emph{proof} \\[1ex]
-\emph{proposition} &::=& (\emph{label}{\bf:})$^?$ \emph{string} \\[1ex]
-\emph{fact} &::=& \emph{label}
-\end{tabular}
-\end{center}
-A proof can be either compound (\isakeyword{proof} --
-\isakeyword{qed}) or atomic (\isakeyword{by}). A \emph{method} is a
-proof method offered by the underlying theorem prover, for example
-\isa{rule} or \isa{simp} in Isabelle. Thus this grammar is
-generic both w.r.t.\ the logic and the theorem prover.
-
-This is a typical proof skeleton:
-\begin{center}
-\begin{tabular}{@{}ll}
-\isakeyword{proof}\\
-\hspace*{3ex}\isakeyword{assume} \isa{"}\emph{the-assm}\isa{"}\\
-\hspace*{3ex}\isakeyword{have} \isa{"}\dots\isa{"} & --- intermediate result\\
-\hspace*{3ex}\vdots\\
-\hspace*{3ex}\isakeyword{have} \isa{"}\dots\isa{"} & --- intermediate result\\
-\hspace*{3ex}\isakeyword{show} \isa{"}\emph{the-concl}\isa{"}\\
-\isakeyword{qed}
-\end{tabular}
-\end{center}
-It proves \emph{the-assm}~$\Longrightarrow$~\emph{the-concl}. Text starting with
-``---'' is a comment. The intermediate \isakeyword{have}s are only
-there to bridge the gap between the assumption and the conclusion and
-do not contribute to the theorem being proved. In contrast,
-\isakeyword{show} establishes the conclusion of the theorem.
-
-As a final bit of syntax note that \emph{propositions} (following
-\isakeyword{assume} etc) may but need not be separated by
-\isakeyword{and}, whose purpose is to structure them into possibly
-named blocks. For example in
-\begin{center}
-\isakeyword{assume} \isa{A:} $A_1$ $A_2$ \isakeyword{and} \isa{B:} $A_3$
-\isakeyword{and} $A_4$
-\end{center}
-label \isa{A} refers to the list of propositions $A_1$ $A_2$ and
-label \isa{B} to $A_3$.
-
-\subsection{Overview of the paper}
-
-The rest of the paper is divided into two parts.
-Section~\ref{sec:Logic} introduces proofs in pure logic based on
-natural deduction. Section~\ref{sec:Induct} is dedicated to induction,
-the key reasoning principle for computer science applications.
-
-There are at least two further areas where Isar provides specific
-support, but which we do not document here: reasoning by chains of
-(in)equations is described elsewhere~\cite{BauerW-TPHOL}, whereas
-reasoning about axiomatically defined structures by means of so called
-``locales'' \cite{BallarinPW-TPHOL} is only described in a very early
-form and has evolved much since then.
-
-Do not be mislead by the simplicity of the formulae being proved,
-especially in the beginning. Isar has been used very successfully in
-large applications, for example the formalization of Java, in
-particular the verification of the bytecode verifier~\cite{KleinN-TCS}.
-
+\input{intro.tex}
\input{Logic.tex}
\input{Induction.tex}