# HG changeset patch # User wenzelm # Date 1346093410 -7200 # Node ID fbf60999dc3181259107fea244a2391fe1615c73 # Parent f0d87c6b7a2e1d9ab53e0457eb190b2907f1024c more standard document preparation within session context; diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/advanced.tex --- a/doc-src/Intro/advanced.tex Mon Aug 27 20:19:09 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1249 +0,0 @@ -\part{Advanced Methods} -Before continuing, it might be wise to try some of your own examples in -Isabelle, reinforcing your knowledge of the basic functions. - -Look through {\em Isabelle's Object-Logics\/} and try proving some -simple theorems. You probably should begin with first-order logic -(\texttt{FOL} or~\texttt{LK}). Try working some of the examples provided, -and others from the literature. Set theory~(\texttt{ZF}) and -Constructive Type Theory~(\texttt{CTT}) form a richer world for -mathematical reasoning and, again, many examples are in the -literature. Higher-order logic~(\texttt{HOL}) is Isabelle's most -elaborate logic. Its types and functions are identified with those of -the meta-logic. - -Choose a logic that you already understand. Isabelle is a proof -tool, not a teaching tool; if you do not know how to do a particular proof -on paper, then you certainly will not be able to do it on the machine. -Even experienced users plan large proofs on paper. - -We have covered only the bare essentials of Isabelle, but enough to perform -substantial proofs. By occasionally dipping into the {\em Reference -Manual}, you can learn additional tactics, subgoal commands and tacticals. - - -\section{Deriving rules in Isabelle} -\index{rules!derived} -A mathematical development goes through a progression of stages. Each -stage defines some concepts and derives rules about them. We shall see how -to derive rules, perhaps involving definitions, using Isabelle. The -following section will explain how to declare types, constants, rules and -definitions. - - -\subsection{Deriving a rule using tactics and meta-level assumptions} -\label{deriving-example} -\index{examples!of deriving rules}\index{assumptions!of main goal} - -The subgoal module supports the derivation of rules, as discussed in -\S\ref{deriving}. When the \ttindex{Goal} command is supplied a formula of -the form $\List{\theta@1; \ldots; \theta@k} \Imp \phi$, there are two -possibilities: -\begin{itemize} -\item If all of the premises $\theta@1$, \ldots, $\theta@k$ are simple - formulae{} (they do not involve the meta-connectives $\Forall$ or - $\Imp$) then the command sets the goal to be - $\List{\theta@1; \ldots; \theta@k} \Imp \phi$ and returns the empty list. -\item If one or more premises involves the meta-connectives $\Forall$ or - $\Imp$, then the command sets the goal to be $\phi$ and returns a list - consisting of the theorems ${\theta@i\;[\theta@i]}$, for $i=1$, \ldots,~$k$. - These meta-level assumptions are also recorded internally, allowing - \texttt{result} (which is called by \texttt{qed}) to discharge them in the - original order. -\end{itemize} -Rules that discharge assumptions or introduce eigenvariables have complex -premises, and the second case applies. In this section, many of the -theorems are subject to meta-level assumptions, so we make them visible by setting the -\ttindex{show_hyps} flag: -\begin{ttbox} -set show_hyps; -{\out val it = true : bool} -\end{ttbox} - -Now, we are ready to derive $\conj$ elimination. Until now, calling \texttt{Goal} has -returned an empty list, which we have ignored.
In this example, the list -contains the two premises of the rule, since one of them involves the $\Imp$ -connective. We bind them to the \ML\ identifiers \texttt{major} and {\tt - minor}:\footnote{Some ML compilers will print a message such as {\em binding - not exhaustive}. This warns that \texttt{Goal} must return a 2-element - list. Otherwise, the pattern-match will fail; ML will raise exception - \xdx{Match}.} -\begin{ttbox} -val [major,minor] = Goal - "[| P&Q; [| P; Q |] ==> R |] ==> R"; -{\out Level 0} -{\out R} -{\out 1. R} -{\out val major = "P & Q [P & Q]" : thm} -{\out val minor = "[| P; Q |] ==> R [[| P; Q |] ==> R]" : thm} -\end{ttbox} -Look at the minor premise, recalling that meta-level assumptions are -shown in brackets. Using \texttt{minor}, we reduce $R$ to the subgoals -$P$ and~$Q$: -\begin{ttbox} -by (resolve_tac [minor] 1); -{\out Level 1} -{\out R} -{\out 1. P} -{\out 2. Q} -\end{ttbox} -Deviating from~\S\ref{deriving}, we apply $({\conj}E1)$ forwards from the -assumption $P\conj Q$ to obtain the theorem~$P\;[P\conj Q]$. -\begin{ttbox} -major RS conjunct1; -{\out val it = "P [P & Q]" : thm} -\ttbreak -by (resolve_tac [major RS conjunct1] 1); -{\out Level 2} -{\out R} -{\out 1. Q} -\end{ttbox} -Similarly, we solve the subgoal involving~$Q$. -\begin{ttbox} -major RS conjunct2; -{\out val it = "Q [P & Q]" : thm} -by (resolve_tac [major RS conjunct2] 1); -{\out Level 3} -{\out R} -{\out No subgoals!} -\end{ttbox} -Calling \ttindex{topthm} returns the current proof state as a theorem. -Note that it contains assumptions. Calling \ttindex{qed} discharges -the assumptions --- both occurrences of $P\conj Q$ are discharged as -one --- and makes the variables schematic. -\begin{ttbox} -topthm(); -{\out val it = "R [P & Q, P & Q, [| P; Q |] ==> R]" : thm} -qed "conjE"; -{\out val conjE = "[| ?P & ?Q; [| ?P; ?Q |] ==> ?R |] ==> ?R" : thm} -\end{ttbox} - - -\subsection{Definitions and derived rules} \label{definitions} -\index{rules!derived}\index{definitions!and derived rules|(} - -Definitions are expressed as meta-level equalities. Let us define negation -and the if-and-only-if connective: -\begin{eqnarray*} - \neg \Var{P} & \equiv & \Var{P}\imp\bot \\ - \Var{P}\bimp \Var{Q} & \equiv & - (\Var{P}\imp \Var{Q}) \conj (\Var{Q}\imp \Var{P}) -\end{eqnarray*} -\index{meta-rewriting}% -Isabelle permits {\bf meta-level rewriting} using definitions such as -these. {\bf Unfolding} replaces every instance -of $\neg \Var{P}$ by the corresponding instance of ${\Var{P}\imp\bot}$. For -example, $\forall x.\neg (P(x)\conj \neg R(x,0))$ unfolds to -\[ \forall x.(P(x)\conj R(x,0)\imp\bot)\imp\bot. \] -{\bf Folding} a definition replaces occurrences of the right-hand side by -the left. The occurrences need not be free in the entire formula. - -When you define new concepts, you should derive rules asserting their -abstract properties, and then forget their definitions. This supports -modularity: if you later change the definitions without affecting their -abstract properties, then most of your proofs will carry through without -change. Indiscriminate unfolding makes a subgoal grow exponentially, -becoming unreadable. - -Taking this point of view, Isabelle does not unfold definitions -automatically during proofs. Rewriting must be explicit and selective. -Isabelle provides tactics and meta-rules for rewriting, and a version of -the \texttt{Goal} command that unfolds the conclusion and premises of the rule -being derived. 
- -For example, the intuitionistic definition of negation given above may seem -peculiar. Using Isabelle, we shall derive pleasanter negation rules: -\[ \infer[({\neg}I)]{\neg P}{\infer*{\bot}{[P]}} \qquad - \infer[({\neg}E)]{Q}{\neg P & P} \] -This requires proving the following meta-formulae: -$$ (P\Imp\bot) \Imp \neg P \eqno(\neg I) $$ -$$ \List{\neg P; P} \Imp Q. \eqno(\neg E) $$ - - -\subsection{Deriving the $\neg$ introduction rule} -To derive $(\neg I)$, we may call \texttt{Goal} with the appropriate formula. -Again, the rule's premises involve a meta-connective, and \texttt{Goal} -returns a one-element list. We bind this list to the \ML\ identifier \texttt{prems}. -\begin{ttbox} -val prems = Goal "(P ==> False) ==> ~P"; -{\out Level 0} -{\out ~P} -{\out 1. ~P} -{\out val prems = ["P ==> False [P ==> False]"] : thm list} -\end{ttbox} -Calling \ttindex{rewrite_goals_tac} with \tdx{not_def}, which is the -definition of negation, unfolds that definition in the subgoals. It leaves -the main goal alone. -\begin{ttbox} -not_def; -{\out val it = "~?P == ?P --> False" : thm} -by (rewrite_goals_tac [not_def]); -{\out Level 1} -{\out ~P} -{\out 1. P --> False} -\end{ttbox} -Using \tdx{impI} and the premise, we reduce subgoal~1 to a triviality: -\begin{ttbox} -by (resolve_tac [impI] 1); -{\out Level 2} -{\out ~P} -{\out 1. P ==> False} -\ttbreak -by (resolve_tac prems 1); -{\out Level 3} -{\out ~P} -{\out 1. P ==> P} -\end{ttbox} -The rest of the proof is routine. Note the form of the final result. -\begin{ttbox} -by (assume_tac 1); -{\out Level 4} -{\out ~P} -{\out No subgoals!} -\ttbreak -qed "notI"; -{\out val notI = "(?P ==> False) ==> ~?P" : thm} -\end{ttbox} -\indexbold{*notI theorem} - -There is a simpler way of conducting this proof. The \ttindex{Goalw} -command starts a backward proof, as does \texttt{Goal}, but it also -unfolds definitions. Thus there is no need to call -\ttindex{rewrite_goals_tac}: -\begin{ttbox} -val prems = Goalw [not_def] - "(P ==> False) ==> ~P"; -{\out Level 0} -{\out ~P} -{\out 1. P --> False} -{\out val prems = ["P ==> False [P ==> False]"] : thm list} -\end{ttbox} - - -\subsection{Deriving the $\neg$ elimination rule} -Let us derive the rule $(\neg E)$. The proof follows that of~\texttt{conjE} -above, with an additional step to unfold negation in the major premise. -The \texttt{Goalw} command is best for this: it unfolds definitions not only -in the conclusion but also in the premises. -\begin{ttbox} -Goalw [not_def] "[| ~P; P |] ==> R"; -{\out Level 0} -{\out [| ~ P; P |] ==> R} -{\out 1. [| P --> False; P |] ==> R} -\end{ttbox} -As the first step, we apply \tdx{FalseE}: -\begin{ttbox} -by (resolve_tac [FalseE] 1); -{\out Level 1} -{\out [| ~ P; P |] ==> R} -{\out 1. [| P --> False; P |] ==> False} -\end{ttbox} -% -Everything follows from falsity. And we can prove falsity using the -premises and Modus Ponens: -\begin{ttbox} -by (eresolve_tac [mp] 1); -{\out Level 2} -{\out [| ~ P; P |] ==> R} -{\out 1. P ==> P} -\ttbreak -by (assume_tac 1); -{\out Level 3} -{\out [| ~ P; P |] ==> R} -{\out No subgoals!} -\ttbreak -qed "notE"; -{\out val notE = "[| ~?P; ?P |] ==> ?R" : thm} -\end{ttbox} - - -\medskip -\texttt{Goalw} unfolds definitions in the premises even when it has to return -them as a list. Another way of unfolding definitions in a theorem is by -applying the function \ttindex{rewrite_rule}.
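-As a quick illustration, applying \ttindex{rewrite_rule} with \tdx{not_def} -to the theorem \texttt{notI} derived above should unfold the negation in its -conclusion. The following is a hypothetical transcript, not part of the -original session: -\begin{ttbox} -rewrite_rule [not_def] notI;  (*hypothetical example*) -{\out val it = "(?P ==> False) ==> ?P --> False" : thm} -\end{ttbox}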
- -\index{definitions!and derived rules|)} - - -\section{Defining theories}\label{sec:defining-theories} -\index{theories!defining|(} - -Isabelle makes no distinction between simple extensions of a logic --- -like specifying a type~$bool$ with constants~$true$ and~$false$ --- -and defining an entire logic. A theory definition has a form like -\begin{ttbox} -\(T\) = \(S@1\) + \(\cdots\) + \(S@n\) + -classes {\it class declarations} -default {\it sort} -types {\it type declarations and synonyms} -arities {\it type arity declarations} -consts {\it constant declarations} -syntax {\it syntactic constant declarations} -translations {\it ast translation rules} -defs {\it meta-logical definitions} -rules {\it rule declarations} -end -ML {\it ML code} -\end{ttbox} -This declares the theory $T$ to extend the existing theories -$S@1$,~\ldots,~$S@n$. It may introduce new classes, types, arities -(of existing types), constants and rules; it can specify the default -sort for type variables. A constant declaration can specify an -associated concrete syntax. The translations section specifies -rewrite rules on abstract syntax trees, handling notations and -abbreviations. \index{*ML section} The \texttt{ML} section may contain -code to perform arbitrary syntactic transformations. The main -declaration forms are discussed below. There are some more sections -not presented here; the full syntax can be found in -\iflabelundefined{app:TheorySyntax}{an appendix of the {\it Reference - Manual}}{App.\ts\ref{app:TheorySyntax}}. Also note that -object-logics may add further theory sections, for example -\texttt{typedef}, \texttt{datatype} in HOL. - -All the declaration parts can be omitted or repeated and may appear in -any order, except that the {\ML} section must be last (after the {\tt - end} keyword). In the simplest case, $T$ is just the union of -$S@1$,~\ldots,~$S@n$. New theories always extend one or more other -theories, inheriting their types, constants, syntax, etc. The theory -\thydx{Pure} contains nothing but Isabelle's meta-logic. The variant -\thydx{CPure} offers the more usual higher-order function application -syntax $t\,u@1\ldots\,u@n$ instead of $t(u@1,\ldots,u@n)$ in Pure. - -Each theory definition must reside in a separate file, whose name is -the theory's with {\tt.thy} appended. Calling -\ttindexbold{use_thy}~{\tt"{\it T\/}"} reads the definition from {\it - T}{\tt.thy}, writes a corresponding file of {\ML} code {\tt.{\it - T}.thy.ML}, reads the latter file, and deletes it if no errors -occurred. This declares the {\ML} structure~$T$, which contains a -component \texttt{thy} denoting the new theory, a component for each -rule, and everything declared in {\it ML code}. - -Errors may arise during the translation to {\ML} (say, a misspelled -keyword) or during creation of the new theory (say, a type error in a -rule). But if all goes well, \texttt{use_thy} will finally read the file -{\it T}{\tt.ML} (if it exists). This file typically contains proofs -that refer to the components of~$T$. The structure is automatically -opened, so its components may be referred to by unqualified names, -e.g.\ just \texttt{thy} instead of $T$\texttt{.thy}. - -\ttindexbold{use_thy} automatically loads a theory's parents before -loading the theory itself. When a theory file is modified, many -theories may have to be reloaded. Isabelle records the modification -times and dependencies of theory files.
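-For instance, once the theory \texttt{Nat} defined below has been loaded, a -session might inspect its components as follows (a hypothetical, abridged -transcript; the messages printed by \texttt{use_thy} are omitted): -\begin{ttbox} -use_thy "Nat";  (*loads Nat.thy and its parents*) -Nat.induct;    (*a component of the new structure*) -{\out val it = "[| ?P(0); !!x. ?P(x) ==> ?P(Suc(x)) |] ==> ?P(?n)" : thm} -\end{ttbox}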
See -\iflabelundefined{sec:reloading-theories}{the {\em Reference Manual\/}}% - {\S\ref{sec:reloading-theories}} -for more details. - - -\subsection{Declaring constants, definitions and rules} -\indexbold{constants!declaring}\index{rules!declaring} - -Most theories simply declare constants, definitions and rules. The {\bf - constant declaration part} has the form -\begin{ttbox} -consts \(c@1\) :: \(\tau@1\) - \vdots - \(c@n\) :: \(\tau@n\) -\end{ttbox} -where $c@1$, \ldots, $c@n$ are constants and $\tau@1$, \ldots, $\tau@n$ are -types. The types must be enclosed in quotation marks if they contain -user-declared infix type constructors like \texttt{*}. Each -constant must be enclosed in quotation marks unless it is a valid -identifier. To declare $c@1$, \ldots, $c@n$ as constants of type $\tau$, -the $n$ declarations may be abbreviated to a single line: -\begin{ttbox} - \(c@1\), \ldots, \(c@n\) :: \(\tau\) -\end{ttbox} -The {\bf rule declaration part} has the form -\begin{ttbox} -rules \(id@1\) "\(rule@1\)" - \vdots - \(id@n\) "\(rule@n\)" -\end{ttbox} -where $id@1$, \ldots, $id@n$ are \ML{} identifiers and $rule@1$, \ldots, -$rule@n$ are expressions of type~$prop$. Each rule {\em must\/} be -enclosed in quotation marks. Rules are simply axioms; they are -called \emph{rules} because they are mainly used to specify the inference -rules when defining a new logic. - -\indexbold{definitions} The {\bf definition part} is similar, but with -the keyword \texttt{defs} instead of \texttt{rules}. {\bf Definitions} are -rules of the form $s \equiv t$, and should serve only as -abbreviations. The simplest form of a definition is $f \equiv t$, -where $f$ is a constant. Also allowed are $\eta$-equivalent forms of -this, where the arguments of~$f$ appear applied on the left-hand side -of the equation instead of abstracted on the right-hand side. - -Isabelle checks for common errors in definitions, such as extra -variables on the right-hand side and cyclic dependencies, that could -lead to inconsistency. It is still essential to take care: -theorems proved on the basis of incorrect definitions are useless; -your system can be consistent and yet still wrong. - -\index{examples!of theories} This example theory extends first-order -logic by declaring and defining two constants, {\em nand} and {\em - xor}: -\begin{ttbox} -Gate = FOL + -consts nand,xor :: [o,o] => o -defs nand_def "nand(P,Q) == ~(P & Q)" - xor_def "xor(P,Q) == P & ~Q | ~P & Q" -end -\end{ttbox} - -Declaring and defining constants can be combined: -\begin{ttbox} -Gate = FOL + -constdefs nand :: [o,o] => o - "nand(P,Q) == ~(P & Q)" - xor :: [o,o] => o - "xor(P,Q) == P & ~Q | ~P & Q" -end -\end{ttbox} -\texttt{constdefs} generates the names \texttt{nand_def} and \texttt{xor_def} -automatically, which is why it is restricted to alphanumeric identifiers. In -general it has the form -\begin{ttbox} -constdefs \(id@1\) :: \(\tau@1\) - "\(id@1 \equiv \dots\)" - \vdots - \(id@n\) :: \(\tau@n\) - "\(id@n \equiv \dots\)" -\end{ttbox} - - -\begin{warn} -A common mistake when writing definitions is to introduce extra free variables -on the right-hand side as in the following fictitious definition: -\begin{ttbox} -defs prime_def "prime(p) == (m divides p) --> (m=1 | m=p)" -\end{ttbox} -Isabelle rejects this ``definition'' because of the extra \texttt{m} on the -right-hand side, which would introduce an inconsistency. What you should have -written is -\begin{ttbox} -defs prime_def "prime(p) == ALL m.
(m divides p) --> (m=1 | m=p)" -\end{ttbox} -\end{warn} - -\subsection{Declaring type constructors} -\indexbold{types!declaring}\indexbold{arities!declaring} -% -Types are composed of type variables and {\bf type constructors}. Each -type constructor takes a fixed number of arguments. They are declared -with an \ML-like syntax. If $list$ takes one type argument, $tree$ takes -two arguments and $nat$ takes no arguments, then these type constructors -can be declared by -\begin{ttbox} -types 'a list - ('a,'b) tree - nat -\end{ttbox} - -The {\bf type declaration part} has the general form -\begin{ttbox} -types \(tids@1\) \(id@1\) - \vdots - \(tids@n\) \(id@n\) -\end{ttbox} -where $id@1$, \ldots, $id@n$ are identifiers and $tids@1$, \ldots, $tids@n$ -are type argument lists as shown in the example above. It declares each -$id@i$ as a type constructor with the specified number of argument places. - -The {\bf arity declaration part} has the form -\begin{ttbox} -arities \(tycon@1\) :: \(arity@1\) - \vdots - \(tycon@n\) :: \(arity@n\) -\end{ttbox} -where $tycon@1$, \ldots, $tycon@n$ are identifiers and $arity@1$, \ldots, -$arity@n$ are arities. Arity declarations add arities to existing -types; they do not declare the types themselves. -In the simplest case, for a 0-place type constructor, an arity is simply -the type's class. Let us declare a type~$bool$ of class $term$, with -constants $tt$ and~$ff$. (In first-order logic, booleans are -distinct from formulae, which have type $o::logic$.) -\index{examples!of theories} -\begin{ttbox} -Bool = FOL + -types bool -arities bool :: term -consts tt,ff :: bool -end -\end{ttbox} -A $k$-place type constructor may have arities of the form -$(s@1,\ldots,s@k)c$, where $s@1,\ldots,s@k$ are sorts and $c$ is a class. -Each sort specifies a type argument; it has the form $\{c@1,\ldots,c@m\}$, -where $c@1$, \dots,~$c@m$ are classes. Mostly we deal with singleton -sorts, and may abbreviate them by dropping the braces. The arity -$(term)term$ is short for $(\{term\})term$. Recall the discussion in -\S\ref{polymorphic}. - -A type constructor may be overloaded (subject to certain conditions) by -appearing in several arity declarations. For instance, the function type -constructor~$fun$ has the arity $(logic,logic)logic$; in higher-order -logic, it is declared also to have arity $(term,term)term$. - -Theory \texttt{List} declares the 1-place type constructor $list$, gives -it the arity $(term)term$, and declares constants $Nil$ and $Cons$ with -polymorphic types:% -\footnote{In the \texttt{consts} part, type variable {\tt'a} has the default - sort, which is \texttt{term}. See the {\em Reference Manual\/} -\iflabelundefined{sec:ref-defining-theories}{}% -{(\S\ref{sec:ref-defining-theories})} for more information.} -\index{examples!of theories} -\begin{ttbox} -List = FOL + -types 'a list -arities list :: (term)term -consts Nil :: 'a list - Cons :: ['a, 'a list] => 'a list -end -\end{ttbox} -Multiple arity declarations may be abbreviated to a single line: -\begin{ttbox} -arities \(tycon@1\), \ldots, \(tycon@n\) :: \(arity\) -\end{ttbox} - -%\begin{warn} -%Arity declarations resemble constant declarations, but there are {\it no\/} -%quotation marks! Types and rules must be quoted because the theory -%translator passes them verbatim to the {\ML} output file. -%\end{warn} - -\subsection{Type synonyms}\indexbold{type synonyms} -Isabelle supports {\bf type synonyms} ({\bf abbreviations}) which are similar -to those found in \ML.
Such synonyms are defined in the type declaration part -and are fairly self-explanatory: -\begin{ttbox} -types gate = [o,o] => o - 'a pred = 'a => o - ('a,'b)nuf = 'b => 'a -\end{ttbox} -Type declarations and synonyms can be mixed arbitrarily: -\begin{ttbox} -types nat - 'a stream = nat => 'a - signal = nat stream - 'a list -\end{ttbox} -A synonym is merely an abbreviation for some existing type expression. -Hence synonyms may not be recursive! Internally all synonyms are -fully expanded. As a consequence Isabelle output never contains -synonyms. Their main purpose is to improve the readability of theory -definitions. Synonyms can be used just like any other type: -\begin{ttbox} -consts and,or :: gate - negate :: signal => signal -\end{ttbox} - -\subsection{Infix and mixfix operators} -\index{infixes}\index{examples!of theories} - -Infix or mixfix syntax may be attached to constants. Consider the -following theory: -\begin{ttbox} -Gate2 = FOL + -consts "~&" :: [o,o] => o (infixl 35) - "#" :: [o,o] => o (infixl 30) -defs nand_def "P ~& Q == ~(P & Q)" - xor_def "P # Q == P & ~Q | ~P & Q" -end -\end{ttbox} -The constant declaration part declares two left-associating infix operators -with their priorities, or precedences; they are $\nand$ of priority~35 and -$\xor$ of priority~30. Hence $P \xor Q \xor R$ is parsed as $(P\xor Q) -\xor R$ and $P \xor Q \nand R$ as $P \xor (Q \nand R)$. Note the quotation -marks in \verb|"~&"| and \verb|"#"|. - -The constants \hbox{\verb|op ~&|} and \hbox{\verb|op #|} are declared -automatically, just as in \ML. Hence you may write propositions like -\verb|op #(True) == op ~&(True)|, which asserts that the functions $\lambda -Q.True \xor Q$ and $\lambda Q.True \nand Q$ are identical. - -\medskip Infix syntax and constant names may also be specified -independently. For example, consider this version of $\nand$: -\begin{ttbox} -consts nand :: [o,o] => o (infixl "~&" 35) -\end{ttbox} - -\bigskip\index{mixfix declarations} -{\bf Mixfix} operators may have arbitrary context-free syntaxes. Let us -add a line to the constant declaration part: -\begin{ttbox} - If :: [o,o,o] => o ("if _ then _ else _") -\end{ttbox} -This declares a constant $If$ of type $[o,o,o] \To o$ with concrete syntax {\tt - if~$P$ then~$Q$ else~$R$} as well as \texttt{If($P$,$Q$,$R$)}. Underscores -denote argument positions. - -The declaration above does not allow the \texttt{if}-\texttt{then}-{\tt - else} construct to be printed split across several lines, even if it -is too long to fit on one line. Pretty-printing information can be -added to specify the layout of mixfix operators. For details, see -\iflabelundefined{Defining-Logics}% - {the {\it Reference Manual}, chapter `Defining Logics'}% - {Chap.\ts\ref{Defining-Logics}}. - -Mixfix declarations can be annotated with priorities, just like -infixes. The example above is just a shorthand for -\begin{ttbox} - If :: [o,o,o] => o ("if _ then _ else _" [0,0,0] 1000) -\end{ttbox} -The numeric components determine priorities. The list of integers -defines, for each argument position, the minimal priority an expression -at that position must have. The final integer is the priority of the -construct itself. In the example above, any argument expression is -acceptable because priorities are non-negative, and conditionals may -appear everywhere because 1000 is the highest priority.
On the other -hand, the declaration -\begin{ttbox} - If :: [o,o,o] => o ("if _ then _ else _" [100,0,0] 99) -\end{ttbox} -defines concrete syntax for a conditional whose first argument cannot have -the form \texttt{if~$P$ then~$Q$ else~$R$} because it must have a priority -of at least~100. We may of course write -\begin{quote}\tt -if (if $P$ then $Q$ else $R$) then $S$ else $T$ -\end{quote} -because expressions in parentheses have maximal priority. - -Binary type constructors, like products and sums, may also be declared as -infixes. The type declaration below introduces a type constructor~$*$ with -infix notation $\alpha*\beta$, together with the mixfix notation -${<}\_,\_{>}$ for pairs. We also see a rule declaration part. -\index{examples!of theories}\index{mixfix declarations} -\begin{ttbox} -Prod = FOL + -types ('a,'b) "*" (infixl 20) -arities "*" :: (term,term)term -consts fst :: "'a * 'b => 'a" - snd :: "'a * 'b => 'b" - Pair :: "['a,'b] => 'a * 'b" ("(1<_,/_>)") -rules fst "fst(<a,b>) = a" - snd "snd(<a,b>) = b" -end -\end{ttbox} - -\begin{warn} - The name of the type constructor is~\texttt{*} and not \texttt{op~*}, as - it would be in the case of an infix constant. Only infix type - constructors can have symbolic names like~\texttt{*}. General mixfix - syntax for types may be introduced via appropriate \texttt{syntax} - declarations. -\end{warn} - - -\subsection{Overloading} -\index{overloading}\index{examples!of theories} -The {\bf class declaration part} has the form -\begin{ttbox} -classes \(id@1\) < \(c@1\) - \vdots - \(id@n\) < \(c@n\) -\end{ttbox} -where $id@1$, \ldots, $id@n$ are identifiers and $c@1$, \ldots, $c@n$ are -existing classes. It declares each $id@i$ as a new class, a subclass -of~$c@i$. In the general case, an identifier may be declared to be a -subclass of $k$ existing classes: -\begin{ttbox} - \(id\) < \(c@1\), \ldots, \(c@k\) -\end{ttbox} -Type classes allow constants to be overloaded. As suggested in -\S\ref{polymorphic}, let us define the class $arith$ of arithmetic -types with the constants ${+} :: [\alpha,\alpha]\To \alpha$ and $0,1 {::} -\alpha$, for $\alpha{::}arith$. We introduce $arith$ as a subclass of -$term$ and add the three polymorphic constants of this class. -\index{examples!of theories}\index{constants!overloaded} -\begin{ttbox} -Arith = FOL + -classes arith < term -consts "0" :: 'a::arith ("0") - "1" :: 'a::arith ("1") - "+" :: ['a::arith,'a] => 'a (infixl 60) -end -\end{ttbox} -No rules are declared for these constants: we merely introduce their -names without specifying properties. On the other hand, classes -with rules make it possible to prove {\bf generic} theorems. Such -theorems hold for all instances, all types in that class. - -We can now obtain distinct versions of the constants of $arith$ by -declaring certain types to be of class $arith$. For example, let us -declare the 0-place type constructors $bool$ and $nat$: -\index{examples!of theories} -\begin{ttbox} -BoolNat = Arith + -types bool nat -arities bool, nat :: arith -consts Suc :: nat=>nat -\ttbreak -rules add0 "0 + n = n::nat" - addS "Suc(m)+n = Suc(m+n)" - nat1 "1 = Suc(0)" - or0l "0 + x = x::bool" - or0r "x + 0 = x::bool" - or1l "1 + x = 1::bool" - or1r "x + 1 = 1::bool" -end -\end{ttbox} -Because $nat$ and $bool$ have class $arith$, we can use $0$, $1$ and $+$ at -either type. The type constraints in the axioms are vital. Without -constraints, the $x$ in $1+x = 1$ (axiom \texttt{or1l}) -would have type $\alpha{::}arith$ -and the axiom would hold for any type of class $arith$.
This would -collapse $nat$ to a trivial type: -\[ Suc(1) = Suc(0+1) = Suc(0)+1 = 1+1 = 1! \] - - -\section{Theory example: the natural numbers} - -We shall now work through a small example of formalized mathematics -demonstrating many of the theory extension features. - - -\subsection{Extending first-order logic with the natural numbers} -\index{examples!of theories} - -Section\ts\ref{sec:logical-syntax} has formalized a first-order logic, -including a type~$nat$ and the constants $0::nat$ and $Suc::nat\To nat$. -Let us introduce the Peano axioms for mathematical induction and the -freeness of $0$ and~$Suc$:\index{axioms!Peano} -\[ \vcenter{\infer[(induct)]{P[n/x]}{P[0/x] & \infer*{P[Suc(x)/x]}{[P]}}} - \qquad \parbox{4.5cm}{provided $x$ is not free in any assumption except~$P$} -\] -\[ \infer[(Suc\_inject)]{m=n}{Suc(m)=Suc(n)} \qquad - \infer[(Suc\_neq\_0)]{R}{Suc(m)=0} -\] -Mathematical induction asserts that $P(n)$ is true, for any $n::nat$, -provided $P(0)$ holds and that $P(x)$ implies $P(Suc(x))$ for all~$x$. -Some authors express the induction step as $\forall x. P(x)\imp P(Suc(x))$. -To avoid making induction require the presence of other connectives, we -formalize mathematical induction as -$$ \List{P(0); \Forall x. P(x)\Imp P(Suc(x))} \Imp P(n). \eqno(induct) $$ - -\noindent -Similarly, to avoid expressing the other rules using~$\forall$, $\imp$ -and~$\neg$, we take advantage of the meta-logic;\footnote -{On the other hand, the axioms $Suc(m)=Suc(n) \bimp m=n$ -and $\neg(Suc(m)=0)$ are logically equivalent to those given, and work -better with Isabelle's simplifier.} -$(Suc\_neq\_0)$ is -an elimination rule for $Suc(m)=0$: -$$ Suc(m)=Suc(n) \Imp m=n \eqno(Suc\_inject) $$ -$$ Suc(m)=0 \Imp R \eqno(Suc\_neq\_0) $$ - -\noindent -We shall also define a primitive recursion operator, $rec$. Traditionally, -primitive recursion takes a natural number~$a$ and a 2-place function~$f$, -and obeys the equations -\begin{eqnarray*} - rec(0,a,f) & = & a \\ - rec(Suc(m),a,f) & = & f(m, rec(m,a,f)) -\end{eqnarray*} -Addition, defined by $m+n \equiv rec(m,n,\lambda x\,y.Suc(y))$, -should satisfy -\begin{eqnarray*} - 0+n & = & n \\ - Suc(m)+n & = & Suc(m+n) -\end{eqnarray*} -Primitive recursion appears to pose difficulties: first-order logic has no -function-valued expressions. We again take advantage of the meta-logic, -which does have functions. We also generalise primitive recursion to be -polymorphic over any type of class~$term$, and declare the addition -function: -\begin{eqnarray*} - rec & :: & [nat, \alpha{::}term, [nat,\alpha]\To\alpha] \To\alpha \\ - + & :: & [nat,nat]\To nat -\end{eqnarray*} - - -\subsection{Declaring the theory to Isabelle} -\index{examples!of theories} -Let us create the theory \thydx{Nat} starting from theory~\verb$FOL$, -which contains only classical logic with no natural numbers. We declare -the 0-place type constructor $nat$ and the associated constants. Note that -the constant~0 requires a mixfix annotation because~0 is not a legal -identifier, and could not otherwise be written in terms: -\begin{ttbox}\index{mixfix declarations} -Nat = FOL + -types nat -arities nat :: term -consts "0" :: nat ("0") - Suc :: nat=>nat - rec :: [nat, 'a, [nat,'a]=>'a] => 'a - "+" :: [nat, nat] => nat (infixl 60) -rules Suc_inject "Suc(m)=Suc(n) ==> m=n" - Suc_neq_0 "Suc(m)=0 ==> R" - induct "[| P(0); !!x. P(x) ==> P(Suc(x)) |] ==> P(n)" - rec_0 "rec(0,a,f) = a" - rec_Suc "rec(Suc(m), a, f) = f(m, rec(m,a,f))" - add_def "m+n == rec(m, n, \%x y. 
Suc(y))" -end -\end{ttbox} -In axiom \texttt{add_def}, recall that \verb|%| stands for~$\lambda$. -Loading this theory file creates the \ML\ structure \texttt{Nat}, which -contains the theory and axioms. - -\subsection{Proving some recursion equations} -Theory \texttt{FOL/ex/Nat} contains proofs involving this theory of the -natural numbers. As a trivial example, let us derive recursion equations -for \verb$+$. Here is the zero case: -\begin{ttbox} -Goalw [add_def] "0+n = n"; -{\out Level 0} -{\out 0 + n = n} -{\out 1. rec(0,n,\%x y. Suc(y)) = n} -\ttbreak -by (resolve_tac [rec_0] 1); -{\out Level 1} -{\out 0 + n = n} -{\out No subgoals!} -qed "add_0"; -\end{ttbox} -And here is the successor case: -\begin{ttbox} -Goalw [add_def] "Suc(m)+n = Suc(m+n)"; -{\out Level 0} -{\out Suc(m) + n = Suc(m + n)} -{\out 1. rec(Suc(m),n,\%x y. Suc(y)) = Suc(rec(m,n,\%x y. Suc(y)))} -\ttbreak -by (resolve_tac [rec_Suc] 1); -{\out Level 1} -{\out Suc(m) + n = Suc(m + n)} -{\out No subgoals!} -qed "add_Suc"; -\end{ttbox} -The induction rule raises some complications, which are discussed next. -\index{theories!defining|)} - - -\section{Refinement with explicit instantiation} -\index{resolution!with instantiation} -\index{instantiation|(} - -In order to employ mathematical induction, we need to refine a subgoal by -the rule~$(induct)$. The conclusion of this rule is $\Var{P}(\Var{n})$, -which is highly ambiguous in higher-order unification. It matches every -way that a formula can be regarded as depending on a subterm of type~$nat$. -To get round this problem, we could make the induction rule conclude -$\forall n.\Var{P}(n)$ --- but putting a subgoal into this form requires -refinement by~$(\forall E)$, which is equally hard! - -The tactic \texttt{res_inst_tac}, like \texttt{resolve_tac}, refines a subgoal by -a rule. But it also accepts explicit instantiations for the rule's -schematic variables. -\begin{description} -\item[\ttindex{res_inst_tac} {\it insts} {\it thm} {\it i}] -instantiates the rule {\it thm} with the instantiations {\it insts}, and -then performs resolution on subgoal~$i$. - -\item[\ttindex{eres_inst_tac}] -and \ttindex{dres_inst_tac} are similar, but perform elim-resolution -and destruct-resolution, respectively. -\end{description} -The list {\it insts} consists of pairs $[(v@1,e@1), \ldots, (v@n,e@n)]$, -where $v@1$, \ldots, $v@n$ are names of schematic variables in the rule --- -with no leading question marks! --- and $e@1$, \ldots, $e@n$ are -expressions giving their instantiations. The expressions are type-checked -in the context of a particular subgoal: free variables receive the same -types as they have in the subgoal, and parameters may appear. Type -variable instantiations may appear in~{\it insts}, but they are seldom -required: \texttt{res_inst_tac} instantiates type variables automatically -whenever the type of~$e@i$ is an instance of the type of~$\Var{v@i}$. - -\subsection{A simple proof by induction} -\index{examples!of induction} -Let us prove that no natural number~$k$ equals its own successor. To -use~$(induct)$, we instantiate~$\Var{n}$ to~$k$; Isabelle finds a good -instantiation for~$\Var{P}$. -\begin{ttbox} -Goal "~ (Suc(k) = k)"; -{\out Level 0} -{\out Suc(k) ~= k} -{\out 1. Suc(k) ~= k} -\ttbreak -by (res_inst_tac [("n","k")] induct 1); -{\out Level 1} -{\out Suc(k) ~= k} -{\out 1. Suc(0) ~= 0} -{\out 2. !!x. Suc(x) ~= x ==> Suc(Suc(x)) ~= Suc(x)} -\end{ttbox} -We should check that Isabelle has correctly applied induction. 
Subgoal~1 -is the base case, with $k$ replaced by~0. Subgoal~2 is the inductive step, -with $k$ replaced by~$Suc(x)$ and with an induction hypothesis for~$x$. -The rest of the proof demonstrates~\tdx{notI}, \tdx{notE} and the -other rules of theory \texttt{Nat}. The base case holds by~\ttindex{Suc_neq_0}: -\begin{ttbox} -by (resolve_tac [notI] 1); -{\out Level 2} -{\out Suc(k) ~= k} -{\out 1. Suc(0) = 0 ==> False} -{\out 2. !!x. Suc(x) ~= x ==> Suc(Suc(x)) ~= Suc(x)} -\ttbreak -by (eresolve_tac [Suc_neq_0] 1); -{\out Level 3} -{\out Suc(k) ~= k} -{\out 1. !!x. Suc(x) ~= x ==> Suc(Suc(x)) ~= Suc(x)} -\end{ttbox} -The inductive step holds by the contrapositive of~\ttindex{Suc_inject}. -Negation rules transform the subgoal into that of proving $Suc(x)=x$ from -$Suc(Suc(x)) = Suc(x)$: -\begin{ttbox} -by (resolve_tac [notI] 1); -{\out Level 4} -{\out Suc(k) ~= k} -{\out 1. !!x. [| Suc(x) ~= x; Suc(Suc(x)) = Suc(x) |] ==> False} -\ttbreak -by (eresolve_tac [notE] 1); -{\out Level 5} -{\out Suc(k) ~= k} -{\out 1. !!x. Suc(Suc(x)) = Suc(x) ==> Suc(x) = x} -\ttbreak -by (eresolve_tac [Suc_inject] 1); -{\out Level 6} -{\out Suc(k) ~= k} -{\out No subgoals!} -\end{ttbox} - - -\subsection{An example of ambiguity in \texttt{resolve_tac}} -\index{examples!of induction}\index{unification!higher-order} -If you try the example above, you may observe that \texttt{res_inst_tac} is -not actually needed. Almost by chance, \ttindex{resolve_tac} finds the right -instantiation for~$(induct)$ to yield the desired next state. With more -complex formulae, our luck fails. -\begin{ttbox} -Goal "(k+m)+n = k+(m+n)"; -{\out Level 0} -{\out k + m + n = k + (m + n)} -{\out 1. k + m + n = k + (m + n)} -\ttbreak -by (resolve_tac [induct] 1); -{\out Level 1} -{\out k + m + n = k + (m + n)} -{\out 1. k + m + n = 0} -{\out 2. !!x. k + m + n = x ==> k + m + n = Suc(x)} -\end{ttbox} -This proof requires induction on~$k$. The occurrence of~0 in subgoal~1 -indicates that induction has been applied to the term~$k+(m+n)$; this -application is sound but will not lead to a proof here. Fortunately, -Isabelle can (lazily!) generate all the valid applications of induction. -The \ttindex{back} command causes backtracking to an alternative outcome of -the tactic. -\begin{ttbox} -back(); -{\out Level 1} -{\out k + m + n = k + (m + n)} -{\out 1. k + m + n = k + 0} -{\out 2. !!x. k + m + n = k + x ==> k + m + n = k + Suc(x)} -\end{ttbox} -Now induction has been applied to~$m+n$. This is equally useless. Let us -call \ttindex{back} again. -\begin{ttbox} -back(); -{\out Level 1} -{\out k + m + n = k + (m + n)} -{\out 1. k + m + 0 = k + (m + 0)} -{\out 2. !!x. k + m + x = k + (m + x) ==>} -{\out k + m + Suc(x) = k + (m + Suc(x))} -\end{ttbox} -Now induction has been applied to~$n$. What is the next alternative? -\begin{ttbox} -back(); -{\out Level 1} -{\out k + m + n = k + (m + n)} -{\out 1. k + m + n = k + (m + 0)} -{\out 2. !!x. k + m + n = k + (m + x) ==> k + m + n = k + (m + Suc(x))} -\end{ttbox} -Inspecting subgoal~1 reveals that induction has been applied to just the -second occurrence of~$n$. This perfectly legitimate induction is useless -here. - -The main goal admits fourteen different applications of induction. The -number is exponential in the size of the formula. - -\subsection{Proving that addition is associative} -Let us invoke the induction rule properly, using~{\tt - res_inst_tac}. 
At the same time, we shall have a glimpse at Isabelle's -simplification tactics, which are described in -\iflabelundefined{simp-chap}% - {the {\em Reference Manual}}{Chap.\ts\ref{simp-chap}}. - -\index{simplification}\index{examples!of simplification} - -Isabelle's simplification tactics repeatedly apply equations to a subgoal, -perhaps proving it. For efficiency, the rewrite rules must be packaged into a -{\bf simplification set},\index{simplification sets} or {\bf simpset}. We -augment the implicit simpset of FOL with the equations proved in the previous -section, namely $0+n=n$ and $\texttt{Suc}(m)+n=\texttt{Suc}(m+n)$: -\begin{ttbox} -Addsimps [add_0, add_Suc]; -\end{ttbox} -We state the goal for associativity of addition, and -use \ttindex{res_inst_tac} to invoke induction on~$k$: -\begin{ttbox} -Goal "(k+m)+n = k+(m+n)"; -{\out Level 0} -{\out k + m + n = k + (m + n)} -{\out 1. k + m + n = k + (m + n)} -\ttbreak -by (res_inst_tac [("n","k")] induct 1); -{\out Level 1} -{\out k + m + n = k + (m + n)} -{\out 1. 0 + m + n = 0 + (m + n)} -{\out 2. !!x. x + m + n = x + (m + n) ==>} -{\out Suc(x) + m + n = Suc(x) + (m + n)} -\end{ttbox} -The base case holds easily; both sides reduce to $m+n$. The -tactic~\ttindex{Simp_tac} rewrites with respect to the current -simplification set, applying the rewrite rules for addition: -\begin{ttbox} -by (Simp_tac 1); -{\out Level 2} -{\out k + m + n = k + (m + n)} -{\out 1. !!x. x + m + n = x + (m + n) ==>} -{\out Suc(x) + m + n = Suc(x) + (m + n)} -\end{ttbox} -The inductive step requires rewriting by the equations for addition -and with the induction hypothesis, which is also an equation. The -tactic~\ttindex{Asm_simp_tac} rewrites using the implicit -simplification set and any useful assumptions: -\begin{ttbox} -by (Asm_simp_tac 1); -{\out Level 3} -{\out k + m + n = k + (m + n)} -{\out No subgoals!} -\end{ttbox} -\index{instantiation|)} - - -\section{A Prolog interpreter} -\index{Prolog interpreter|bold} -To demonstrate the power of tacticals, let us construct a Prolog -interpreter and execute programs involving lists.\footnote{To run these -examples, see the file \texttt{FOL/ex/Prolog.ML}.} The Prolog program -consists of a theory. We declare a type constructor for lists, with an -arity declaration to say that $(\tau)list$ is of class~$term$ -provided~$\tau$ is: -\begin{eqnarray*} - list & :: & (term)term -\end{eqnarray*} -We declare four constants: the empty list~$Nil$; the infix list -constructor~{:}; the list concatenation predicate~$app$; the list reverse -predicate~$rev$. (In Prolog, functions on lists are expressed as -predicates.) -\begin{eqnarray*} - Nil & :: & \alpha list \\ - {:} & :: & [\alpha,\alpha list] \To \alpha list \\ - app & :: & [\alpha list,\alpha list,\alpha list] \To o \\ - rev & :: & [\alpha list,\alpha list] \To o -\end{eqnarray*} -The predicate $app$ should satisfy the Prolog-style rules -\[ {app(Nil,ys,ys)} \qquad - {app(xs,ys,zs) \over app(x:xs, ys, x:zs)} \] -We define the naive version of $rev$, which calls~$app$: -\[ {rev(Nil,Nil)} \qquad - {rev(xs,ys)\quad app(ys, x:Nil, zs) \over - rev(x:xs, zs)} -\] - -\index{examples!of theories} -Theory \thydx{Prolog} extends first-order logic in order to make use -of the class~$term$ and the type~$o$. The interpreter does not use the -rules of~\texttt{FOL}. 
-\begin{ttbox} -Prolog = FOL + -types 'a list -arities list :: (term)term -consts Nil :: 'a list - ":" :: ['a, 'a list]=> 'a list (infixr 60) - app :: ['a list, 'a list, 'a list] => o - rev :: ['a list, 'a list] => o -rules appNil "app(Nil,ys,ys)" - appCons "app(xs,ys,zs) ==> app(x:xs, ys, x:zs)" - revNil "rev(Nil,Nil)" - revCons "[| rev(xs,ys); app(ys,x:Nil,zs) |] ==> rev(x:xs,zs)" -end -\end{ttbox} -\subsection{Simple executions} -Repeated application of the rules solves Prolog goals. Let us -append the lists $[a,b,c]$ and~$[d,e]$. As the rules are applied, the -answer builds up in~\texttt{?x}. -\begin{ttbox} -Goal "app(a:b:c:Nil, d:e:Nil, ?x)"; -{\out Level 0} -{\out app(a : b : c : Nil, d : e : Nil, ?x)} -{\out 1. app(a : b : c : Nil, d : e : Nil, ?x)} -\ttbreak -by (resolve_tac [appNil,appCons] 1); -{\out Level 1} -{\out app(a : b : c : Nil, d : e : Nil, a : ?zs1)} -{\out 1. app(b : c : Nil, d : e : Nil, ?zs1)} -\ttbreak -by (resolve_tac [appNil,appCons] 1); -{\out Level 2} -{\out app(a : b : c : Nil, d : e : Nil, a : b : ?zs2)} -{\out 1. app(c : Nil, d : e : Nil, ?zs2)} -\end{ttbox} -At this point, the first two elements of the result are~$a$ and~$b$. -\begin{ttbox} -by (resolve_tac [appNil,appCons] 1); -{\out Level 3} -{\out app(a : b : c : Nil, d : e : Nil, a : b : c : ?zs3)} -{\out 1. app(Nil, d : e : Nil, ?zs3)} -\ttbreak -by (resolve_tac [appNil,appCons] 1); -{\out Level 4} -{\out app(a : b : c : Nil, d : e : Nil, a : b : c : d : e : Nil)} -{\out No subgoals!} -\end{ttbox} - -Prolog can run functions backwards. Which list can be appended -with $[c,d]$ to produce $[a,b,c,d]$? -Using \ttindex{REPEAT}, we find the answer at once, $[a,b]$: -\begin{ttbox} -Goal "app(?x, c:d:Nil, a:b:c:d:Nil)"; -{\out Level 0} -{\out app(?x, c : d : Nil, a : b : c : d : Nil)} -{\out 1. app(?x, c : d : Nil, a : b : c : d : Nil)} -\ttbreak -by (REPEAT (resolve_tac [appNil,appCons] 1)); -{\out Level 1} -{\out app(a : b : Nil, c : d : Nil, a : b : c : d : Nil)} -{\out No subgoals!} -\end{ttbox} - - -\subsection{Backtracking}\index{backtracking!Prolog style} -Prolog backtracking can answer questions that have multiple solutions. -Which lists $x$ and $y$ can be appended to form the list $[a,b,c,d]$? This -question has five solutions. Using \ttindex{REPEAT} to apply the rules, we -quickly find the first solution, namely $x=[]$ and $y=[a,b,c,d]$: -\begin{ttbox} -Goal "app(?x, ?y, a:b:c:d:Nil)"; -{\out Level 0} -{\out app(?x, ?y, a : b : c : d : Nil)} -{\out 1. app(?x, ?y, a : b : c : d : Nil)} -\ttbreak -by (REPEAT (resolve_tac [appNil,appCons] 1)); -{\out Level 1} -{\out app(Nil, a : b : c : d : Nil, a : b : c : d : Nil)} -{\out No subgoals!} -\end{ttbox} -Isabelle can lazily generate all the possibilities. The \ttindex{back} -command returns the tactic's next outcome, namely $x=[a]$ and $y=[b,c,d]$: -\begin{ttbox} -back(); -{\out Level 1} -{\out app(a : Nil, b : c : d : Nil, a : b : c : d : Nil)} -{\out No subgoals!} -\end{ttbox} -The other solutions are generated similarly. -\begin{ttbox} -back(); -{\out Level 1} -{\out app(a : b : Nil, c : d : Nil, a : b : c : d : Nil)} -{\out No subgoals!} -\ttbreak -back(); -{\out Level 1} -{\out app(a : b : c : Nil, d : Nil, a : b : c : d : Nil)} -{\out No subgoals!} -\ttbreak -back(); -{\out Level 1} -{\out app(a : b : c : d : Nil, Nil, a : b : c : d : Nil)} -{\out No subgoals!} -\end{ttbox} - - -\subsection{Depth-first search} -\index{search!depth-first} -Now let us try $rev$, reversing a list. -Bundle the rules together as the \ML{} identifier \texttt{rules}. 
Naive -reverse requires 120 inferences for this 14-element list, but the tactic -terminates in a few seconds. -\begin{ttbox} -Goal "rev(a:b:c:d:e:f:g:h:i:j:k:l:m:n:Nil, ?w)"; -{\out Level 0} -{\out rev(a : b : c : d : e : f : g : h : i : j : k : l : m : n : Nil, ?w)} -{\out 1. rev(a : b : c : d : e : f : g : h : i : j : k : l : m : n : Nil,} -{\out ?w)} -\ttbreak -val rules = [appNil,appCons,revNil,revCons]; -\ttbreak -by (REPEAT (resolve_tac rules 1)); -{\out Level 1} -{\out rev(a : b : c : d : e : f : g : h : i : j : k : l : m : n : Nil,} -{\out n : m : l : k : j : i : h : g : f : e : d : c : b : a : Nil)} -{\out No subgoals!} -\end{ttbox} -We may execute $rev$ backwards. This, too, should reverse a list. What -is the reverse of $[a,b,c]$? -\begin{ttbox} -Goal "rev(?x, a:b:c:Nil)"; -{\out Level 0} -{\out rev(?x, a : b : c : Nil)} -{\out 1. rev(?x, a : b : c : Nil)} -\ttbreak -by (REPEAT (resolve_tac rules 1)); -{\out Level 1} -{\out rev(?x1 : Nil, a : b : c : Nil)} -{\out 1. app(Nil, ?x1 : Nil, a : b : c : Nil)} -\end{ttbox} -The tactic has failed to find a solution! It reached a dead end at -subgoal~1: there is no~$\Var{x@1}$ such that [] appended with~$[\Var{x@1}]$ -equals~$[a,b,c]$. Backtracking explores other outcomes. -\begin{ttbox} -back(); -{\out Level 1} -{\out rev(?x1 : a : Nil, a : b : c : Nil)} -{\out 1. app(Nil, ?x1 : Nil, b : c : Nil)} -\end{ttbox} -This too is a dead end, but the next outcome is successful. -\begin{ttbox} -back(); -{\out Level 1} -{\out rev(c : b : a : Nil, a : b : c : Nil)} -{\out No subgoals!} -\end{ttbox} -\ttindex{REPEAT} goes wrong because it is only a repetition tactical, not a -search tactical. \texttt{REPEAT} stops when it cannot continue, regardless of -which state is reached. The tactical \ttindex{DEPTH_FIRST} searches for a -satisfactory state, as specified by an \ML{} predicate. Below, -\ttindex{has_fewer_prems} specifies that the proof state should have no -subgoals. -\begin{ttbox} -val prolog_tac = DEPTH_FIRST (has_fewer_prems 1) - (resolve_tac rules 1); -\end{ttbox} -Since Prolog uses depth-first search, this tactic is a (slow!) -Prolog interpreter. We return to the start of the proof using -\ttindex{choplev}, and apply \texttt{prolog_tac}: -\begin{ttbox} -choplev 0; -{\out Level 0} -{\out rev(?x, a : b : c : Nil)} -{\out 1. rev(?x, a : b : c : Nil)} -\ttbreak -by prolog_tac; -{\out Level 1} -{\out rev(c : b : a : Nil, a : b : c : Nil)} -{\out No subgoals!} -\end{ttbox} -Let us try \texttt{prolog_tac} on one more example, containing four unknowns: -\begin{ttbox} -Goal "rev(a:?x:c:?y:Nil, d:?z:b:?u)"; -{\out Level 0} -{\out rev(a : ?x : c : ?y : Nil, d : ?z : b : ?u)} -{\out 1. rev(a : ?x : c : ?y : Nil, d : ?z : b : ?u)} -\ttbreak -by prolog_tac; -{\out Level 1} -{\out rev(a : b : c : d : Nil, d : c : b : a : Nil)} -{\out No subgoals!} -\end{ttbox} -Although Isabelle is much slower than a Prolog system, Isabelle -tactics can exploit logic programming techniques. 
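-As a final experiment, we may replay the earlier backtracking session with -\texttt{prolog_tac} in place of \ttindex{REPEAT}. Since \ttindex{DEPTH_FIRST} -enumerates satisfactory states lazily, \ttindex{back} works here too. The -following transcript is a plausible reconstruction, not taken from -\texttt{FOL/ex/Prolog.ML}: -\begin{ttbox} -Goal "app(?x, ?y, a:b:Nil)"; -{\out Level 0} -{\out app(?x, ?y, a : b : Nil)} -{\out 1. app(?x, ?y, a : b : Nil)} -\ttbreak -by prolog_tac;  (*first solution: ?x = Nil*) -{\out Level 1} -{\out app(Nil, a : b : Nil, a : b : Nil)} -{\out No subgoals!} -\ttbreak -back();  (*next solution: ?x = a : Nil*) -{\out Level 1} -{\out app(a : Nil, b : Nil, a : b : Nil)} -{\out No subgoals!} -\end{ttbox}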
- diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/arith.thy --- a/doc-src/Intro/arith.thy Mon Aug 27 20:19:09 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,6 +0,0 @@ -Arith = FOL + -classes arith < term -consts "0" :: "'a::arith" ("0") - "1" :: "'a::arith" ("1") - "+" :: "['a::arith,'a] => 'a" (infixl 60) -end diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/bool.thy --- a/doc-src/Intro/bool.thy Mon Aug 27 20:19:09 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,5 +0,0 @@ -Bool = FOL + -types bool 0 -arities bool :: term -consts tt,ff :: "bool" -end diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/bool_nat.thy --- a/doc-src/Intro/bool_nat.thy Mon Aug 27 20:19:09 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,12 +0,0 @@ -BoolNat = Arith + -types bool,nat 0 -arities bool,nat :: arith -consts Suc :: "nat=>nat" -rules add0 "0 + n = n::nat" - addS "Suc(m)+n = Suc(m+n)" - nat1 "1 = Suc(0)" - or0l "0 + x = x::bool" - or0r "x + 0 = x::bool" - or1l "1 + x = 1::bool" - or1r "x + 1 = 1::bool" -end diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/deriv.txt --- a/doc-src/Intro/deriv.txt Mon Aug 27 20:19:09 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,210 +0,0 @@ -(* Deriving an inference rule *) - -Pretty.setmargin 72; (*existing macros just allow this margin*) -print_depth 0; - - -val [major,minor] = goal IFOL.thy - "[| P&Q; [| P; Q |] ==> R |] ==> R"; -prth minor; -by (resolve_tac [minor] 1); -prth major; -prth (major RS conjunct1); -by (resolve_tac [major RS conjunct1] 1); -prth (major RS conjunct2); -by (resolve_tac [major RS conjunct2] 1); -prth (topthm()); -val conjE = prth(result()); - - -- val [major,minor] = goal Int_Rule.thy -= "[| P&Q; [| P; Q |] ==> R |] ==> R"; -Level 0 -R - 1. R -val major = Thm {hyps=#,maxidx=#,prop=#,sign=#} : thm -val minor = Thm {hyps=#,maxidx=#,prop=#,sign=#} : thm -- prth minor; -[| P; Q |] ==> R [[| P; Q |] ==> R] -- by (resolve_tac [minor] 1); -Level 1 -R - 1. P - 2. Q -- prth major; -P & Q [P & Q] -- prth (major RS conjunct1); -P [P & Q] -- by (resolve_tac [major RS conjunct1] 1); -Level 2 -R - 1. Q -- prth (major RS conjunct2); -Q [P & Q] -- by (resolve_tac [major RS conjunct2] 1); -Level 3 -R -No subgoals! -- prth (topthm()); -R [P & Q, P & Q, [| P; Q |] ==> R] -- val conjE = prth(result()); -[| ?P & ?Q; [| ?P; ?Q |] ==> ?R |] ==> ?R -val conjE = Thm {hyps=#,maxidx=#,prop=#,sign=#} : thm - - -(*** Derived rules involving definitions ***) - -(** notI **) - -val prems = goal Int_Rule.thy "(P ==> False) ==> ~P"; -prth not_def; -by (rewrite_goals_tac [not_def]); -by (resolve_tac [impI] 1); -by (resolve_tac prems 1); -by (assume_tac 1); -val notI = prth(result()); - -val prems = goalw Int_Rule.thy [not_def] - "(P ==> False) ==> ~P"; - - -- prth not_def; -~?P == ?P --> False -- val prems = goal Int_Rule.thy "(P ==> False) ==> ~P"; -Level 0 -~P - 1. ~P -- by (rewrite_goals_tac [not_def]); -Level 1 -~P - 1. P --> False -- by (resolve_tac [impI] 1); -Level 2 -~P - 1. P ==> False -- by (resolve_tac prems 1); -Level 3 -~P - 1. P ==> P -- by (assume_tac 1); -Level 4 -~P -No subgoals! -- val notI = prth(result()); -(?P ==> False) ==> ~?P -val notI = # : thm - -- val prems = goalw Int_Rule.thy [not_def] -= "(P ==> False) ==> ~P"; -Level 0 -~P - 1. 
P --> False - - -(** notE **) - -val [major,minor] = goal Int_Rule.thy "[| ~P; P |] ==> R"; -by (resolve_tac [FalseE] 1); -by (resolve_tac [mp] 1); -prth (rewrite_rule [not_def] major); -by (resolve_tac [it] 1); -by (resolve_tac [minor] 1); -val notE = prth(result()); - -val [major,minor] = goalw Int_Rule.thy [not_def] - "[| ~P; P |] ==> R"; -prth (minor RS (major RS mp RS FalseE)); -by (resolve_tac [it] 1); - - -val prems = goalw Int_Rule.thy [not_def] - "[| ~P; P |] ==> R"; -prths prems; -by (resolve_tac [FalseE] 1); -by (resolve_tac [mp] 1); -by (resolve_tac prems 1); -by (resolve_tac prems 1); -val notE = prth(result()); - - -- val [major,minor] = goal Int_Rule.thy "[| ~P; P |] ==> R"; -Level 0 -R - 1. R -val major = # : thm -val minor = # : thm -- by (resolve_tac [FalseE] 1); -Level 1 -R - 1. False -- by (resolve_tac [mp] 1); -Level 2 -R - 1. ?P1 --> False - 2. ?P1 -- prth (rewrite_rule [not_def] major); -P --> False [~P] -- by (resolve_tac [it] 1); -Level 3 -R - 1. P -- by (resolve_tac [minor] 1); -Level 4 -R -No subgoals! -- val notE = prth(result()); -[| ~?P; ?P |] ==> ?R -val notE = # : thm -- val [major,minor] = goalw Int_Rule.thy [not_def] -= "[| ~P; P |] ==> R"; -Level 0 -R - 1. R -val major = # : thm -val minor = # : thm -- prth (minor RS (major RS mp RS FalseE)); -?P [P, ~P] -- by (resolve_tac [it] 1); -Level 1 -R -No subgoals! - - - - -- goal Int_Rule.thy "[| ~P; P |] ==> R"; -Level 0 -R - 1. R -- prths (map (rewrite_rule [not_def]) it); -P --> False [~P] - -P [P] - -- val prems = goalw Int_Rule.thy [not_def] -= "[| ~P; P |] ==> R"; -Level 0 -R - 1. R -val prems = # : thm list -- prths prems; -P --> False [~P] - -P [P] - -- by (resolve_tac [mp RS FalseE] 1); -Level 1 -R - 1. ?P1 --> False - 2. ?P1 -- by (resolve_tac prems 1); -Level 2 -R - 1. P -- by (resolve_tac prems 1); -Level 3 -R -No subgoals! -- val notE = prth(result()); -[| ~?P; ?P |] ==> ?R -val notE = # : thm diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/document/advanced.tex --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/doc-src/Intro/document/advanced.tex Mon Aug 27 20:50:10 2012 +0200 @@ -0,0 +1,1249 @@ +\part{Advanced Methods} +Before continuing, it might be wise to try some of your own examples in +Isabelle, reinforcing your knowledge of the basic functions. + +Look through {\em Isabelle's Object-Logics\/} and try proving some +simple theorems. You probably should begin with first-order logic +(\texttt{FOL} or~\texttt{LK}). Try working some of the examples provided, +and others from the literature. Set theory~(\texttt{ZF}) and +Constructive Type Theory~(\texttt{CTT}) form a richer world for +mathematical reasoning and, again, many examples are in the +literature. Higher-order logic~(\texttt{HOL}) is Isabelle's most +elaborate logic. Its types and functions are identified with those of +the meta-logic. + +Choose a logic that you already understand. Isabelle is a proof +tool, not a teaching tool; if you do not know how to do a particular proof +on paper, then you certainly will not be able to do it on the machine. +Even experienced users plan large proofs on paper. + +We have covered only the bare essentials of Isabelle, but enough to perform +substantial proofs. By occasionally dipping into the {\em Reference +Manual}, you can learn additional tactics, subgoal commands and tacticals. + + +\section{Deriving rules in Isabelle} +\index{rules!derived} +A mathematical development goes through a progression of stages. Each +stage defines some concepts and derives rules about them. 
We shall see how +to derive rules, perhaps involving definitions, using Isabelle. The +following section will explain how to declare types, constants, rules and +definitions. + + +\subsection{Deriving a rule using tactics and meta-level assumptions} +\label{deriving-example} +\index{examples!of deriving rules}\index{assumptions!of main goal} + +The subgoal module supports the derivation of rules, as discussed in +\S\ref{deriving}. When the \ttindex{Goal} command is supplied a formula of +the form $\List{\theta@1; \ldots; \theta@k} \Imp \phi$, there are two +possibilities: +\begin{itemize} +\item If all of the premises $\theta@1$, \ldots, $\theta@k$ are simple + formulae{} (they do not involve the meta-connectives $\Forall$ or + $\Imp$) then the command sets the goal to be + $\List{\theta@1; \ldots; \theta@k} \Imp \phi$ and returns the empty list. +\item If one or more premises involves the meta-connectives $\Forall$ or + $\Imp$, then the command sets the goal to be $\phi$ and returns a list + consisting of the theorems ${\theta@i\;[\theta@i]}$, for $i=1$, \ldots,~$k$. + These meta-level assumptions are also recorded internally, allowing + \texttt{result} (which is called by \texttt{qed}) to discharge them in the + original order. +\end{itemize} +Rules that discharge assumptions or introduce eigenvariables have complex +premises, and the second case applies. In this section, many of the +theorems are subject to meta-level assumptions, so we make them visible by setting the +\ttindex{show_hyps} flag: +\begin{ttbox} +set show_hyps; +{\out val it = true : bool} +\end{ttbox} + +Now, we are ready to derive $\conj$ elimination. Until now, calling \texttt{Goal} has +returned an empty list, which we have ignored. In this example, the list +contains the two premises of the rule, since one of them involves the $\Imp$ +connective. We bind them to the \ML\ identifiers \texttt{major} and {\tt + minor}:\footnote{Some ML compilers will print a message such as {\em binding + not exhaustive}. This warns that \texttt{Goal} must return a 2-element + list. Otherwise, the pattern-match will fail; ML will raise exception + \xdx{Match}.} +\begin{ttbox} +val [major,minor] = Goal + "[| P&Q; [| P; Q |] ==> R |] ==> R"; +{\out Level 0} +{\out R} +{\out 1. R} +{\out val major = "P & Q [P & Q]" : thm} +{\out val minor = "[| P; Q |] ==> R [[| P; Q |] ==> R]" : thm} +\end{ttbox} +Look at the minor premise, recalling that meta-level assumptions are +shown in brackets. Using \texttt{minor}, we reduce $R$ to the subgoals +$P$ and~$Q$: +\begin{ttbox} +by (resolve_tac [minor] 1); +{\out Level 1} +{\out R} +{\out 1. P} +{\out 2. Q} +\end{ttbox} +Deviating from~\S\ref{deriving}, we apply $({\conj}E1)$ forwards from the +assumption $P\conj Q$ to obtain the theorem~$P\;[P\conj Q]$. +\begin{ttbox} +major RS conjunct1; +{\out val it = "P [P & Q]" : thm} +\ttbreak +by (resolve_tac [major RS conjunct1] 1); +{\out Level 2} +{\out R} +{\out 1. Q} +\end{ttbox} +Similarly, we solve the subgoal involving~$Q$. +\begin{ttbox} +major RS conjunct2; +{\out val it = "Q [P & Q]" : thm} +by (resolve_tac [major RS conjunct2] 1); +{\out Level 3} +{\out R} +{\out No subgoals!} +\end{ttbox} +Calling \ttindex{topthm} returns the current proof state as a theorem. +Note that it contains assumptions. Calling \ttindex{qed} discharges +the assumptions --- both occurrences of $P\conj Q$ are discharged as +one --- and makes the variables schematic.
+\begin{ttbox} +topthm(); +{\out val it = "R [P & Q, P & Q, [| P; Q |] ==> R]" : thm} +qed "conjE"; +{\out val conjE = "[| ?P & ?Q; [| ?P; ?Q |] ==> ?R |] ==> ?R" : thm} +\end{ttbox} + + +\subsection{Definitions and derived rules} \label{definitions} +\index{rules!derived}\index{definitions!and derived rules|(} + +Definitions are expressed as meta-level equalities. Let us define negation +and the if-and-only-if connective: +\begin{eqnarray*} + \neg \Var{P} & \equiv & \Var{P}\imp\bot \\ + \Var{P}\bimp \Var{Q} & \equiv & + (\Var{P}\imp \Var{Q}) \conj (\Var{Q}\imp \Var{P}) +\end{eqnarray*} +\index{meta-rewriting}% +Isabelle permits {\bf meta-level rewriting} using definitions such as +these. {\bf Unfolding} replaces every instance +of $\neg \Var{P}$ by the corresponding instance of ${\Var{P}\imp\bot}$. For +example, $\forall x.\neg (P(x)\conj \neg R(x,0))$ unfolds to +\[ \forall x.(P(x)\conj R(x,0)\imp\bot)\imp\bot. \] +{\bf Folding} a definition replaces occurrences of the right-hand side by +the left. The occurrences need not be free in the entire formula. + +When you define new concepts, you should derive rules asserting their +abstract properties, and then forget their definitions. This supports +modularity: if you later change the definitions without affecting their +abstract properties, then most of your proofs will carry through without +change. Indiscriminate unfolding makes a subgoal grow exponentially, +becoming unreadable. + +Taking this point of view, Isabelle does not unfold definitions +automatically during proofs. Rewriting must be explicit and selective. +Isabelle provides tactics and meta-rules for rewriting, and a version of +the \texttt{Goal} command that unfolds the conclusion and premises of the rule +being derived. + +For example, the intuitionistic definition of negation given above may seem +peculiar. Using Isabelle, we shall derive pleasanter negation rules: +\[ \infer[({\neg}I)]{\neg P}{\infer*{\bot}{[P]}} \qquad + \infer[({\neg}E)]{Q}{\neg P & P} \] +This requires proving the following meta-formulae: +$$ (P\Imp\bot) \Imp \neg P \eqno(\neg I) $$ +$$ \List{\neg P; P} \Imp Q. \eqno(\neg E) $$ + + +\subsection{Deriving the $\neg$ introduction rule} +To derive $(\neg I)$, we may call \texttt{Goal} with the appropriate formula. +Again, the rule's premises involve a meta-connective, and \texttt{Goal} +returns one-element list. We bind this list to the \ML\ identifier \texttt{prems}. +\begin{ttbox} +val prems = Goal "(P ==> False) ==> ~P"; +{\out Level 0} +{\out ~P} +{\out 1. ~P} +{\out val prems = ["P ==> False [P ==> False]"] : thm list} +\end{ttbox} +Calling \ttindex{rewrite_goals_tac} with \tdx{not_def}, which is the +definition of negation, unfolds that definition in the subgoals. It leaves +the main goal alone. +\begin{ttbox} +not_def; +{\out val it = "~?P == ?P --> False" : thm} +by (rewrite_goals_tac [not_def]); +{\out Level 1} +{\out ~P} +{\out 1. P --> False} +\end{ttbox} +Using \tdx{impI} and the premise, we reduce subgoal~1 to a triviality: +\begin{ttbox} +by (resolve_tac [impI] 1); +{\out Level 2} +{\out ~P} +{\out 1. P ==> False} +\ttbreak +by (resolve_tac prems 1); +{\out Level 3} +{\out ~P} +{\out 1. P ==> P} +\end{ttbox} +The rest of the proof is routine. Note the form of the final result. +\begin{ttbox} +by (assume_tac 1); +{\out Level 4} +{\out ~P} +{\out No subgoals!} +\ttbreak +qed "notI"; +{\out val notI = "(?P ==> False) ==> ~?P" : thm} +\end{ttbox} +\indexbold{*notI theorem} + +There is a simpler way of conducting this proof. 
The \ttindex{Goalw}
+command starts a backward proof, as does \texttt{Goal}, but it also
+unfolds definitions.  Thus there is no need to call
+\ttindex{rewrite_goals_tac}:
+\begin{ttbox}
+val prems = Goalw [not_def]
+    "(P ==> False) ==> ~P";
+{\out Level 0}
+{\out ~P}
+{\out  1. P --> False}
+{\out val prems = ["P ==> False  [P ==> False]"] : thm list}
+\end{ttbox}
+
+
+\subsection{Deriving the $\neg$ elimination rule}
+Let us derive the rule $(\neg E)$.  The proof follows that of~\texttt{conjE}
+above, with an additional step to unfold negation in the major premise.
+The \texttt{Goalw} command is best for this: it unfolds definitions not only
+in the conclusion but also in the premises.
+\begin{ttbox}
+Goalw [not_def] "[| ~P; P |] ==> R";
+{\out Level 0}
+{\out [| ~ P; P |] ==> R}
+{\out  1. [| P --> False; P |] ==> R}
+\end{ttbox}
+As the first step, we apply \tdx{FalseE}:
+\begin{ttbox}
+by (resolve_tac [FalseE] 1);
+{\out Level 1}
+{\out [| ~ P; P |] ==> R}
+{\out  1. [| P --> False; P |] ==> False}
+\end{ttbox}
+%
+Everything follows from falsity.  And we can prove falsity using the
+premises and Modus Ponens:
+\begin{ttbox}
+by (eresolve_tac [mp] 1);
+{\out Level 2}
+{\out [| ~ P; P |] ==> R}
+{\out  1. P ==> P}
+\ttbreak
+by (assume_tac 1);
+{\out Level 3}
+{\out [| ~ P; P |] ==> R}
+{\out No subgoals!}
+\ttbreak
+qed "notE";
+{\out val notE = "[| ~?P; ?P |] ==> ?R" : thm}
+\end{ttbox}
+
+
+\medskip
+\texttt{Goalw} unfolds definitions in the premises even when it has to return
+them as a list.  Another way of unfolding definitions in a theorem is by
+applying the function \ttindex{rewrite_rule}.
+
+\index{definitions!and derived rules|)}
+
+
+\section{Defining theories}\label{sec:defining-theories}
+\index{theories!defining|(}
+
+Isabelle makes no distinction between simple extensions of a logic ---
+like specifying a type~$bool$ with constants~$true$ and~$false$ ---
+and defining an entire logic.  A theory definition has a form like
+\begin{ttbox}
+\(T\) = \(S@1\) + \(\cdots\) + \(S@n\) +
+classes      {\it class declarations}
+default      {\it sort}
+types        {\it type declarations and synonyms}
+arities      {\it type arity declarations}
+consts       {\it constant declarations}
+syntax       {\it syntactic constant declarations}
+translations {\it ast translation rules}
+defs         {\it meta-logical definitions}
+rules        {\it rule declarations}
+end
+ML           {\it ML code}
+\end{ttbox}
+This declares the theory $T$ to extend the existing theories
+$S@1$,~\ldots,~$S@n$.  It may introduce new classes, types, arities
+(of existing types), constants and rules; it can specify the default
+sort for type variables.  A constant declaration can specify an
+associated concrete syntax.  The translations section specifies
+rewrite rules on abstract syntax trees, handling notations and
+abbreviations.  \index{*ML section} The \texttt{ML} section may contain
+code to perform arbitrary syntactic transformations.  The main
+declaration forms are discussed below.  There are some more sections
+not presented here; the full syntax can be found in
+\iflabelundefined{app:TheorySyntax}{an appendix of the {\it Reference
+    Manual}}{App.\ts\ref{app:TheorySyntax}}.  Also note that
+object-logics may add further theory sections, for example
+\texttt{typedef} and \texttt{datatype} in HOL.
+
+All the declaration parts can be omitted or repeated and may appear in
+any order, except that the {\ML} section must be last (after the {\tt
+  end} keyword).  In the simplest case, $T$ is just the union of
+$S@1$,~\ldots,~$S@n$.
New theories always extend one or more other +theories, inheriting their types, constants, syntax, etc. The theory +\thydx{Pure} contains nothing but Isabelle's meta-logic. The variant +\thydx{CPure} offers the more usual higher-order function application +syntax $t\,u@1\ldots\,u@n$ instead of $t(u@1,\ldots,u@n)$ in Pure. + +Each theory definition must reside in a separate file, whose name is +the theory's with {\tt.thy} appended. Calling +\ttindexbold{use_thy}~{\tt"{\it T\/}"} reads the definition from {\it + T}{\tt.thy}, writes a corresponding file of {\ML} code {\tt.{\it + T}.thy.ML}, reads the latter file, and deletes it if no errors +occurred. This declares the {\ML} structure~$T$, which contains a +component \texttt{thy} denoting the new theory, a component for each +rule, and everything declared in {\it ML code}. + +Errors may arise during the translation to {\ML} (say, a misspelled +keyword) or during creation of the new theory (say, a type error in a +rule). But if all goes well, \texttt{use_thy} will finally read the file +{\it T}{\tt.ML} (if it exists). This file typically contains proofs +that refer to the components of~$T$. The structure is automatically +opened, so its components may be referred to by unqualified names, +e.g.\ just \texttt{thy} instead of $T$\texttt{.thy}. + +\ttindexbold{use_thy} automatically loads a theory's parents before +loading the theory itself. When a theory file is modified, many +theories may have to be reloaded. Isabelle records the modification +times and dependencies of theory files. See +\iflabelundefined{sec:reloading-theories}{the {\em Reference Manual\/}}% + {\S\ref{sec:reloading-theories}} +for more details. + + +\subsection{Declaring constants, definitions and rules} +\indexbold{constants!declaring}\index{rules!declaring} + +Most theories simply declare constants, definitions and rules. The {\bf + constant declaration part} has the form +\begin{ttbox} +consts \(c@1\) :: \(\tau@1\) + \vdots + \(c@n\) :: \(\tau@n\) +\end{ttbox} +where $c@1$, \ldots, $c@n$ are constants and $\tau@1$, \ldots, $\tau@n$ are +types. The types must be enclosed in quotation marks if they contain +user-declared infix type constructors like \texttt{*}. Each +constant must be enclosed in quotation marks unless it is a valid +identifier. To declare $c@1$, \ldots, $c@n$ as constants of type $\tau$, +the $n$ declarations may be abbreviated to a single line: +\begin{ttbox} + \(c@1\), \ldots, \(c@n\) :: \(\tau\) +\end{ttbox} +The {\bf rule declaration part} has the form +\begin{ttbox} +rules \(id@1\) "\(rule@1\)" + \vdots + \(id@n\) "\(rule@n\)" +\end{ttbox} +where $id@1$, \ldots, $id@n$ are \ML{} identifiers and $rule@1$, \ldots, +$rule@n$ are expressions of type~$prop$. Each rule {\em must\/} be +enclosed in quotation marks. Rules are simply axioms; they are +called \emph{rules} because they are mainly used to specify the inference +rules when defining a new logic. + +\indexbold{definitions} The {\bf definition part} is similar, but with +the keyword \texttt{defs} instead of \texttt{rules}. {\bf Definitions} are +rules of the form $s \equiv t$, and should serve only as +abbreviations. The simplest form of a definition is $f \equiv t$, +where $f$ is a constant. Also allowed are $\eta$-equivalent forms of +this, where the arguments of~$f$ appear applied on the left-hand side +of the equation instead of abstracted on the right-hand side. 
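+For instance, theory \texttt{Nat} below defines addition in this
+applied style, where \verb|%| stands for~$\lambda$.  As a sketch, the
+same definition could equally be given in the simplest form
+$f \equiv t$, writing \texttt{op +} for the infix constant itself;
+only one of the two lines would actually appear in a theory file:
+\begin{ttbox}
+defs  add_def  "m+n  == rec(m, n, \%x y. Suc(y))"
+defs  add_def  "op + == (\%m n. rec(m, n, \%x y. Suc(y)))"
+\end{ttbox}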
+
+Isabelle checks for common errors in definitions, such as extra
+variables on the right-hand side and cyclic dependencies, which could
+lead to inconsistency.  It is still essential to take care:
+theorems proved on the basis of incorrect definitions are useless;
+your system can be consistent and yet still wrong.
+
+\index{examples!of theories} This example theory extends first-order
+logic by declaring and defining two constants, {\em nand} and {\em
+  xor}:
+\begin{ttbox}
+Gate = FOL +
+consts  nand,xor :: [o,o] => o
+defs    nand_def "nand(P,Q) == ~(P & Q)"
+        xor_def  "xor(P,Q)  == P & ~Q | ~P & Q"
+end
+\end{ttbox}
+
+Declaring and defining constants can be combined:
+\begin{ttbox}
+Gate = FOL +
+constdefs  nand :: [o,o] => o
+           "nand(P,Q) == ~(P & Q)"
+           xor  :: [o,o] => o
+           "xor(P,Q)  == P & ~Q | ~P & Q"
+end
+\end{ttbox}
+\texttt{constdefs} generates the names \texttt{nand_def} and \texttt{xor_def}
+automatically, which is why it is restricted to alphanumeric identifiers.  In
+general it has the form
+\begin{ttbox}
+constdefs  \(id@1\) :: \(\tau@1\)
+           "\(id@1 \equiv \dots\)"
+           \vdots
+           \(id@n\) :: \(\tau@n\)
+           "\(id@n \equiv \dots\)"
+\end{ttbox}
+
+
+\begin{warn}
+A common mistake when writing definitions is to introduce extra free variables
+on the right-hand side as in the following fictitious definition:
+\begin{ttbox}
+defs  prime_def "prime(p) == (m divides p) --> (m=1 | m=p)"
+\end{ttbox}
+Isabelle rejects this ``definition'' because of the extra \texttt{m} on the
+right-hand side, which would introduce an inconsistency.  What you should have
+written is
+\begin{ttbox}
+defs  prime_def "prime(p) == ALL m. (m divides p) --> (m=1 | m=p)"
+\end{ttbox}
+\end{warn}
+
+\subsection{Declaring type constructors}
+\indexbold{types!declaring}\indexbold{arities!declaring}
+%
+Types are composed of type variables and {\bf type constructors}.  Each
+type constructor takes a fixed number of arguments.  They are declared
+with an \ML-like syntax.  If $list$ takes one type argument, $tree$ takes
+two arguments and $nat$ takes no arguments, then these type constructors
+can be declared by
+\begin{ttbox}
+types 'a list
+      ('a,'b) tree
+      nat
+\end{ttbox}
+
+The {\bf type declaration part} has the general form
+\begin{ttbox}
+types   \(tids@1\) \(id@1\)
+        \vdots
+        \(tids@n\) \(id@n\)
+\end{ttbox}
+where $id@1$, \ldots, $id@n$ are identifiers and $tids@1$, \ldots, $tids@n$
+are type argument lists as shown in the example above.  It declares each
+$id@i$ as a type constructor with the specified number of argument places.
+
+The {\bf arity declaration part} has the form
+\begin{ttbox}
+arities \(tycon@1\) :: \(arity@1\)
+        \vdots
+        \(tycon@n\) :: \(arity@n\)
+\end{ttbox}
+where $tycon@1$, \ldots, $tycon@n$ are identifiers and $arity@1$, \ldots,
+$arity@n$ are arities.  Arity declarations add arities to existing
+types; they do not declare the types themselves.
+In the simplest case, for a 0-place type constructor, an arity is simply
+the type's class.  Let us declare a type~$bool$ of class $term$, with
+constants $tt$ and~$ff$.  (In first-order logic, booleans are
+distinct from formulae, which have type $o::logic$.)
+\index{examples!of theories}
+\begin{ttbox}
+Bool = FOL +
+types   bool
+arities bool    :: term
+consts  tt,ff   :: bool
+end
+\end{ttbox}
+A $k$-place type constructor may have arities of the form
+$(s@1,\ldots,s@k)c$, where $s@1,\ldots,s@k$ are sorts and $c$ is a class.
+Each sort specifies a type argument; it has the form $\{c@1,\ldots,c@m\}$,
+where $c@1$, \dots,~$c@m$ are classes.
Mostly we deal with singleton +sorts, and may abbreviate them by dropping the braces. The arity +$(term)term$ is short for $(\{term\})term$. Recall the discussion in +\S\ref{polymorphic}. + +A type constructor may be overloaded (subject to certain conditions) by +appearing in several arity declarations. For instance, the function type +constructor~$fun$ has the arity $(logic,logic)logic$; in higher-order +logic, it is declared also to have arity $(term,term)term$. + +Theory \texttt{List} declares the 1-place type constructor $list$, gives +it the arity $(term)term$, and declares constants $Nil$ and $Cons$ with +polymorphic types:% +\footnote{In the \texttt{consts} part, type variable {\tt'a} has the default + sort, which is \texttt{term}. See the {\em Reference Manual\/} +\iflabelundefined{sec:ref-defining-theories}{}% +{(\S\ref{sec:ref-defining-theories})} for more information.} +\index{examples!of theories} +\begin{ttbox} +List = FOL + +types 'a list +arities list :: (term)term +consts Nil :: 'a list + Cons :: ['a, 'a list] => 'a list +end +\end{ttbox} +Multiple arity declarations may be abbreviated to a single line: +\begin{ttbox} +arities \(tycon@1\), \ldots, \(tycon@n\) :: \(arity\) +\end{ttbox} + +%\begin{warn} +%Arity declarations resemble constant declarations, but there are {\it no\/} +%quotation marks! Types and rules must be quoted because the theory +%translator passes them verbatim to the {\ML} output file. +%\end{warn} + +\subsection{Type synonyms}\indexbold{type synonyms} +Isabelle supports {\bf type synonyms} ({\bf abbreviations}) which are similar +to those found in \ML. Such synonyms are defined in the type declaration part +and are fairly self explanatory: +\begin{ttbox} +types gate = [o,o] => o + 'a pred = 'a => o + ('a,'b)nuf = 'b => 'a +\end{ttbox} +Type declarations and synonyms can be mixed arbitrarily: +\begin{ttbox} +types nat + 'a stream = nat => 'a + signal = nat stream + 'a list +\end{ttbox} +A synonym is merely an abbreviation for some existing type expression. +Hence synonyms may not be recursive! Internally all synonyms are +fully expanded. As a consequence Isabelle output never contains +synonyms. Their main purpose is to improve the readability of theory +definitions. Synonyms can be used just like any other type: +\begin{ttbox} +consts and,or :: gate + negate :: signal => signal +\end{ttbox} + +\subsection{Infix and mixfix operators} +\index{infixes}\index{examples!of theories} + +Infix or mixfix syntax may be attached to constants. Consider the +following theory: +\begin{ttbox} +Gate2 = FOL + +consts "~&" :: [o,o] => o (infixl 35) + "#" :: [o,o] => o (infixl 30) +defs nand_def "P ~& Q == ~(P & Q)" + xor_def "P # Q == P & ~Q | ~P & Q" +end +\end{ttbox} +The constant declaration part declares two left-associating infix operators +with their priorities, or precedences; they are $\nand$ of priority~35 and +$\xor$ of priority~30. Hence $P \xor Q \xor R$ is parsed as $(P\xor Q) +\xor R$ and $P \xor Q \nand R$ as $P \xor (Q \nand R)$. Note the quotation +marks in \verb|"~&"| and \verb|"#"|. + +The constants \hbox{\verb|op ~&|} and \hbox{\verb|op #|} are declared +automatically, just as in \ML. Hence you may write propositions like +\verb|op #(True) == op ~&(True)|, which asserts that the functions $\lambda +Q.True \xor Q$ and $\lambda Q.True \nand Q$ are identical. + +\medskip Infix syntax and constant names may be also specified +independently. 
For example, consider this version of $\nand$:
+\begin{ttbox}
+consts  nand :: [o,o] => o           (infixl "~&" 35)
+\end{ttbox}
+
+\bigskip\index{mixfix declarations}
+{\bf Mixfix} operators may have arbitrary context-free syntaxes.  Let us
+add a line to the constant declaration part:
+\begin{ttbox}
+        If :: [o,o,o] => o           ("if _ then _ else _")
+\end{ttbox}
+This declares a constant $If$ of type $[o,o,o] \To o$ with concrete syntax {\tt
+  if~$P$ then~$Q$ else~$R$} as well as \texttt{If($P$,$Q$,$R$)}.  Underscores
+denote argument positions.
+
+The declaration above does not allow the \texttt{if}-\texttt{then}-{\tt
+  else} construct to be printed split across several lines, even if it
+is too long to fit on one line.  Pretty-printing information can be
+added to specify the layout of mixfix operators.  For details, see
+\iflabelundefined{Defining-Logics}%
+    {the {\it Reference Manual}, chapter `Defining Logics'}%
+    {Chap.\ts\ref{Defining-Logics}}.
+
+Mixfix declarations can be annotated with priorities, just like
+infixes.  The example above is just a shorthand for
+\begin{ttbox}
+        If :: [o,o,o] => o           ("if _ then _ else _" [0,0,0] 1000)
+\end{ttbox}
+The numeric components determine priorities.  The list of integers
+defines, for each argument position, the minimal priority an expression
+at that position must have.  The final integer is the priority of the
+construct itself.  In the example above, any argument expression is
+acceptable because priorities are non-negative, and conditionals may
+appear everywhere because 1000 is the highest priority.  On the other
+hand, the declaration
+\begin{ttbox}
+        If :: [o,o,o] => o           ("if _ then _ else _" [100,0,0] 99)
+\end{ttbox}
+defines concrete syntax for a conditional whose first argument cannot have
+the form \texttt{if~$P$ then~$Q$ else~$R$} because it must have a priority
+of at least~100.  We may of course write
+\begin{quote}\tt
+if (if $P$ then $Q$ else $R$) then $S$ else $T$
+\end{quote}
+because expressions in parentheses have maximal priority.
+
+Binary type constructors, like products and sums, may also be declared as
+infixes.  The type declaration below introduces a type constructor~$*$ with
+infix notation $\alpha*\beta$, together with the mixfix notation
+${<}\_,\_{>}$ for pairs.  We also see a rule declaration part.
+\index{examples!of theories}\index{mixfix declarations}
+\begin{ttbox}
+Prod = FOL +
+types   ('a,'b) "*"                           (infixl 20)
+arities "*"     :: (term,term)term
+consts  fst     :: "'a * 'b => 'a"
+        snd     :: "'a * 'b => 'b"
+        Pair    :: "['a,'b] => 'a * 'b"       ("(1<_,/_>)")
+rules   fst     "fst(<a,b>) = a"
+        snd     "snd(<a,b>) = b"
+end
+\end{ttbox}
+
+\begin{warn}
+  The name of the type constructor is~\texttt{*} and not \texttt{op~*}, as
+  it would be in the case of an infix constant.  Only infix type
+  constructors can have symbolic names like~\texttt{*}.  General mixfix
+  syntax for types may be introduced via appropriate \texttt{syntax}
+  declarations.
+\end{warn}
+
+
+\subsection{Overloading}
+\index{overloading}\index{examples!of theories}
+The {\bf class declaration part} has the form
+\begin{ttbox}
+classes \(id@1\) < \(c@1\)
+        \vdots
+        \(id@n\) < \(c@n\)
+\end{ttbox}
+where $id@1$, \ldots, $id@n$ are identifiers and $c@1$, \ldots, $c@n$ are
+existing classes.  It declares each $id@i$ as a new class, a subclass
+of~$c@i$.  In the general case, an identifier may be declared to be a
+subclass of $k$ existing classes:
+\begin{ttbox}
+        \(id\) < \(c@1\), \ldots, \(c@k\)
+\end{ttbox}
+Type classes allow constants to be overloaded.
As suggested in +\S\ref{polymorphic}, let us define the class $arith$ of arithmetic +types with the constants ${+} :: [\alpha,\alpha]\To \alpha$ and $0,1 {::} +\alpha$, for $\alpha{::}arith$. We introduce $arith$ as a subclass of +$term$ and add the three polymorphic constants of this class. +\index{examples!of theories}\index{constants!overloaded} +\begin{ttbox} +Arith = FOL + +classes arith < term +consts "0" :: 'a::arith ("0") + "1" :: 'a::arith ("1") + "+" :: ['a::arith,'a] => 'a (infixl 60) +end +\end{ttbox} +No rules are declared for these constants: we merely introduce their +names without specifying properties. On the other hand, classes +with rules make it possible to prove {\bf generic} theorems. Such +theorems hold for all instances, all types in that class. + +We can now obtain distinct versions of the constants of $arith$ by +declaring certain types to be of class $arith$. For example, let us +declare the 0-place type constructors $bool$ and $nat$: +\index{examples!of theories} +\begin{ttbox} +BoolNat = Arith + +types bool nat +arities bool, nat :: arith +consts Suc :: nat=>nat +\ttbreak +rules add0 "0 + n = n::nat" + addS "Suc(m)+n = Suc(m+n)" + nat1 "1 = Suc(0)" + or0l "0 + x = x::bool" + or0r "x + 0 = x::bool" + or1l "1 + x = 1::bool" + or1r "x + 1 = 1::bool" +end +\end{ttbox} +Because $nat$ and $bool$ have class $arith$, we can use $0$, $1$ and $+$ at +either type. The type constraints in the axioms are vital. Without +constraints, the $x$ in $1+x = 1$ (axiom \texttt{or1l}) +would have type $\alpha{::}arith$ +and the axiom would hold for any type of class $arith$. This would +collapse $nat$ to a trivial type: +\[ Suc(1) = Suc(0+1) = Suc(0)+1 = 1+1 = 1! \] + + +\section{Theory example: the natural numbers} + +We shall now work through a small example of formalized mathematics +demonstrating many of the theory extension features. + + +\subsection{Extending first-order logic with the natural numbers} +\index{examples!of theories} + +Section\ts\ref{sec:logical-syntax} has formalized a first-order logic, +including a type~$nat$ and the constants $0::nat$ and $Suc::nat\To nat$. +Let us introduce the Peano axioms for mathematical induction and the +freeness of $0$ and~$Suc$:\index{axioms!Peano} +\[ \vcenter{\infer[(induct)]{P[n/x]}{P[0/x] & \infer*{P[Suc(x)/x]}{[P]}}} + \qquad \parbox{4.5cm}{provided $x$ is not free in any assumption except~$P$} +\] +\[ \infer[(Suc\_inject)]{m=n}{Suc(m)=Suc(n)} \qquad + \infer[(Suc\_neq\_0)]{R}{Suc(m)=0} +\] +Mathematical induction asserts that $P(n)$ is true, for any $n::nat$, +provided $P(0)$ holds and that $P(x)$ implies $P(Suc(x))$ for all~$x$. +Some authors express the induction step as $\forall x. P(x)\imp P(Suc(x))$. +To avoid making induction require the presence of other connectives, we +formalize mathematical induction as +$$ \List{P(0); \Forall x. P(x)\Imp P(Suc(x))} \Imp P(n). \eqno(induct) $$ + +\noindent +Similarly, to avoid expressing the other rules using~$\forall$, $\imp$ +and~$\neg$, we take advantage of the meta-logic;\footnote +{On the other hand, the axioms $Suc(m)=Suc(n) \bimp m=n$ +and $\neg(Suc(m)=0)$ are logically equivalent to those given, and work +better with Isabelle's simplifier.} +$(Suc\_neq\_0)$ is +an elimination rule for $Suc(m)=0$: +$$ Suc(m)=Suc(n) \Imp m=n \eqno(Suc\_inject) $$ +$$ Suc(m)=0 \Imp R \eqno(Suc\_neq\_0) $$ + +\noindent +We shall also define a primitive recursion operator, $rec$. 
Traditionally, +primitive recursion takes a natural number~$a$ and a 2-place function~$f$, +and obeys the equations +\begin{eqnarray*} + rec(0,a,f) & = & a \\ + rec(Suc(m),a,f) & = & f(m, rec(m,a,f)) +\end{eqnarray*} +Addition, defined by $m+n \equiv rec(m,n,\lambda x\,y.Suc(y))$, +should satisfy +\begin{eqnarray*} + 0+n & = & n \\ + Suc(m)+n & = & Suc(m+n) +\end{eqnarray*} +Primitive recursion appears to pose difficulties: first-order logic has no +function-valued expressions. We again take advantage of the meta-logic, +which does have functions. We also generalise primitive recursion to be +polymorphic over any type of class~$term$, and declare the addition +function: +\begin{eqnarray*} + rec & :: & [nat, \alpha{::}term, [nat,\alpha]\To\alpha] \To\alpha \\ + + & :: & [nat,nat]\To nat +\end{eqnarray*} + + +\subsection{Declaring the theory to Isabelle} +\index{examples!of theories} +Let us create the theory \thydx{Nat} starting from theory~\verb$FOL$, +which contains only classical logic with no natural numbers. We declare +the 0-place type constructor $nat$ and the associated constants. Note that +the constant~0 requires a mixfix annotation because~0 is not a legal +identifier, and could not otherwise be written in terms: +\begin{ttbox}\index{mixfix declarations} +Nat = FOL + +types nat +arities nat :: term +consts "0" :: nat ("0") + Suc :: nat=>nat + rec :: [nat, 'a, [nat,'a]=>'a] => 'a + "+" :: [nat, nat] => nat (infixl 60) +rules Suc_inject "Suc(m)=Suc(n) ==> m=n" + Suc_neq_0 "Suc(m)=0 ==> R" + induct "[| P(0); !!x. P(x) ==> P(Suc(x)) |] ==> P(n)" + rec_0 "rec(0,a,f) = a" + rec_Suc "rec(Suc(m), a, f) = f(m, rec(m,a,f))" + add_def "m+n == rec(m, n, \%x y. Suc(y))" +end +\end{ttbox} +In axiom \texttt{add_def}, recall that \verb|%| stands for~$\lambda$. +Loading this theory file creates the \ML\ structure \texttt{Nat}, which +contains the theory and axioms. + +\subsection{Proving some recursion equations} +Theory \texttt{FOL/ex/Nat} contains proofs involving this theory of the +natural numbers. As a trivial example, let us derive recursion equations +for \verb$+$. Here is the zero case: +\begin{ttbox} +Goalw [add_def] "0+n = n"; +{\out Level 0} +{\out 0 + n = n} +{\out 1. rec(0,n,\%x y. Suc(y)) = n} +\ttbreak +by (resolve_tac [rec_0] 1); +{\out Level 1} +{\out 0 + n = n} +{\out No subgoals!} +qed "add_0"; +\end{ttbox} +And here is the successor case: +\begin{ttbox} +Goalw [add_def] "Suc(m)+n = Suc(m+n)"; +{\out Level 0} +{\out Suc(m) + n = Suc(m + n)} +{\out 1. rec(Suc(m),n,\%x y. Suc(y)) = Suc(rec(m,n,\%x y. Suc(y)))} +\ttbreak +by (resolve_tac [rec_Suc] 1); +{\out Level 1} +{\out Suc(m) + n = Suc(m + n)} +{\out No subgoals!} +qed "add_Suc"; +\end{ttbox} +The induction rule raises some complications, which are discussed next. +\index{theories!defining|)} + + +\section{Refinement with explicit instantiation} +\index{resolution!with instantiation} +\index{instantiation|(} + +In order to employ mathematical induction, we need to refine a subgoal by +the rule~$(induct)$. The conclusion of this rule is $\Var{P}(\Var{n})$, +which is highly ambiguous in higher-order unification. It matches every +way that a formula can be regarded as depending on a subterm of type~$nat$. +To get round this problem, we could make the induction rule conclude +$\forall n.\Var{P}(n)$ --- but putting a subgoal into this form requires +refinement by~$(\forall E)$, which is equally hard! + +The tactic \texttt{res_inst_tac}, like \texttt{resolve_tac}, refines a subgoal by +a rule. 
But it also accepts explicit instantiations for the rule's +schematic variables. +\begin{description} +\item[\ttindex{res_inst_tac} {\it insts} {\it thm} {\it i}] +instantiates the rule {\it thm} with the instantiations {\it insts}, and +then performs resolution on subgoal~$i$. + +\item[\ttindex{eres_inst_tac}] +and \ttindex{dres_inst_tac} are similar, but perform elim-resolution +and destruct-resolution, respectively. +\end{description} +The list {\it insts} consists of pairs $[(v@1,e@1), \ldots, (v@n,e@n)]$, +where $v@1$, \ldots, $v@n$ are names of schematic variables in the rule --- +with no leading question marks! --- and $e@1$, \ldots, $e@n$ are +expressions giving their instantiations. The expressions are type-checked +in the context of a particular subgoal: free variables receive the same +types as they have in the subgoal, and parameters may appear. Type +variable instantiations may appear in~{\it insts}, but they are seldom +required: \texttt{res_inst_tac} instantiates type variables automatically +whenever the type of~$e@i$ is an instance of the type of~$\Var{v@i}$. + +\subsection{A simple proof by induction} +\index{examples!of induction} +Let us prove that no natural number~$k$ equals its own successor. To +use~$(induct)$, we instantiate~$\Var{n}$ to~$k$; Isabelle finds a good +instantiation for~$\Var{P}$. +\begin{ttbox} +Goal "~ (Suc(k) = k)"; +{\out Level 0} +{\out Suc(k) ~= k} +{\out 1. Suc(k) ~= k} +\ttbreak +by (res_inst_tac [("n","k")] induct 1); +{\out Level 1} +{\out Suc(k) ~= k} +{\out 1. Suc(0) ~= 0} +{\out 2. !!x. Suc(x) ~= x ==> Suc(Suc(x)) ~= Suc(x)} +\end{ttbox} +We should check that Isabelle has correctly applied induction. Subgoal~1 +is the base case, with $k$ replaced by~0. Subgoal~2 is the inductive step, +with $k$ replaced by~$Suc(x)$ and with an induction hypothesis for~$x$. +The rest of the proof demonstrates~\tdx{notI}, \tdx{notE} and the +other rules of theory \texttt{Nat}. The base case holds by~\ttindex{Suc_neq_0}: +\begin{ttbox} +by (resolve_tac [notI] 1); +{\out Level 2} +{\out Suc(k) ~= k} +{\out 1. Suc(0) = 0 ==> False} +{\out 2. !!x. Suc(x) ~= x ==> Suc(Suc(x)) ~= Suc(x)} +\ttbreak +by (eresolve_tac [Suc_neq_0] 1); +{\out Level 3} +{\out Suc(k) ~= k} +{\out 1. !!x. Suc(x) ~= x ==> Suc(Suc(x)) ~= Suc(x)} +\end{ttbox} +The inductive step holds by the contrapositive of~\ttindex{Suc_inject}. +Negation rules transform the subgoal into that of proving $Suc(x)=x$ from +$Suc(Suc(x)) = Suc(x)$: +\begin{ttbox} +by (resolve_tac [notI] 1); +{\out Level 4} +{\out Suc(k) ~= k} +{\out 1. !!x. [| Suc(x) ~= x; Suc(Suc(x)) = Suc(x) |] ==> False} +\ttbreak +by (eresolve_tac [notE] 1); +{\out Level 5} +{\out Suc(k) ~= k} +{\out 1. !!x. Suc(Suc(x)) = Suc(x) ==> Suc(x) = x} +\ttbreak +by (eresolve_tac [Suc_inject] 1); +{\out Level 6} +{\out Suc(k) ~= k} +{\out No subgoals!} +\end{ttbox} + + +\subsection{An example of ambiguity in \texttt{resolve_tac}} +\index{examples!of induction}\index{unification!higher-order} +If you try the example above, you may observe that \texttt{res_inst_tac} is +not actually needed. Almost by chance, \ttindex{resolve_tac} finds the right +instantiation for~$(induct)$ to yield the desired next state. With more +complex formulae, our luck fails. +\begin{ttbox} +Goal "(k+m)+n = k+(m+n)"; +{\out Level 0} +{\out k + m + n = k + (m + n)} +{\out 1. k + m + n = k + (m + n)} +\ttbreak +by (resolve_tac [induct] 1); +{\out Level 1} +{\out k + m + n = k + (m + n)} +{\out 1. k + m + n = 0} +{\out 2. !!x. 
k + m + n = x ==> k + m + n = Suc(x)} +\end{ttbox} +This proof requires induction on~$k$. The occurrence of~0 in subgoal~1 +indicates that induction has been applied to the term~$k+(m+n)$; this +application is sound but will not lead to a proof here. Fortunately, +Isabelle can (lazily!) generate all the valid applications of induction. +The \ttindex{back} command causes backtracking to an alternative outcome of +the tactic. +\begin{ttbox} +back(); +{\out Level 1} +{\out k + m + n = k + (m + n)} +{\out 1. k + m + n = k + 0} +{\out 2. !!x. k + m + n = k + x ==> k + m + n = k + Suc(x)} +\end{ttbox} +Now induction has been applied to~$m+n$. This is equally useless. Let us +call \ttindex{back} again. +\begin{ttbox} +back(); +{\out Level 1} +{\out k + m + n = k + (m + n)} +{\out 1. k + m + 0 = k + (m + 0)} +{\out 2. !!x. k + m + x = k + (m + x) ==>} +{\out k + m + Suc(x) = k + (m + Suc(x))} +\end{ttbox} +Now induction has been applied to~$n$. What is the next alternative? +\begin{ttbox} +back(); +{\out Level 1} +{\out k + m + n = k + (m + n)} +{\out 1. k + m + n = k + (m + 0)} +{\out 2. !!x. k + m + n = k + (m + x) ==> k + m + n = k + (m + Suc(x))} +\end{ttbox} +Inspecting subgoal~1 reveals that induction has been applied to just the +second occurrence of~$n$. This perfectly legitimate induction is useless +here. + +The main goal admits fourteen different applications of induction. The +number is exponential in the size of the formula. + +\subsection{Proving that addition is associative} +Let us invoke the induction rule properly, using~{\tt + res_inst_tac}. At the same time, we shall have a glimpse at Isabelle's +simplification tactics, which are described in +\iflabelundefined{simp-chap}% + {the {\em Reference Manual}}{Chap.\ts\ref{simp-chap}}. + +\index{simplification}\index{examples!of simplification} + +Isabelle's simplification tactics repeatedly apply equations to a subgoal, +perhaps proving it. For efficiency, the rewrite rules must be packaged into a +{\bf simplification set},\index{simplification sets} or {\bf simpset}. We +augment the implicit simpset of FOL with the equations proved in the previous +section, namely $0+n=n$ and $\texttt{Suc}(m)+n=\texttt{Suc}(m+n)$: +\begin{ttbox} +Addsimps [add_0, add_Suc]; +\end{ttbox} +We state the goal for associativity of addition, and +use \ttindex{res_inst_tac} to invoke induction on~$k$: +\begin{ttbox} +Goal "(k+m)+n = k+(m+n)"; +{\out Level 0} +{\out k + m + n = k + (m + n)} +{\out 1. k + m + n = k + (m + n)} +\ttbreak +by (res_inst_tac [("n","k")] induct 1); +{\out Level 1} +{\out k + m + n = k + (m + n)} +{\out 1. 0 + m + n = 0 + (m + n)} +{\out 2. !!x. x + m + n = x + (m + n) ==>} +{\out Suc(x) + m + n = Suc(x) + (m + n)} +\end{ttbox} +The base case holds easily; both sides reduce to $m+n$. The +tactic~\ttindex{Simp_tac} rewrites with respect to the current +simplification set, applying the rewrite rules for addition: +\begin{ttbox} +by (Simp_tac 1); +{\out Level 2} +{\out k + m + n = k + (m + n)} +{\out 1. !!x. x + m + n = x + (m + n) ==>} +{\out Suc(x) + m + n = Suc(x) + (m + n)} +\end{ttbox} +The inductive step requires rewriting by the equations for addition +and with the induction hypothesis, which is also an equation. 
The +tactic~\ttindex{Asm_simp_tac} rewrites using the implicit +simplification set and any useful assumptions: +\begin{ttbox} +by (Asm_simp_tac 1); +{\out Level 3} +{\out k + m + n = k + (m + n)} +{\out No subgoals!} +\end{ttbox} +\index{instantiation|)} + + +\section{A Prolog interpreter} +\index{Prolog interpreter|bold} +To demonstrate the power of tacticals, let us construct a Prolog +interpreter and execute programs involving lists.\footnote{To run these +examples, see the file \texttt{FOL/ex/Prolog.ML}.} The Prolog program +consists of a theory. We declare a type constructor for lists, with an +arity declaration to say that $(\tau)list$ is of class~$term$ +provided~$\tau$ is: +\begin{eqnarray*} + list & :: & (term)term +\end{eqnarray*} +We declare four constants: the empty list~$Nil$; the infix list +constructor~{:}; the list concatenation predicate~$app$; the list reverse +predicate~$rev$. (In Prolog, functions on lists are expressed as +predicates.) +\begin{eqnarray*} + Nil & :: & \alpha list \\ + {:} & :: & [\alpha,\alpha list] \To \alpha list \\ + app & :: & [\alpha list,\alpha list,\alpha list] \To o \\ + rev & :: & [\alpha list,\alpha list] \To o +\end{eqnarray*} +The predicate $app$ should satisfy the Prolog-style rules +\[ {app(Nil,ys,ys)} \qquad + {app(xs,ys,zs) \over app(x:xs, ys, x:zs)} \] +We define the naive version of $rev$, which calls~$app$: +\[ {rev(Nil,Nil)} \qquad + {rev(xs,ys)\quad app(ys, x:Nil, zs) \over + rev(x:xs, zs)} +\] + +\index{examples!of theories} +Theory \thydx{Prolog} extends first-order logic in order to make use +of the class~$term$ and the type~$o$. The interpreter does not use the +rules of~\texttt{FOL}. +\begin{ttbox} +Prolog = FOL + +types 'a list +arities list :: (term)term +consts Nil :: 'a list + ":" :: ['a, 'a list]=> 'a list (infixr 60) + app :: ['a list, 'a list, 'a list] => o + rev :: ['a list, 'a list] => o +rules appNil "app(Nil,ys,ys)" + appCons "app(xs,ys,zs) ==> app(x:xs, ys, x:zs)" + revNil "rev(Nil,Nil)" + revCons "[| rev(xs,ys); app(ys,x:Nil,zs) |] ==> rev(x:xs,zs)" +end +\end{ttbox} +\subsection{Simple executions} +Repeated application of the rules solves Prolog goals. Let us +append the lists $[a,b,c]$ and~$[d,e]$. As the rules are applied, the +answer builds up in~\texttt{?x}. +\begin{ttbox} +Goal "app(a:b:c:Nil, d:e:Nil, ?x)"; +{\out Level 0} +{\out app(a : b : c : Nil, d : e : Nil, ?x)} +{\out 1. app(a : b : c : Nil, d : e : Nil, ?x)} +\ttbreak +by (resolve_tac [appNil,appCons] 1); +{\out Level 1} +{\out app(a : b : c : Nil, d : e : Nil, a : ?zs1)} +{\out 1. app(b : c : Nil, d : e : Nil, ?zs1)} +\ttbreak +by (resolve_tac [appNil,appCons] 1); +{\out Level 2} +{\out app(a : b : c : Nil, d : e : Nil, a : b : ?zs2)} +{\out 1. app(c : Nil, d : e : Nil, ?zs2)} +\end{ttbox} +At this point, the first two elements of the result are~$a$ and~$b$. +\begin{ttbox} +by (resolve_tac [appNil,appCons] 1); +{\out Level 3} +{\out app(a : b : c : Nil, d : e : Nil, a : b : c : ?zs3)} +{\out 1. app(Nil, d : e : Nil, ?zs3)} +\ttbreak +by (resolve_tac [appNil,appCons] 1); +{\out Level 4} +{\out app(a : b : c : Nil, d : e : Nil, a : b : c : d : e : Nil)} +{\out No subgoals!} +\end{ttbox} + +Prolog can run functions backwards. Which list can be appended +with $[c,d]$ to produce $[a,b,c,d]$? +Using \ttindex{REPEAT}, we find the answer at once, $[a,b]$: +\begin{ttbox} +Goal "app(?x, c:d:Nil, a:b:c:d:Nil)"; +{\out Level 0} +{\out app(?x, c : d : Nil, a : b : c : d : Nil)} +{\out 1. 
app(?x, c : d : Nil, a : b : c : d : Nil)} +\ttbreak +by (REPEAT (resolve_tac [appNil,appCons] 1)); +{\out Level 1} +{\out app(a : b : Nil, c : d : Nil, a : b : c : d : Nil)} +{\out No subgoals!} +\end{ttbox} + + +\subsection{Backtracking}\index{backtracking!Prolog style} +Prolog backtracking can answer questions that have multiple solutions. +Which lists $x$ and $y$ can be appended to form the list $[a,b,c,d]$? This +question has five solutions. Using \ttindex{REPEAT} to apply the rules, we +quickly find the first solution, namely $x=[]$ and $y=[a,b,c,d]$: +\begin{ttbox} +Goal "app(?x, ?y, a:b:c:d:Nil)"; +{\out Level 0} +{\out app(?x, ?y, a : b : c : d : Nil)} +{\out 1. app(?x, ?y, a : b : c : d : Nil)} +\ttbreak +by (REPEAT (resolve_tac [appNil,appCons] 1)); +{\out Level 1} +{\out app(Nil, a : b : c : d : Nil, a : b : c : d : Nil)} +{\out No subgoals!} +\end{ttbox} +Isabelle can lazily generate all the possibilities. The \ttindex{back} +command returns the tactic's next outcome, namely $x=[a]$ and $y=[b,c,d]$: +\begin{ttbox} +back(); +{\out Level 1} +{\out app(a : Nil, b : c : d : Nil, a : b : c : d : Nil)} +{\out No subgoals!} +\end{ttbox} +The other solutions are generated similarly. +\begin{ttbox} +back(); +{\out Level 1} +{\out app(a : b : Nil, c : d : Nil, a : b : c : d : Nil)} +{\out No subgoals!} +\ttbreak +back(); +{\out Level 1} +{\out app(a : b : c : Nil, d : Nil, a : b : c : d : Nil)} +{\out No subgoals!} +\ttbreak +back(); +{\out Level 1} +{\out app(a : b : c : d : Nil, Nil, a : b : c : d : Nil)} +{\out No subgoals!} +\end{ttbox} + + +\subsection{Depth-first search} +\index{search!depth-first} +Now let us try $rev$, reversing a list. +Bundle the rules together as the \ML{} identifier \texttt{rules}. Naive +reverse requires 120 inferences for this 14-element list, but the tactic +terminates in a few seconds. +\begin{ttbox} +Goal "rev(a:b:c:d:e:f:g:h:i:j:k:l:m:n:Nil, ?w)"; +{\out Level 0} +{\out rev(a : b : c : d : e : f : g : h : i : j : k : l : m : n : Nil, ?w)} +{\out 1. rev(a : b : c : d : e : f : g : h : i : j : k : l : m : n : Nil,} +{\out ?w)} +\ttbreak +val rules = [appNil,appCons,revNil,revCons]; +\ttbreak +by (REPEAT (resolve_tac rules 1)); +{\out Level 1} +{\out rev(a : b : c : d : e : f : g : h : i : j : k : l : m : n : Nil,} +{\out n : m : l : k : j : i : h : g : f : e : d : c : b : a : Nil)} +{\out No subgoals!} +\end{ttbox} +We may execute $rev$ backwards. This, too, should reverse a list. What +is the reverse of $[a,b,c]$? +\begin{ttbox} +Goal "rev(?x, a:b:c:Nil)"; +{\out Level 0} +{\out rev(?x, a : b : c : Nil)} +{\out 1. rev(?x, a : b : c : Nil)} +\ttbreak +by (REPEAT (resolve_tac rules 1)); +{\out Level 1} +{\out rev(?x1 : Nil, a : b : c : Nil)} +{\out 1. app(Nil, ?x1 : Nil, a : b : c : Nil)} +\end{ttbox} +The tactic has failed to find a solution! It reached a dead end at +subgoal~1: there is no~$\Var{x@1}$ such that [] appended with~$[\Var{x@1}]$ +equals~$[a,b,c]$. Backtracking explores other outcomes. +\begin{ttbox} +back(); +{\out Level 1} +{\out rev(?x1 : a : Nil, a : b : c : Nil)} +{\out 1. app(Nil, ?x1 : Nil, b : c : Nil)} +\end{ttbox} +This too is a dead end, but the next outcome is successful. +\begin{ttbox} +back(); +{\out Level 1} +{\out rev(c : b : a : Nil, a : b : c : Nil)} +{\out No subgoals!} +\end{ttbox} +\ttindex{REPEAT} goes wrong because it is only a repetition tactical, not a +search tactical. \texttt{REPEAT} stops when it cannot continue, regardless of +which state is reached. 
The tactical \ttindex{DEPTH_FIRST} searches for a +satisfactory state, as specified by an \ML{} predicate. Below, +\ttindex{has_fewer_prems} specifies that the proof state should have no +subgoals. +\begin{ttbox} +val prolog_tac = DEPTH_FIRST (has_fewer_prems 1) + (resolve_tac rules 1); +\end{ttbox} +Since Prolog uses depth-first search, this tactic is a (slow!) +Prolog interpreter. We return to the start of the proof using +\ttindex{choplev}, and apply \texttt{prolog_tac}: +\begin{ttbox} +choplev 0; +{\out Level 0} +{\out rev(?x, a : b : c : Nil)} +{\out 1. rev(?x, a : b : c : Nil)} +\ttbreak +by prolog_tac; +{\out Level 1} +{\out rev(c : b : a : Nil, a : b : c : Nil)} +{\out No subgoals!} +\end{ttbox} +Let us try \texttt{prolog_tac} on one more example, containing four unknowns: +\begin{ttbox} +Goal "rev(a:?x:c:?y:Nil, d:?z:b:?u)"; +{\out Level 0} +{\out rev(a : ?x : c : ?y : Nil, d : ?z : b : ?u)} +{\out 1. rev(a : ?x : c : ?y : Nil, d : ?z : b : ?u)} +\ttbreak +by prolog_tac; +{\out Level 1} +{\out rev(a : b : c : d : Nil, d : c : b : a : Nil)} +{\out No subgoals!} +\end{ttbox} +Although Isabelle is much slower than a Prolog system, Isabelle +tactics can exploit logic programming techniques. + diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/document/build --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/doc-src/Intro/document/build Mon Aug 27 20:50:10 2012 +0200 @@ -0,0 +1,25 @@ +#!/bin/bash + +set -e + +FORMAT="$1" +VARIANT="$2" + +"$ISABELLE_TOOL" logo -o isabelle.pdf "" +"$ISABELLE_TOOL" logo -o isabelle.eps "" + +cp "$ISABELLE_HOME/doc-src/iman.sty" . +cp "$ISABELLE_HOME/doc-src/extra.sty" . +cp "$ISABELLE_HOME/doc-src/ttbox.sty" . +cp "$ISABELLE_HOME/doc-src/proof.sty" . +cp "$ISABELLE_HOME/doc-src/manual.bib" . + +"$ISABELLE_TOOL" latex -o sty +"$ISABELLE_TOOL" latex -o "$FORMAT" +"$ISABELLE_TOOL" latex -o bbl +"$ISABELLE_TOOL" latex -o "$FORMAT" +"$ISABELLE_TOOL" latex -o "$FORMAT" +"$ISABELLE_HOME/doc-src/sedindex" root +[ -f root.out ] && "$ISABELLE_HOME/doc-src/fixbookmarks" root.out +"$ISABELLE_TOOL" latex -o "$FORMAT" +"$ISABELLE_TOOL" latex -o "$FORMAT" diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/document/foundations.tex --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/doc-src/Intro/document/foundations.tex Mon Aug 27 20:50:10 2012 +0200 @@ -0,0 +1,1178 @@ +\part{Foundations} +The following sections discuss Isabelle's logical foundations in detail: +representing logical syntax in the typed $\lambda$-calculus; expressing +inference rules in Isabelle's meta-logic; combining rules by resolution. + +If you wish to use Isabelle immediately, please turn to +page~\pageref{chap:getting}. You can always read about foundations later, +either by returning to this point or by looking up particular items in the +index. 
+ +\begin{figure} +\begin{eqnarray*} + \neg P & \hbox{abbreviates} & P\imp\bot \\ + P\bimp Q & \hbox{abbreviates} & (P\imp Q) \conj (Q\imp P) +\end{eqnarray*} +\vskip 4ex + +\(\begin{array}{c@{\qquad\qquad}c} + \infer[({\conj}I)]{P\conj Q}{P & Q} & + \infer[({\conj}E1)]{P}{P\conj Q} \qquad + \infer[({\conj}E2)]{Q}{P\conj Q} \\[4ex] + + \infer[({\disj}I1)]{P\disj Q}{P} \qquad + \infer[({\disj}I2)]{P\disj Q}{Q} & + \infer[({\disj}E)]{R}{P\disj Q & \infer*{R}{[P]} & \infer*{R}{[Q]}}\\[4ex] + + \infer[({\imp}I)]{P\imp Q}{\infer*{Q}{[P]}} & + \infer[({\imp}E)]{Q}{P\imp Q & P} \\[4ex] + + & + \infer[({\bot}E)]{P}{\bot}\\[4ex] + + \infer[({\forall}I)*]{\forall x.P}{P} & + \infer[({\forall}E)]{P[t/x]}{\forall x.P} \\[3ex] + + \infer[({\exists}I)]{\exists x.P}{P[t/x]} & + \infer[({\exists}E)*]{Q}{{\exists x.P} & \infer*{Q}{[P]} } \\[3ex] + + {t=t} \,(refl) & \vcenter{\infer[(subst)]{P[u/x]}{t=u & P[t/x]}} +\end{array} \) + +\bigskip\bigskip +*{\em Eigenvariable conditions\/}: + +$\forall I$: provided $x$ is not free in the assumptions + +$\exists E$: provided $x$ is not free in $Q$ or any assumption except $P$ +\caption{Intuitionistic first-order logic} \label{fol-fig} +\end{figure} + +\section{Formalizing logical syntax in Isabelle}\label{sec:logical-syntax} +\index{first-order logic} + +Figure~\ref{fol-fig} presents intuitionistic first-order logic, +including equality. Let us see how to formalize +this logic in Isabelle, illustrating the main features of Isabelle's +polymorphic meta-logic. + +\index{lambda calc@$\lambda$-calculus} +Isabelle represents syntax using the simply typed $\lambda$-calculus. We +declare a type for each syntactic category of the logic. We declare a +constant for each symbol of the logic, giving each $n$-place operation an +$n$-argument curried function type. Most importantly, +$\lambda$-abstraction represents variable binding in quantifiers. + +\index{types!syntax of}\index{types!function}\index{*fun type} +\index{type constructors} +Isabelle has \ML-style polymorphic types such as~$(\alpha)list$, where +$list$ is a type constructor and $\alpha$ is a type variable; for example, +$(bool)list$ is the type of lists of booleans. Function types have the +form $(\sigma,\tau)fun$ or $\sigma\To\tau$, where $\sigma$ and $\tau$ are +types. Curried function types may be abbreviated: +\[ \sigma@1\To (\cdots \sigma@n\To \tau\cdots) \quad \hbox{as} \quad +[\sigma@1, \ldots, \sigma@n] \To \tau \] + +\index{terms!syntax of} The syntax for terms is summarised below. +Note that there are two versions of function application syntax +available in Isabelle: either $t\,u$, which is the usual form for +higher-order languages, or $t(u)$, trying to look more like +first-order. The latter syntax is used throughout the manual. +\[ +\index{lambda abs@$\lambda$-abstractions}\index{function applications} +\begin{array}{ll} + t :: \tau & \hbox{type constraint, on a term or bound variable} \\ + \lambda x.t & \hbox{abstraction} \\ + \lambda x@1\ldots x@n.t + & \hbox{curried abstraction, $\lambda x@1. \ldots \lambda x@n.t$} \\ + t(u) & \hbox{application} \\ + t (u@1, \ldots, u@n) & \hbox{curried application, $t(u@1)\ldots(u@n)$} +\end{array} +\] + + +\subsection{Simple types and constants}\index{types!simple|bold} + +The syntactic categories of our logic (Fig.\ts\ref{fol-fig}) are {\bf + formulae} and {\bf terms}. Formulae denote truth values, so (following +tradition) let us call their type~$o$. To allow~0 and~$Suc(t)$ as terms, +let us declare a type~$nat$ of natural numbers. 
Later, we shall see +how to admit terms of other types. + +\index{constants}\index{*nat type}\index{*o type} +After declaring the types~$o$ and~$nat$, we may declare constants for the +symbols of our logic. Since $\bot$ denotes a truth value (falsity) and 0 +denotes a number, we put \begin{eqnarray*} + \bot & :: & o \\ + 0 & :: & nat. +\end{eqnarray*} +If a symbol requires operands, the corresponding constant must have a +function type. In our logic, the successor function +($Suc$) is from natural numbers to natural numbers, negation ($\neg$) is a +function from truth values to truth values, and the binary connectives are +curried functions taking two truth values as arguments: +\begin{eqnarray*} + Suc & :: & nat\To nat \\ + {\neg} & :: & o\To o \\ + \conj,\disj,\imp,\bimp & :: & [o,o]\To o +\end{eqnarray*} +The binary connectives can be declared as infixes, with appropriate +precedences, so that we write $P\conj Q\disj R$ instead of +$\disj(\conj(P,Q), R)$. + +Section~\ref{sec:defining-theories} below describes the syntax of Isabelle +theory files and illustrates it by extending our logic with mathematical +induction. + + +\subsection{Polymorphic types and constants} \label{polymorphic} +\index{types!polymorphic|bold} +\index{equality!polymorphic} +\index{constants!polymorphic} + +Which type should we assign to the equality symbol? If we tried +$[nat,nat]\To o$, then equality would be restricted to the natural +numbers; we should have to declare different equality symbols for each +type. Isabelle's type system is polymorphic, so we could declare +\begin{eqnarray*} + {=} & :: & [\alpha,\alpha]\To o, +\end{eqnarray*} +where the type variable~$\alpha$ ranges over all types. +But this is also wrong. The declaration is too polymorphic; $\alpha$ +includes types like~$o$ and $nat\To nat$. Thus, it admits +$\bot=\neg(\bot)$ and $Suc=Suc$ as formulae, which is acceptable in +higher-order logic but not in first-order logic. + +Isabelle's {\bf type classes}\index{classes} control +polymorphism~\cite{nipkow-prehofer}. Each type variable belongs to a +class, which denotes a set of types. Classes are partially ordered by the +subclass relation, which is essentially the subset relation on the sets of +types. They closely resemble the classes of the functional language +Haskell~\cite{haskell-tutorial,haskell-report}. + +\index{*logic class}\index{*term class} +Isabelle provides the built-in class $logic$, which consists of the logical +types: the ones we want to reason about. Let us declare a class $term$, to +consist of all legal types of terms in our logic. The subclass structure +is now $term\le logic$. + +\index{*nat type} +We put $nat$ in class $term$ by declaring $nat{::}term$. We declare the +equality constant by +\begin{eqnarray*} + {=} & :: & [\alpha{::}term,\alpha]\To o +\end{eqnarray*} +where $\alpha{::}term$ constrains the type variable~$\alpha$ to class +$term$. Such type variables resemble Standard~\ML's equality type +variables. + +We give~$o$ and function types the class $logic$ rather than~$term$, since +they are not legal types for terms. We may introduce new types of class +$term$ --- for instance, type $string$ or $real$ --- at any time. We can +even declare type constructors such as~$list$, and state that type +$(\tau)list$ belongs to class~$term$ provided $\tau$ does; equality +applies to lists of natural numbers but not to lists of formulae. 
We may +summarize this paragraph by a set of {\bf arity declarations} for type +constructors:\index{arities!declaring} +\begin{eqnarray*}\index{*o type}\index{*fun type} + o & :: & logic \\ + fun & :: & (logic,logic)logic \\ + nat, string, real & :: & term \\ + list & :: & (term)term +\end{eqnarray*} +(Recall that $fun$ is the type constructor for function types.) +In \rmindex{higher-order logic}, equality does apply to truth values and +functions; this requires the arity declarations ${o::term}$ +and ${fun::(term,term)term}$. The class system can also handle +overloading.\index{overloading|bold} We could declare $arith$ to be the +subclass of $term$ consisting of the `arithmetic' types, such as~$nat$. +Then we could declare the operators +\begin{eqnarray*} + {+},{-},{\times},{/} & :: & [\alpha{::}arith,\alpha]\To \alpha +\end{eqnarray*} +If we declare new types $real$ and $complex$ of class $arith$, then we +in effect have three sets of operators: +\begin{eqnarray*} + {+},{-},{\times},{/} & :: & [nat,nat]\To nat \\ + {+},{-},{\times},{/} & :: & [real,real]\To real \\ + {+},{-},{\times},{/} & :: & [complex,complex]\To complex +\end{eqnarray*} +Isabelle will regard these as distinct constants, each of which can be defined +separately. We could even introduce the type $(\alpha)vector$ and declare +its arity as $(arith)arith$. Then we could declare the constant +\begin{eqnarray*} + {+} & :: & [(\alpha)vector,(\alpha)vector]\To (\alpha)vector +\end{eqnarray*} +and specify it in terms of ${+} :: [\alpha,\alpha]\To \alpha$. + +A type variable may belong to any finite number of classes. Suppose that +we had declared yet another class $ord \le term$, the class of all +`ordered' types, and a constant +\begin{eqnarray*} + {\le} & :: & [\alpha{::}ord,\alpha]\To o. +\end{eqnarray*} +In this context the variable $x$ in $x \le (x+x)$ will be assigned type +$\alpha{::}\{arith,ord\}$, which means $\alpha$ belongs to both $arith$ and +$ord$. Semantically the set $\{arith,ord\}$ should be understood as the +intersection of the sets of types represented by $arith$ and $ord$. Such +intersections of classes are called \bfindex{sorts}. The empty +intersection of classes, $\{\}$, contains all types and is thus the {\bf + universal sort}. + +Even with overloading, each term has a unique, most general type. For this +to be possible, the class and type declarations must satisfy certain +technical constraints; see +\iflabelundefined{sec:ref-defining-theories}% + {Sect.\ Defining Theories in the {\em Reference Manual}}% + {\S\ref{sec:ref-defining-theories}}. + + +\subsection{Higher types and quantifiers} +\index{types!higher|bold}\index{quantifiers} +Quantifiers are regarded as operations upon functions. Ignoring polymorphism +for the moment, consider the formula $\forall x. P(x)$, where $x$ ranges +over type~$nat$. This is true if $P(x)$ is true for all~$x$. Abstracting +$P(x)$ into a function, this is the same as saying that $\lambda x.P(x)$ +returns true for all arguments. Thus, the universal quantifier can be +represented by a constant +\begin{eqnarray*} + \forall & :: & (nat\To o) \To o, +\end{eqnarray*} +which is essentially an infinitary truth table. The representation of $\forall +x. P(x)$ is $\forall(\lambda x. P(x))$. + +The existential quantifier is treated +in the same way. Other binding operators are also easily handled; for +instance, the summation operator $\Sigma@{k=i}^j f(k)$ can be represented as +$\Sigma(i,j,\lambda k.f(k))$, where +\begin{eqnarray*} + \Sigma & :: & [nat,nat, nat\To nat] \To nat. 
+\end{eqnarray*} +Quantifiers may be polymorphic. We may define $\forall$ and~$\exists$ over +all legal types of terms, not just the natural numbers, and +allow summations over all arithmetic types: +\begin{eqnarray*} + \forall,\exists & :: & (\alpha{::}term\To o) \To o \\ + \Sigma & :: & [nat,nat, nat\To \alpha{::}arith] \To \alpha +\end{eqnarray*} +Observe that the index variables still have type $nat$, while the values +being summed may belong to any arithmetic type. + + +\section{Formalizing logical rules in Isabelle} +\index{meta-implication|bold} +\index{meta-quantifiers|bold} +\index{meta-equality|bold} + +Object-logics are formalized by extending Isabelle's +meta-logic~\cite{paulson-found}, which is intuitionistic higher-order logic. +The meta-level connectives are {\bf implication}, the {\bf universal + quantifier}, and {\bf equality}. +\begin{itemize} + \item The implication \(\phi\Imp \psi\) means `\(\phi\) implies +\(\psi\)', and expresses logical {\bf entailment}. + + \item The quantification \(\Forall x.\phi\) means `\(\phi\) is true for +all $x$', and expresses {\bf generality} in rules and axiom schemes. + +\item The equality \(a\equiv b\) means `$a$ equals $b$', for expressing + {\bf definitions} (see~\S\ref{definitions}).\index{definitions} + Equalities left over from the unification process, so called {\bf + flex-flex constraints},\index{flex-flex constraints} are written $a\qeq + b$. The two equality symbols have the same logical meaning. + +\end{itemize} +The syntax of the meta-logic is formalized in the same manner +as object-logics, using the simply typed $\lambda$-calculus. Analogous to +type~$o$ above, there is a built-in type $prop$ of meta-level truth values. +Meta-level formulae will have this type. Type $prop$ belongs to +class~$logic$; also, $\sigma\To\tau$ belongs to $logic$ provided $\sigma$ +and $\tau$ do. Here are the types of the built-in connectives: +\begin{eqnarray*}\index{*prop type}\index{*logic class} + \Imp & :: & [prop,prop]\To prop \\ + \Forall & :: & (\alpha{::}logic\To prop) \To prop \\ + {\equiv} & :: & [\alpha{::}\{\},\alpha]\To prop \\ + \qeq & :: & [\alpha{::}\{\},\alpha]\To prop +\end{eqnarray*} +The polymorphism in $\Forall$ is restricted to class~$logic$ to exclude +certain types, those used just for parsing. The type variable +$\alpha{::}\{\}$ ranges over the universal sort. + +In our formalization of first-order logic, we declared a type~$o$ of +object-level truth values, rather than using~$prop$ for this purpose. If +we declared the object-level connectives to have types such as +${\neg}::prop\To prop$, then these connectives would be applicable to +meta-level formulae. Keeping $prop$ and $o$ as separate types maintains +the distinction between the meta-level and the object-level. To formalize +the inference rules, we shall need to relate the two levels; accordingly, +we declare the constant +\index{*Trueprop constant} +\begin{eqnarray*} + Trueprop & :: & o\To prop. +\end{eqnarray*} +We may regard $Trueprop$ as a meta-level predicate, reading $Trueprop(P)$ as +`$P$ is true at the object-level.' Put another way, $Trueprop$ is a coercion +from $o$ to $prop$. + + +\subsection{Expressing propositional rules} +\index{rules!propositional} +We shall illustrate the use of the meta-logic by formalizing the rules of +Fig.\ts\ref{fol-fig}. Each object-level rule is expressed as a meta-level +axiom. + +One of the simplest rules is $(\conj E1)$. Making +everything explicit, its formalization in the meta-logic is +$$ +\Forall P\;Q. 
Trueprop(P\conj Q) \Imp Trueprop(P). \eqno(\conj E1) +$$ +This may look formidable, but it has an obvious reading: for all object-level +truth values $P$ and~$Q$, if $P\conj Q$ is true then so is~$P$. The +reading is correct because the meta-logic has simple models, where +types denote sets and $\Forall$ really means `for all.' + +\index{*Trueprop constant} +Isabelle adopts notational conventions to ease the writing of rules. We may +hide the occurrences of $Trueprop$ by making it an implicit coercion. +Outer universal quantifiers may be dropped. Finally, the nested implication +\index{meta-implication} +\[ \phi@1\Imp(\cdots \phi@n\Imp\psi\cdots) \] +may be abbreviated as $\List{\phi@1; \ldots; \phi@n} \Imp \psi$, which +formalizes a rule of $n$~premises. + +Using these conventions, the conjunction rules become the following axioms. +These fully specify the properties of~$\conj$: +$$ \List{P; Q} \Imp P\conj Q \eqno(\conj I) $$ +$$ P\conj Q \Imp P \qquad P\conj Q \Imp Q \eqno(\conj E1,2) $$ + +\noindent +Next, consider the disjunction rules. The discharge of assumption in +$(\disj E)$ is expressed using $\Imp$: +\index{assumptions!discharge of}% +$$ P \Imp P\disj Q \qquad Q \Imp P\disj Q \eqno(\disj I1,2) $$ +$$ \List{P\disj Q; P\Imp R; Q\Imp R} \Imp R \eqno(\disj E) $$ +% +To understand this treatment of assumptions in natural +deduction, look at implication. The rule $({\imp}I)$ is the classic +example of natural deduction: to prove that $P\imp Q$ is true, assume $P$ +is true and show that $Q$ must then be true. More concisely, if $P$ +implies $Q$ (at the meta-level), then $P\imp Q$ is true (at the +object-level). Showing the coercion explicitly, this is formalized as +\[ (Trueprop(P)\Imp Trueprop(Q)) \Imp Trueprop(P\imp Q). \] +The rule $({\imp}E)$ is straightforward; hiding $Trueprop$, the axioms to +specify $\imp$ are +$$ (P \Imp Q) \Imp P\imp Q \eqno({\imp}I) $$ +$$ \List{P\imp Q; P} \Imp Q. \eqno({\imp}E) $$ + +\noindent +Finally, the intuitionistic contradiction rule is formalized as the axiom +$$ \bot \Imp P. \eqno(\bot E) $$ + +\begin{warn} +Earlier versions of Isabelle, and certain +papers~\cite{paulson-found,paulson700}, use $\List{P}$ to mean $Trueprop(P)$. +\end{warn} + +\subsection{Quantifier rules and substitution} +\index{quantifiers}\index{rules!quantifier}\index{substitution|bold} +\index{variables!bound}\index{lambda abs@$\lambda$-abstractions} +\index{function applications} + +Isabelle expresses variable binding using $\lambda$-abstraction; for instance, +$\forall x.P$ is formalized as $\forall(\lambda x.P)$. Recall that $F(t)$ +is Isabelle's syntax for application of the function~$F$ to the argument~$t$; +it is not a meta-notation for substitution. On the other hand, a substitution +will take place if $F$ has the form $\lambda x.P$; Isabelle transforms +$(\lambda x.P)(t)$ to~$P[t/x]$ by $\beta$-conversion. Thus, we can express +inference rules that involve substitution for bound variables. + +\index{parameters|bold}\index{eigenvariables|see{parameters}} +A logic may attach provisos to certain of its rules, especially quantifier +rules. We cannot hope to formalize arbitrary provisos. Fortunately, those +typical of quantifier rules always have the same form, namely `$x$ not free in +\ldots {\it (some set of formulae)},' where $x$ is a variable (called a {\bf +parameter} or {\bf eigenvariable}) in some premise. Isabelle treats +provisos using~$\Forall$, its inbuilt notion of `for all'. 
+\index{meta-quantifiers} + +The purpose of the proviso `$x$ not free in \ldots' is +to ensure that the premise may not make assumptions about the value of~$x$, +and therefore holds for all~$x$. We formalize $(\forall I)$ by +\[ \left(\Forall x. Trueprop(P(x))\right) \Imp Trueprop(\forall x.P(x)). \] +This means, `if $P(x)$ is true for all~$x$, then $\forall x.P(x)$ is true.' +The $\forall E$ rule exploits $\beta$-conversion. Hiding $Trueprop$, the +$\forall$ axioms are +$$ \left(\Forall x. P(x)\right) \Imp \forall x.P(x) \eqno(\forall I) $$ +$$ (\forall x.P(x)) \Imp P(t). \eqno(\forall E) $$ + +\noindent +We have defined the object-level universal quantifier~($\forall$) +using~$\Forall$. But we do not require meta-level counterparts of all the +connectives of the object-logic! Consider the existential quantifier: +$$ P(t) \Imp \exists x.P(x) \eqno(\exists I) $$ +$$ \List{\exists x.P(x);\; \Forall x. P(x)\Imp Q} \Imp Q \eqno(\exists E) $$ +Let us verify $(\exists E)$ semantically. Suppose that the premises +hold; since $\exists x.P(x)$ is true, we may choose an~$a$ such that $P(a)$ is +true. Instantiating $\Forall x. P(x)\Imp Q$ with $a$ yields $P(a)\Imp Q$, and +we obtain the desired conclusion, $Q$. + +The treatment of substitution deserves mention. The rule +\[ \infer{P[u/t]}{t=u & P} \] +would be hard to formalize in Isabelle. It calls for replacing~$t$ by $u$ +throughout~$P$, which cannot be expressed using $\beta$-conversion. Our +rule~$(subst)$ uses~$P$ as a template for substitution, inferring $P[u/x]$ +from~$P[t/x]$. When we formalize this as an axiom, the template becomes a +function variable: +$$ \List{t=u; P(t)} \Imp P(u). \eqno(subst) $$ + + +\subsection{Signatures and theories} +\index{signatures|bold} + +A {\bf signature} contains the information necessary for type-checking, +parsing and pretty printing a term. It specifies type classes and their +relationships, types and their arities, constants and their types, etc. It +also contains grammar rules, specified using mixfix declarations. + +Two signatures can be merged provided their specifications are compatible --- +they must not, for example, assign different types to the same constant. +Under similar conditions, a signature can be extended. Signatures are +managed internally by Isabelle; users seldom encounter them. + +\index{theories|bold} A {\bf theory} consists of a signature plus a collection +of axioms. The Pure theory contains only the meta-logic. Theories can be +combined provided their signatures are compatible. A theory definition +extends an existing theory with further signature specifications --- classes, +types, constants and mixfix declarations --- plus lists of axioms and +definitions etc., expressed as strings to be parsed. A theory can formalize a +small piece of mathematics, such as lists and their operations, or an entire +logic. A mathematical development typically involves many theories in a +hierarchy. 
For example, the Pure theory could be extended to form a theory +for Fig.\ts\ref{fol-fig}; this could be extended in two separate ways to form +a theory for natural numbers and a theory for lists; the union of these two +could be extended into a theory defining the length of a list: +\begin{tt} +\[ +\begin{array}{c@{}c@{}c@{}c@{}c} + {} & {} &\hbox{Pure}& {} & {} \\ + {} & {} & \downarrow & {} & {} \\ + {} & {} &\hbox{FOL} & {} & {} \\ + {} & \swarrow & {} & \searrow & {} \\ + \hbox{Nat} & {} & {} & {} & \hbox{List} \\ + {} & \searrow & {} & \swarrow & {} \\ + {} & {} &\hbox{Nat}+\hbox{List}& {} & {} \\ + {} & {} & \downarrow & {} & {} \\ + {} & {} & \hbox{Length} & {} & {} +\end{array} +\] +\end{tt}% +Each Isabelle proof typically works within a single theory, which is +associated with the proof state. However, many different theories may +coexist at the same time, and you may work in each of these during a single +session. + +\begin{warn}\index{constants!clashes with variables}% + Confusing problems arise if you work in the wrong theory. Each theory + defines its own syntax. An identifier may be regarded in one theory as a + constant and in another as a variable, for example. +\end{warn} + +\section{Proof construction in Isabelle} +I have elsewhere described the meta-logic and demonstrated it by +formalizing first-order logic~\cite{paulson-found}. There is a one-to-one +correspondence between meta-level proofs and object-level proofs. To each +use of a meta-level axiom, such as $(\forall I)$, there is a use of the +corresponding object-level rule. Object-level assumptions and parameters +have meta-level counterparts. The meta-level formalization is {\bf + faithful}, admitting no incorrect object-level inferences, and {\bf + adequate}, admitting all correct object-level inferences. These +properties must be demonstrated separately for each object-logic. + +The meta-logic is defined by a collection of inference rules, including +equational rules for the $\lambda$-calculus and logical rules. The rules +for~$\Imp$ and~$\Forall$ resemble those for~$\imp$ and~$\forall$ in +Fig.\ts\ref{fol-fig}. Proofs performed using the primitive meta-rules +would be lengthy; Isabelle proofs normally use certain derived rules. +{\bf Resolution}, in particular, is convenient for backward proof. + +Unification is central to theorem proving. It supports quantifier +reasoning by allowing certain `unknown' terms to be instantiated later, +possibly in stages. When proving that the time required to sort $n$ +integers is proportional to~$n^2$, we need not state the constant of +proportionality; when proving that a hardware adder will deliver the sum of +its inputs, we need not state how many clock ticks will be required. Such +quantities often emerge from the proof. + +Isabelle provides {\bf schematic variables}, or {\bf + unknowns},\index{unknowns} for unification. Logically, unknowns are free +variables. But while ordinary variables remain fixed, unification may +instantiate unknowns. Unknowns are written with a ?\ prefix and are +frequently subscripted: $\Var{a}$, $\Var{a@1}$, $\Var{a@2}$, \ldots, +$\Var{P}$, $\Var{P@1}$, \ldots. + +Recall that an inference rule of the form +\[ \infer{\phi}{\phi@1 & \ldots & \phi@n} \] +is formalized in Isabelle's meta-logic as the axiom +$\List{\phi@1; \ldots; \phi@n} \Imp \phi$.\index{resolution} +Such axioms resemble Prolog's Horn clauses, and can be combined by +resolution --- Isabelle's principal proof method. Resolution yields both +forward and backward proof. 
Backward proof works by unifying a goal with +the conclusion of a rule, whose premises become new subgoals. Forward proof +works by unifying theorems with the premises of a rule, deriving a new theorem. + +Isabelle formulae require an extended notion of resolution. +They differ from Horn clauses in two major respects: +\begin{itemize} + \item They are written in the typed $\lambda$-calculus, and therefore must be +resolved using higher-order unification. + +\item The constituents of a clause need not be atomic formulae. Any + formula of the form $Trueprop(\cdots)$ is atomic, but axioms such as + ${\imp}I$ and $\forall I$ contain non-atomic formulae. +\end{itemize} +Isabelle has little in common with classical resolution theorem provers +such as Otter~\cite{wos-bledsoe}. At the meta-level, Isabelle proves +theorems in their positive form, not by refutation. However, an +object-logic that includes a contradiction rule may employ a refutation +proof procedure. + + +\subsection{Higher-order unification} +\index{unification!higher-order|bold} +Unification is equation solving. The solution of $f(\Var{x},c) \qeq +f(d,\Var{y})$ is $\Var{x}\equiv d$ and $\Var{y}\equiv c$. {\bf +Higher-order unification} is equation solving for typed $\lambda$-terms. +To handle $\beta$-conversion, it must reduce $(\lambda x.t)u$ to $t[u/x]$. +That is easy --- in the typed $\lambda$-calculus, all reduction sequences +terminate at a normal form. But it must guess the unknown +function~$\Var{f}$ in order to solve the equation +\begin{equation} \label{hou-eqn} + \Var{f}(t) \qeq g(u@1,\ldots,u@k). +\end{equation} +Huet's~\cite{huet75} search procedure solves equations by imitation and +projection. {\bf Imitation} makes~$\Var{f}$ apply the leading symbol (if a +constant) of the right-hand side. To solve equation~(\ref{hou-eqn}), it +guesses +\[ \Var{f} \equiv \lambda x. g(\Var{h@1}(x),\ldots,\Var{h@k}(x)), \] +where $\Var{h@1}$, \ldots, $\Var{h@k}$ are new unknowns. Assuming there are no +other occurrences of~$\Var{f}$, equation~(\ref{hou-eqn}) simplifies to the +set of equations +\[ \Var{h@1}(t)\qeq u@1 \quad\ldots\quad \Var{h@k}(t)\qeq u@k. \] +If the procedure solves these equations, instantiating $\Var{h@1}$, \ldots, +$\Var{h@k}$, then it yields an instantiation for~$\Var{f}$. + +{\bf Projection} makes $\Var{f}$ apply one of its arguments. To solve +equation~(\ref{hou-eqn}), if $t$ expects~$m$ arguments and delivers a +result of suitable type, it guesses +\[ \Var{f} \equiv \lambda x. x(\Var{h@1}(x),\ldots,\Var{h@m}(x)), \] +where $\Var{h@1}$, \ldots, $\Var{h@m}$ are new unknowns. Assuming there are no +other occurrences of~$\Var{f}$, equation~(\ref{hou-eqn}) simplifies to the +equation +\[ t(\Var{h@1}(t),\ldots,\Var{h@m}(t)) \qeq g(u@1,\ldots,u@k). \] + +\begin{warn}\index{unification!incompleteness of}% +Huet's unification procedure is complete. Isabelle's polymorphic version, +which solves for type unknowns as well as for term unknowns, is incomplete. +The problem is that projection requires type information. In +equation~(\ref{hou-eqn}), if the type of~$t$ is unknown, then projections +are possible for all~$m\geq0$, and the types of the $\Var{h@i}$ will be +similarly unconstrained. Therefore, Isabelle never attempts such +projections, and may fail to find unifiers where a type unknown turns out +to be a function type. +\end{warn} + +\index{unknowns!function|bold} +Given $\Var{f}(t@1,\ldots,t@n)\qeq u$, Huet's procedure could make up to +$n+1$ guesses. The search tree and set of unifiers may be infinite. 
But +higher-order unification can work effectively, provided you are careful +with {\bf function unknowns}: +\begin{itemize} + \item Equations with no function unknowns are solved using first-order +unification, extended to treat bound variables. For example, $\lambda x.x +\qeq \lambda x.\Var{y}$ has no solution because $\Var{y}\equiv x$ would +capture the free variable~$x$. + + \item An occurrence of the term $\Var{f}(x,y,z)$, where the arguments are +distinct bound variables, causes no difficulties. Its projections can only +match the corresponding variables. + + \item Even an equation such as $\Var{f}(a)\qeq a+a$ is all right. It has +four solutions, but Isabelle evaluates them lazily, trying projection before +imitation. The first solution is usually the one desired: +\[ \Var{f}\equiv \lambda x. x+x \quad + \Var{f}\equiv \lambda x. a+x \quad + \Var{f}\equiv \lambda x. x+a \quad + \Var{f}\equiv \lambda x. a+a \] + \item Equations such as $\Var{f}(\Var{x},\Var{y})\qeq t$ and +$\Var{f}(\Var{g}(x))\qeq t$ admit vast numbers of unifiers, and must be +avoided. +\end{itemize} +In problematic cases, you may have to instantiate some unknowns before +invoking unification. + + +\subsection{Joining rules by resolution} \label{joining} +\index{resolution|bold} +Let $\List{\psi@1; \ldots; \psi@m} \Imp \psi$ and $\List{\phi@1; \ldots; +\phi@n} \Imp \phi$ be two Isabelle theorems, representing object-level rules. +Choosing some~$i$ from~1 to~$n$, suppose that $\psi$ and $\phi@i$ have a +higher-order unifier. Writing $Xs$ for the application of substitution~$s$ to +expression~$X$, this means there is some~$s$ such that $\psi s\equiv \phi@i s$. +By resolution, we may conclude +\[ (\List{\phi@1; \ldots; \phi@{i-1}; \psi@1; \ldots; \psi@m; + \phi@{i+1}; \ldots; \phi@n} \Imp \phi)s. +\] +The substitution~$s$ may instantiate unknowns in both rules. In short, +resolution is the following rule: +\[ \infer[(\psi s\equiv \phi@i s)] + {(\List{\phi@1; \ldots; \phi@{i-1}; \psi@1; \ldots; \psi@m; + \phi@{i+1}; \ldots; \phi@n} \Imp \phi)s} + {\List{\psi@1; \ldots; \psi@m} \Imp \psi & & + \List{\phi@1; \ldots; \phi@n} \Imp \phi} +\] +It operates at the meta-level, on Isabelle theorems, and is justified by +the properties of $\Imp$ and~$\Forall$. It takes the number~$i$ (for +$1\leq i\leq n$) as a parameter and may yield infinitely many conclusions, +one for each unifier of $\psi$ with $\phi@i$. Isabelle returns these +conclusions as a sequence (lazy list). + +Resolution expects the rules to have no outer quantifiers~($\Forall$). +It may rename or instantiate any schematic variables, but leaves free +variables unchanged. When constructing a theory, Isabelle puts the +rules into a standard form with all free variables converted into +schematic ones; for instance, $({\imp}E)$ becomes +\[ \List{\Var{P}\imp \Var{Q}; \Var{P}} \Imp \Var{Q}. +\] +When resolving two rules, the unknowns in the first rule are renamed, by +subscripting, to make them distinct from the unknowns in the second rule. To +resolve $({\imp}E)$ with itself, the first copy of the rule becomes +\[ \List{\Var{P@1}\imp \Var{Q@1}; \Var{P@1}} \Imp \Var{Q@1}. 
\] +Resolving this with $({\imp}E)$ in the first premise, unifying $\Var{Q@1}$ with +$\Var{P}\imp \Var{Q}$, is the meta-level inference +\[ \infer{\List{\Var{P@1}\imp (\Var{P}\imp \Var{Q}); \Var{P@1}; \Var{P}} + \Imp\Var{Q}.} + {\List{\Var{P@1}\imp \Var{Q@1}; \Var{P@1}} \Imp \Var{Q@1} & & + \List{\Var{P}\imp \Var{Q}; \Var{P}} \Imp \Var{Q}} +\] +Renaming the unknowns in the resolvent, we have derived the +object-level rule\index{rules!derived} +\[ \infer{Q.}{R\imp (P\imp Q) & R & P} \] +Joining rules in this fashion is a simple way of proving theorems. The +derived rules are conservative extensions of the object-logic, and may permit +simpler proofs. Let us consider another example. Suppose we have the axiom +$$ \forall x\,y. Suc(x)=Suc(y)\imp x=y. \eqno (inject) $$ + +\noindent +The standard form of $(\forall E)$ is +$\forall x.\Var{P}(x) \Imp \Var{P}(\Var{t})$. +Resolving $(inject)$ with $(\forall E)$ replaces $\Var{P}$ by +$\lambda x. \forall y. Suc(x)=Suc(y)\imp x=y$ and leaves $\Var{t}$ +unchanged, yielding +\[ \forall y. Suc(\Var{t})=Suc(y)\imp \Var{t}=y. \] +Resolving this with $(\forall E)$ puts a subscript on~$\Var{t}$ +and yields +\[ Suc(\Var{t@1})=Suc(\Var{t})\imp \Var{t@1}=\Var{t}. \] +Resolving this with $({\imp}E)$ increases the subscripts and yields +\[ Suc(\Var{t@2})=Suc(\Var{t@1})\Imp \Var{t@2}=\Var{t@1}. +\] +We have derived the rule +\[ \infer{m=n,}{Suc(m)=Suc(n)} \] +which goes directly from $Suc(m)=Suc(n)$ to $m=n$. It is handy for simplifying +an equation like $Suc(Suc(Suc(m)))=Suc(Suc(Suc(0)))$. + + +\section{Lifting a rule into a context} +The rules $({\imp}I)$ and $(\forall I)$ may seem unsuitable for +resolution. They have non-atomic premises, namely $P\Imp Q$ and $\Forall +x.P(x)$, while the conclusions of all the rules are atomic (they have the form +$Trueprop(\cdots)$). Isabelle gets round the problem through a meta-inference +called \bfindex{lifting}. Let us consider how to construct proofs such as +\[ \infer[({\imp}I)]{P\imp(Q\imp R)} + {\infer[({\imp}I)]{Q\imp R} + {\infer*{R}{[P,Q]}}} + \qquad + \infer[(\forall I)]{\forall x\,y.P(x,y)} + {\infer[(\forall I)]{\forall y.P(x,y)}{P(x,y)}} +\] + +\subsection{Lifting over assumptions} +\index{assumptions!lifting over} +Lifting over $\theta\Imp{}$ is the following meta-inference rule: +\[ \infer{\List{\theta\Imp\phi@1; \ldots; \theta\Imp\phi@n} \Imp + (\theta \Imp \phi)} + {\List{\phi@1; \ldots; \phi@n} \Imp \phi} \] +This is clearly sound: if $\List{\phi@1; \ldots; \phi@n} \Imp \phi$ is true and +$\theta\Imp\phi@1$, \ldots, $\theta\Imp\phi@n$ and $\theta$ are all true then +$\phi$ must be true. Iterated lifting over a series of meta-formulae +$\theta@k$, \ldots, $\theta@1$ yields an object-rule whose conclusion is +$\List{\theta@1; \ldots; \theta@k} \Imp \phi$. Typically the $\theta@i$ are +the assumptions in a natural deduction proof; lifting copies them into a rule's +premises and conclusion. + +When resolving two rules, Isabelle lifts the first one if necessary. The +standard form of $({\imp}I)$ is +\[ (\Var{P} \Imp \Var{Q}) \Imp \Var{P}\imp \Var{Q}. \] +To resolve this rule with itself, Isabelle modifies one copy as follows: it +renames the unknowns to $\Var{P@1}$ and $\Var{Q@1}$, then lifts the rule over +$\Var{P}\Imp{}$ to obtain +\[ (\Var{P}\Imp (\Var{P@1} \Imp \Var{Q@1})) \Imp (\Var{P} \Imp + (\Var{P@1}\imp \Var{Q@1})). \] +Using the $\List{\cdots}$ abbreviation, this can be written as +\[ \List{\List{\Var{P}; \Var{P@1}} \Imp \Var{Q@1}; \Var{P}} + \Imp \Var{P@1}\imp \Var{Q@1}. 
\]
+Unifying $\Var{P}\Imp \Var{P@1}\imp\Var{Q@1}$ with $\Var{P} \Imp
+\Var{Q}$ instantiates $\Var{Q}$ to ${\Var{P@1}\imp\Var{Q@1}}$.
+Resolution yields
+\[ (\List{\Var{P}; \Var{P@1}} \Imp \Var{Q@1}) \Imp
+\Var{P}\imp(\Var{P@1}\imp\Var{Q@1}). \]
+This represents the derived rule
+\[ \infer{P\imp(Q\imp R).}{\infer*{R}{[P,Q]}} \]
+
+\subsection{Lifting over parameters}
+\index{parameters!lifting over}
+An analogous form of lifting handles premises of the form $\Forall x\ldots\,$.
+Here, lifting prefixes an object-rule's premises and conclusion with $\Forall
+x$. At the same time, lifting introduces a dependence upon~$x$. It replaces
+each unknown $\Var{a}$ in the rule by $\Var{a'}(x)$, where $\Var{a'}$ is a new
+unknown (by subscripting) of suitable type --- necessarily a function type. In
+short, lifting is the meta-inference
+\[ \infer{\List{\Forall x.\phi@1^x; \ldots; \Forall x.\phi@n^x}
+ \Imp \Forall x.\phi^x,}
+ {\List{\phi@1; \ldots; \phi@n} \Imp \phi} \]
+%
+where $\phi^x$ stands for the result of lifting unknowns over~$x$ in
+$\phi$. It is not hard to verify that this meta-inference is sound. If
+$\phi\Imp\psi$ then $\phi^x\Imp\psi^x$ for all~$x$; so if $\phi^x$ is true
+for all~$x$ then so is $\psi^x$. Thus, from $\phi\Imp\psi$ we conclude
+$(\Forall x.\phi^x) \Imp (\Forall x.\psi^x)$.
+
+For example, $(\disj I)$ might be lifted to
+\[ (\Forall x.\Var{P@1}(x)) \Imp (\Forall x. \Var{P@1}(x)\disj \Var{Q@1}(x))\]
+and $(\forall I)$ to
+\[ (\Forall x\,y.\Var{P@1}(x,y)) \Imp (\Forall x. \forall y.\Var{P@1}(x,y)). \]
+Isabelle has renamed a bound variable in $(\forall I)$ from $x$ to~$y$,
+avoiding a clash. Resolving the above with $(\forall I)$ is the meta-inference
+\[ \infer{(\Forall x\,y.\Var{P@1}(x,y)) \Imp \forall x\,y.\Var{P@1}(x,y) }
+ {(\Forall x\,y.\Var{P@1}(x,y)) \Imp
+ (\Forall x. \forall y.\Var{P@1}(x,y)) &
+ (\Forall x.\Var{P}(x)) \Imp (\forall x.\Var{P}(x))} \]
+Here, $\Var{P}$ is replaced by $\lambda x.\forall y.\Var{P@1}(x,y)$; the
+resolvent expresses the derived rule
+\[ \vcenter{ \infer{\forall x\,y.Q(x,y)}{Q(x,y)} }
+ \quad\hbox{provided $x$, $y$ not free in the assumptions}
+\]
+I discuss lifting and parameters at length elsewhere~\cite{paulson-found}.
+Miller goes into even greater detail~\cite{miller-mixed}.
+
+
+\section{Backward proof by resolution}
+\index{resolution!in backward proof}
+
+Resolution is convenient for deriving simple rules and for reasoning
+forward from facts. It can also support backward proof, where we start
+with a goal and refine it to progressively simpler subgoals until all have
+been solved. {\sc lcf} and its descendants {\sc hol} and Nuprl provide
+tactics and tacticals, which constitute a sophisticated language for
+expressing proof searches. {\bf Tactics} refine subgoals while {\bf
+ tacticals} combine tactics.
+
+\index{LCF system}
+Isabelle's tactics and tacticals work differently from {\sc lcf}'s. An
+Isabelle rule is bidirectional: there is no distinction between
+inputs and outputs. {\sc lcf} has a separate tactic for each rule;
+Isabelle performs refinement by any rule in a uniform fashion, using
+resolution.
+
+Isabelle works with meta-level theorems of the form
+\( \List{\phi@1; \ldots; \phi@n} \Imp \phi \).
+We have viewed this as the {\bf rule} with premises
+$\phi@1$,~\ldots,~$\phi@n$ and conclusion~$\phi$. It can also be viewed as
+the {\bf proof state}\index{proof state}
+with subgoals $\phi@1$,~\ldots,~$\phi@n$ and main
+goal~$\phi$.
+
+To prove the formula~$\phi$, take $\phi\Imp \phi$ as the initial proof
+state.
This assertion is, trivially, a theorem. At a later stage in the
+backward proof, a typical proof state is $\List{\phi@1; \ldots; \phi@n}
+\Imp \phi$. This proof state is a theorem, ensuring that the subgoals
+$\phi@1$,~\ldots,~$\phi@n$ imply~$\phi$. If $n=0$ then we have
+proved~$\phi$ outright. If $\phi$ contains unknowns, they may become
+instantiated during the proof; a proof state may be $\List{\phi@1; \ldots;
+\phi@n} \Imp \phi'$, where $\phi'$ is an instance of~$\phi$.
+
+\subsection{Refinement by resolution}
+To refine subgoal~$i$ of a proof state by a rule, perform the following
+resolution:
+\[ \infer{\hbox{new proof state}}{\hbox{rule} & & \hbox{proof state}} \]
+Suppose the rule is $\List{\psi'@1; \ldots; \psi'@m} \Imp \psi'$ after
+lifting over subgoal~$i$'s assumptions and parameters. If the proof state
+is $\List{\phi@1; \ldots; \phi@n} \Imp \phi$, then the new proof state is
+(for~$1\leq i\leq n$)
+\[ (\List{\phi@1; \ldots; \phi@{i-1}; \psi'@1;
+ \ldots; \psi'@m; \phi@{i+1}; \ldots; \phi@n} \Imp \phi)s. \]
+Substitution~$s$ unifies $\psi'$ with~$\phi@i$. In the proof state,
+subgoal~$i$ is replaced by $m$ new subgoals, the rule's instantiated premises.
+If some of the rule's unknowns are left un-instantiated, they become new
+unknowns in the proof state. Refinement by~$(\exists I)$, namely
+\[ \Var{P}(\Var{t}) \Imp \exists x. \Var{P}(x), \]
+inserts a new unknown derived from~$\Var{t}$ by subscripting and lifting.
+We do not have to specify an `existential witness' when
+applying~$(\exists I)$. Further resolutions may instantiate unknowns in
+the proof state.
+
+\subsection{Proof by assumption}
+\index{assumptions!use of}
+In the course of a natural deduction proof, parameters $x@1$, \ldots,~$x@l$ and
+assumptions $\theta@1$, \ldots, $\theta@k$ accumulate, forming a context for
+each subgoal. Repeated lifting steps can lift a rule into any context. To
+aid readability, Isabelle puts contexts into a normal form, gathering the
+parameters at the front:
+\begin{equation} \label{context-eqn}
+\Forall x@1 \ldots x@l. \List{\theta@1; \ldots; \theta@k}\Imp\theta.
+\end{equation}
+Under the usual reading of the connectives, this expresses that $\theta$
+follows from $\theta@1$,~\ldots,~$\theta@k$ for arbitrary
+$x@1$,~\ldots,~$x@l$. It is trivially true if $\theta$ equals any of
+$\theta@1$,~\ldots,~$\theta@k$, or is unifiable with any of them. This
+models proof by assumption in natural deduction.
+
+Isabelle automates the meta-inference for proof by assumption. Its arguments
+are the meta-theorem $\List{\phi@1; \ldots; \phi@n} \Imp \phi$, and some~$i$
+from~1 to~$n$, where $\phi@i$ has the form~(\ref{context-eqn}). Its results
+are meta-theorems of the form
+\[ (\List{\phi@1; \ldots; \phi@{i-1}; \phi@{i+1}; \ldots; \phi@n} \Imp \phi)s \]
+for each $s$ and~$j$ such that $s$ unifies $\lambda x@1 \ldots x@l. \theta@j$
+with $\lambda x@1 \ldots x@l. \theta$. Isabelle supplies the parameters
+$x@1$,~\ldots,~$x@l$ to higher-order unification as bound variables, which
+regards them as unique constants with a limited scope --- this enforces
+parameter provisos~\cite{paulson-found}.
+
+The premise represents a proof state with~$n$ subgoals, of which the~$i$th
+is to be solved by assumption. Isabelle searches the subgoal's context for
+an assumption~$\theta@j$ that can solve it. For each unifier, the
+meta-inference returns an instantiated proof state from which the $i$th
+subgoal has been removed.
Isabelle searches for a unifying assumption; for
+readability and robustness, proofs do not refer to assumptions by number.
+
+Consider the proof state
+\[ (\List{P(a); P(b)} \Imp P(\Var{x})) \Imp Q(\Var{x}). \]
+Proof by assumption (with $i=1$, the only possibility) yields two results:
+\begin{itemize}
+ \item $Q(a)$, instantiating $\Var{x}\equiv a$
+ \item $Q(b)$, instantiating $\Var{x}\equiv b$
+\end{itemize}
+Here, proof by assumption affects the main goal. It could also affect
+other subgoals; if we also had the subgoal ${\List{P(b); P(c)} \Imp
+ P(\Var{x})}$, then $\Var{x}\equiv a$ would transform it to ${\List{P(b);
+ P(c)} \Imp P(a)}$, which might be unprovable.
+
+
+\subsection{A propositional proof} \label{prop-proof}
+\index{examples!propositional}
+Our first example avoids quantifiers. Given the main goal $P\disj P\imp
+P$, Isabelle creates the initial state
+\[ (P\disj P\imp P)\Imp (P\disj P\imp P). \]
+%
+Bear in mind that every proof state we derive will be a meta-theorem,
+expressing that the subgoals imply the main goal. Our aim is to reach the
+state $P\disj P\imp P$; this meta-theorem is the desired result.
+
+The first step is to refine subgoal~1 by $({\imp}I)$, creating a new state
+where $P\disj P$ is an assumption:
+\[ (P\disj P\Imp P)\Imp (P\disj P\imp P) \]
+The next step is $(\disj E)$, which replaces subgoal~1 by three new subgoals.
+Because of lifting, each subgoal contains a copy of the context --- the
+assumption $P\disj P$. (In fact, this assumption is now redundant; we shall
+shortly see how to get rid of it!) The new proof state is the following
+meta-theorem, laid out for clarity:
+\[ \begin{array}{l@{}l@{\qquad\qquad}l}
+ \lbrakk\;& P\disj P\Imp \Var{P@1}\disj\Var{Q@1}; & \hbox{(subgoal 1)} \\
+ & \List{P\disj P; \Var{P@1}} \Imp P; & \hbox{(subgoal 2)} \\
+ & \List{P\disj P; \Var{Q@1}} \Imp P & \hbox{(subgoal 3)} \\
+ \rbrakk\;& \Imp (P\disj P\imp P) & \hbox{(main goal)}
+ \end{array}
+\]
+Notice the unknowns in the proof state. Because we have applied $(\disj E)$,
+we must prove some disjunction, $\Var{P@1}\disj\Var{Q@1}$. Of course,
+subgoal~1 is provable by assumption. This instantiates both $\Var{P@1}$ and
+$\Var{Q@1}$ to~$P$ throughout the proof state:
+\[ \begin{array}{l@{}l@{\qquad\qquad}l}
+ \lbrakk\;& \List{P\disj P; P} \Imp P; & \hbox{(subgoal 1)} \\
+ & \List{P\disj P; P} \Imp P & \hbox{(subgoal 2)} \\
+ \rbrakk\;& \Imp (P\disj P\imp P) & \hbox{(main goal)}
+ \end{array} \]
+Both of the remaining subgoals can be proved by assumption. After two such
+steps, the proof state is $P\disj P\imp P$.
+
+
+\subsection{A quantifier proof}
+\index{examples!with quantifiers}
+To illustrate quantifiers and $\Forall$-lifting, let us prove
+$(\exists x.P(f(x)))\imp(\exists x.P(x))$. The initial proof
+state is the trivial meta-theorem
+\[ (\exists x.P(f(x)))\imp(\exists x.P(x)) \Imp
+ (\exists x.P(f(x)))\imp(\exists x.P(x)). \]
+As above, the first step is refinement by $({\imp}I)$:
+\[ (\exists x.P(f(x))\Imp \exists x.P(x)) \Imp
+ (\exists x.P(f(x)))\imp(\exists x.P(x))
+\]
+The next step is $(\exists E)$, which replaces subgoal~1 by two new subgoals.
+Both have the assumption $\exists x.P(f(x))$.
The new proof +state is the meta-theorem +\[ \begin{array}{l@{}l@{\qquad\qquad}l} + \lbrakk\;& \exists x.P(f(x)) \Imp \exists x.\Var{P@1}(x); & \hbox{(subgoal 1)} \\ + & \Forall x.\List{\exists x.P(f(x)); \Var{P@1}(x)} \Imp + \exists x.P(x) & \hbox{(subgoal 2)} \\ + \rbrakk\;& \Imp (\exists x.P(f(x)))\imp(\exists x.P(x)) & \hbox{(main goal)} + \end{array} +\] +The unknown $\Var{P@1}$ appears in both subgoals. Because we have applied +$(\exists E)$, we must prove $\exists x.\Var{P@1}(x)$, where $\Var{P@1}(x)$ may +become any formula possibly containing~$x$. Proving subgoal~1 by assumption +instantiates $\Var{P@1}$ to~$\lambda x.P(f(x))$: +\[ \left(\Forall x.\List{\exists x.P(f(x)); P(f(x))} \Imp + \exists x.P(x)\right) + \Imp (\exists x.P(f(x)))\imp(\exists x.P(x)) +\] +The next step is refinement by $(\exists I)$. The rule is lifted into the +context of the parameter~$x$ and the assumption $P(f(x))$. This copies +the context to the subgoal and allows the existential witness to +depend upon~$x$: +\[ \left(\Forall x.\List{\exists x.P(f(x)); P(f(x))} \Imp + P(\Var{x@2}(x))\right) + \Imp (\exists x.P(f(x)))\imp(\exists x.P(x)) +\] +The existential witness, $\Var{x@2}(x)$, consists of an unknown +applied to a parameter. Proof by assumption unifies $\lambda x.P(f(x))$ +with $\lambda x.P(\Var{x@2}(x))$, instantiating $\Var{x@2}$ to $f$. The final +proof state contains no subgoals: $(\exists x.P(f(x)))\imp(\exists x.P(x))$. + + +\subsection{Tactics and tacticals} +\index{tactics|bold}\index{tacticals|bold} +{\bf Tactics} perform backward proof. Isabelle tactics differ from those +of {\sc lcf}, {\sc hol} and Nuprl by operating on entire proof states, +rather than on individual subgoals. An Isabelle tactic is a function that +takes a proof state and returns a sequence (lazy list) of possible +successor states. Lazy lists are coded in ML as functions, a standard +technique~\cite{paulson-ml2}. Isabelle represents proof states by theorems. + +Basic tactics execute the meta-rules described above, operating on a +given subgoal. The {\bf resolution tactics} take a list of rules and +return next states for each combination of rule and unifier. The {\bf +assumption tactic} examines the subgoal's assumptions and returns next +states for each combination of assumption and unifier. Lazy lists are +essential because higher-order resolution may return infinitely many +unifiers. If there are no matching rules or assumptions then no next +states are generated; a tactic application that returns an empty list is +said to {\bf fail}. + +Sequences realize their full potential with {\bf tacticals} --- operators +for combining tactics. Depth-first search, breadth-first search and +best-first search (where a heuristic function selects the best state to +explore) return their outcomes as a sequence. Isabelle provides such +procedures in the form of tacticals. Simpler procedures can be expressed +directly using the basic tacticals {\tt THEN}, {\tt ORELSE} and {\tt REPEAT}: +\begin{ttdescription} +\item[$tac1$ THEN $tac2$] is a tactic for sequential composition. Applied +to a proof state, it returns all states reachable in two steps by applying +$tac1$ followed by~$tac2$. + +\item[$tac1$ ORELSE $tac2$] is a choice tactic. Applied to a state, it +tries~$tac1$ and returns the result if non-empty; otherwise, it uses~$tac2$. + +\item[REPEAT $tac$] is a repetition tactic. Applied to a state, it +returns all states reachable by applying~$tac$ as long as possible --- until +it would fail. 
+\end{ttdescription} +For instance, this tactic repeatedly applies $tac1$ and~$tac2$, giving +$tac1$ priority: +\begin{center} \tt +REPEAT($tac1$ ORELSE $tac2$) +\end{center} + + +\section{Variations on resolution} +In principle, resolution and proof by assumption suffice to prove all +theorems. However, specialized forms of resolution are helpful for working +with elimination rules. Elim-resolution applies an elimination rule to an +assumption; destruct-resolution is similar, but applies a rule in a forward +style. + +The last part of the section shows how the techniques for proving theorems +can also serve to derive rules. + +\subsection{Elim-resolution} +\index{elim-resolution|bold}\index{assumptions!deleting} + +Consider proving the theorem $((R\disj R)\disj R)\disj R\imp R$. By +$({\imp}I)$, we prove~$R$ from the assumption $((R\disj R)\disj R)\disj R$. +Applying $(\disj E)$ to this assumption yields two subgoals, one that +assumes~$R$ (and is therefore trivial) and one that assumes $(R\disj +R)\disj R$. This subgoal admits another application of $(\disj E)$. Since +natural deduction never discards assumptions, we eventually generate a +subgoal containing much that is redundant: +\[ \List{((R\disj R)\disj R)\disj R; (R\disj R)\disj R; R\disj R; R} \Imp R. \] +In general, using $(\disj E)$ on the assumption $P\disj Q$ creates two new +subgoals with the additional assumption $P$ or~$Q$. In these subgoals, +$P\disj Q$ is redundant. Other elimination rules behave +similarly. In first-order logic, only universally quantified +assumptions are sometimes needed more than once --- say, to prove +$P(f(f(a)))$ from the assumptions $\forall x.P(x)\imp P(f(x))$ and~$P(a)$. + +Many logics can be formulated as sequent calculi that delete redundant +assumptions after use. The rule $(\disj E)$ might become +\[ \infer[\disj\hbox{-left}] + {\Gamma,P\disj Q,\Delta \turn \Theta} + {\Gamma,P,\Delta \turn \Theta && \Gamma,Q,\Delta \turn \Theta} \] +In backward proof, a goal containing $P\disj Q$ on the left of the~$\turn$ +(that is, as an assumption) splits into two subgoals, replacing $P\disj Q$ +by $P$ or~$Q$. But the sequent calculus, with its explicit handling of +assumptions, can be tiresome to use. + +Elim-resolution is Isabelle's way of getting sequent calculus behaviour +from natural deduction rules. It lets an elimination rule consume an +assumption. Elim-resolution combines two meta-theorems: +\begin{itemize} + \item a rule $\List{\psi@1; \ldots; \psi@m} \Imp \psi$ + \item a proof state $\List{\phi@1; \ldots; \phi@n} \Imp \phi$ +\end{itemize} +The rule must have at least one premise, thus $m>0$. Write the rule's +lifted form as $\List{\psi'@1; \ldots; \psi'@m} \Imp \psi'$. Suppose we +wish to change subgoal number~$i$. + +Ordinary resolution would attempt to reduce~$\phi@i$, +replacing subgoal~$i$ by $m$ new ones. Elim-resolution tries +simultaneously to reduce~$\phi@i$ and to solve~$\psi'@1$ by assumption; it +returns a sequence of next states. Each of these replaces subgoal~$i$ by +instances of $\psi'@2$, \ldots, $\psi'@m$ from which the selected +assumption has been deleted. Suppose $\phi@i$ has the parameter~$x$ and +assumptions $\theta@1$,~\ldots,~$\theta@k$. Then $\psi'@1$, the rule's first +premise after lifting, will be +\( \Forall x. \List{\theta@1; \ldots; \theta@k}\Imp \psi^{x}@1 \). +Elim-resolution tries to unify $\psi'\qeq\phi@i$ and +$\lambda x. \theta@j \qeq \lambda x. \psi^{x}@1$ simultaneously, for +$j=1$,~\ldots,~$k$. + +Let us redo the example from~\S\ref{prop-proof}. 
The elimination rule +is~$(\disj E)$, +\[ \List{\Var{P}\disj \Var{Q};\; \Var{P}\Imp \Var{R};\; \Var{Q}\Imp \Var{R}} + \Imp \Var{R} \] +and the proof state is $(P\disj P\Imp P)\Imp (P\disj P\imp P)$. The +lifted rule is +\[ \begin{array}{l@{}l} + \lbrakk\;& P\disj P \Imp \Var{P@1}\disj\Var{Q@1}; \\ + & \List{P\disj P ;\; \Var{P@1}} \Imp \Var{R@1}; \\ + & \List{P\disj P ;\; \Var{Q@1}} \Imp \Var{R@1} \\ + \rbrakk\;& \Imp (P\disj P \Imp \Var{R@1}) + \end{array} +\] +Unification takes the simultaneous equations +$P\disj P \qeq \Var{P@1}\disj\Var{Q@1}$ and $\Var{R@1} \qeq P$, yielding +$\Var{P@1}\equiv\Var{Q@1}\equiv\Var{R@1} \equiv P$. The new proof state +is simply +\[ \List{P \Imp P;\; P \Imp P} \Imp (P\disj P\imp P). +\] +Elim-resolution's simultaneous unification gives better control +than ordinary resolution. Recall the substitution rule: +$$ \List{\Var{t}=\Var{u}; \Var{P}(\Var{t})} \Imp \Var{P}(\Var{u}) +\eqno(subst) $$ +Unsuitable for ordinary resolution because $\Var{P}(\Var{u})$ admits many +unifiers, $(subst)$ works well with elim-resolution. It deletes some +assumption of the form $x=y$ and replaces every~$y$ by~$x$ in the subgoal +formula. The simultaneous unification instantiates $\Var{u}$ to~$y$; if +$y$ is not an unknown, then $\Var{P}(y)$ can easily be unified with another +formula. + +In logical parlance, the premise containing the connective to be eliminated +is called the \bfindex{major premise}. Elim-resolution expects the major +premise to come first. The order of the premises is significant in +Isabelle. + +\subsection{Destruction rules} \label{destruct} +\index{rules!destruction}\index{rules!elimination} +\index{forward proof} + +Looking back to Fig.\ts\ref{fol-fig}, notice that there are two kinds of +elimination rule. The rules $({\conj}E1)$, $({\conj}E2)$, $({\imp}E)$ and +$({\forall}E)$ extract the conclusion from the major premise. In Isabelle +parlance, such rules are called {\bf destruction rules}; they are readable +and easy to use in forward proof. The rules $({\disj}E)$, $({\bot}E)$ and +$({\exists}E)$ work by discharging assumptions; they support backward proof +in a style reminiscent of the sequent calculus. + +The latter style is the most general form of elimination rule. In natural +deduction, there is no way to recast $({\disj}E)$, $({\bot}E)$ or +$({\exists}E)$ as destruction rules. But we can write general elimination +rules for $\conj$, $\imp$ and~$\forall$: +\[ +\infer{R}{P\conj Q & \infer*{R}{[P,Q]}} \qquad +\infer{R}{P\imp Q & P & \infer*{R}{[Q]}} \qquad +\infer{Q}{\forall x.P & \infer*{Q}{[P[t/x]]}} +\] +Because they are concise, destruction rules are simpler to derive than the +corresponding elimination rules. To facilitate their use in backward +proof, Isabelle provides a means of transforming a destruction rule such as +\[ \infer[\quad\hbox{to the elimination rule}\quad]{Q}{P@1 & \ldots & P@m} + \infer{R.}{P@1 & \ldots & P@m & \infer*{R}{[Q]}} +\] +{\bf Destruct-resolution}\index{destruct-resolution} combines this +transformation with elim-resolution. It applies a destruction rule to some +assumption of a subgoal. Given the rule above, it replaces the +assumption~$P@1$ by~$Q$, with new subgoals of showing instances of $P@2$, +\ldots,~$P@m$. Destruct-resolution works forward from a subgoal's +assumptions. Ordinary resolution performs forward reasoning from theorems, +as illustrated in~\S\ref{joining}. 
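+
+As a concrete instance of this transformation (a worked case with $m=1$),
+the destruction rule $({\conj}E1)$ yields the corresponding general
+elimination rule:
+\[ \infer[\quad\hbox{becomes}\quad]{P}{P\conj Q}
+   \infer{R.}{P\conj Q & \infer*{R}{[P]}}
+\]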
+ + +\subsection{Deriving rules by resolution} \label{deriving} +\index{rules!derived|bold}\index{meta-assumptions!syntax of} +The meta-logic, itself a form of the predicate calculus, is defined by a +system of natural deduction rules. Each theorem may depend upon +meta-assumptions. The theorem that~$\phi$ follows from the assumptions +$\phi@1$, \ldots, $\phi@n$ is written +\[ \phi \quad [\phi@1,\ldots,\phi@n]. \] +A more conventional notation might be $\phi@1,\ldots,\phi@n \turn \phi$, +but Isabelle's notation is more readable with large formulae. + +Meta-level natural deduction provides a convenient mechanism for deriving +new object-level rules. To derive the rule +\[ \infer{\phi,}{\theta@1 & \ldots & \theta@k} \] +assume the premises $\theta@1$,~\ldots,~$\theta@k$ at the +meta-level. Then prove $\phi$, possibly using these assumptions. +Starting with a proof state $\phi\Imp\phi$, assumptions may accumulate, +reaching a final state such as +\[ \phi \quad [\theta@1,\ldots,\theta@k]. \] +The meta-rule for $\Imp$ introduction discharges an assumption. +Discharging them in the order $\theta@k,\ldots,\theta@1$ yields the +meta-theorem $\List{\theta@1; \ldots; \theta@k} \Imp \phi$, with no +assumptions. This represents the desired rule. +Let us derive the general $\conj$ elimination rule: +$$ \infer{R}{P\conj Q & \infer*{R}{[P,Q]}} \eqno(\conj E) $$ +We assume $P\conj Q$ and $\List{P;Q}\Imp R$, and commence backward proof in +the state $R\Imp R$. Resolving this with the second assumption yields the +state +\[ \phantom{\List{P\conj Q;\; P\conj Q}} + \llap{$\List{P;Q}$}\Imp R \quad [\,\List{P;Q}\Imp R\,]. \] +Resolving subgoals~1 and~2 with~$({\conj}E1)$ and~$({\conj}E2)$, +respectively, yields the state +\[ \List{P\conj \Var{Q@1};\; \Var{P@2}\conj Q}\Imp R + \quad [\,\List{P;Q}\Imp R\,]. +\] +The unknowns $\Var{Q@1}$ and~$\Var{P@2}$ arise from unconstrained +subformulae in the premises of~$({\conj}E1)$ and~$({\conj}E2)$. Resolving +both subgoals with the assumption $P\conj Q$ instantiates the unknowns to yield +\[ R \quad [\, \List{P;Q}\Imp R, P\conj Q \,]. \] +The proof may use the meta-assumptions in any order, and as often as +necessary; when finished, we discharge them in the correct order to +obtain the desired form: +\[ \List{P\conj Q;\; \List{P;Q}\Imp R} \Imp R \] +We have derived the rule using free variables, which prevents their +premature instantiation during the proof; we may now replace them by +schematic variables. + +\begin{warn} + Schematic variables are not allowed in meta-assumptions, for a variety of + reasons. Meta-assumptions remain fixed throughout a proof. +\end{warn} + diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/document/getting.tex --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/doc-src/Intro/document/getting.tex Mon Aug 27 20:50:10 2012 +0200 @@ -0,0 +1,956 @@ +\part{Using Isabelle from the ML Top-Level}\label{chap:getting} + +Most Isabelle users write proof scripts using the Isar language, as described in the \emph{Tutorial}, and debug them through the Proof General user interface~\cite{proofgeneral}. Isabelle's original user interface --- based on the \ML{} top-level --- is still available, however. +Proofs are conducted by +applying certain \ML{} functions, which update a stored proof state. +All syntax can be expressed using plain {\sc ascii} +characters, but Isabelle can support +alternative syntaxes, for example using mathematical symbols from a +special screen font. The meta-logic and main object-logics already +provide such fancy output as an option. 
+
+Object-logics are built upon Pure Isabelle, which implements the
+meta-logic and provides certain fundamental data structures: types,
+terms, signatures, theorems and theories, tactics and tacticals.
+These data structures have the corresponding \ML{} types \texttt{typ},
+\texttt{term}, \texttt{Sign.sg}, \texttt{thm}, \texttt{theory} and \texttt{tactic};
+tacticals have function types such as \texttt{tactic->tactic}. Isabelle
+users can operate on these data structures by writing \ML{} programs.
+
+
+\section{Forward proof}\label{sec:forward} \index{forward proof|(}
+This section describes the concrete syntax for types, terms and theorems,
+and demonstrates forward proof. The examples are set in first-order logic.
+The command to start Isabelle running first-order logic is
+\begin{ttbox}
+isabelle FOL
+\end{ttbox}
+Note that just typing \texttt{isabelle} brings up higher-order logic
+(HOL) by default.
+
+
+\subsection{Lexical matters}
+\index{identifiers}\index{reserved words}
+An {\bf identifier} is a string of letters, digits, underscores~(\verb|_|)
+and single quotes~({\tt'}), beginning with a letter. Single quotes are
+regarded as primes; for instance \texttt{x'} is read as~$x'$. Identifiers are
+separated by white space and special characters. {\bf Reserved words} are
+identifiers that appear in Isabelle syntax definitions.
+
+An Isabelle theory can declare symbols composed of special characters, such
+as {\tt=}, {\tt==}, {\tt=>} and {\tt==>}. (The latter three are part of
+the syntax of the meta-logic.) Such symbols may be run together; thus if
+\verb|}| and \verb|{| are used for set brackets then \verb|{{a},{a,b}}| is
+valid notation for a set of sets --- but only if \verb|}}| and \verb|{{|
+have not been declared as symbols! The parser resolves any ambiguity by
+taking the longest possible symbol that has been declared. Thus the string
+{\tt==>} is read as a single symbol. But \hbox{\tt= =>} is read as two
+symbols.
+
+Identifiers that are not reserved words may serve as free variables or
+constants. A {\bf type identifier} consists of an identifier prefixed by a
+prime, for example {\tt'a} and \hbox{\tt'hello}. Type identifiers stand
+for (free) type variables, which remain fixed during a proof.
+\index{type identifiers}
+
+An {\bf unknown}\index{unknowns} (or type unknown) consists of a question
+mark, an identifier (or type identifier), and a subscript. The subscript,
+a non-negative integer,
+allows the renaming of unknowns prior to unification.%
+\footnote{The subscript may appear after the identifier, separated by a
+ dot; this prevents ambiguity when the identifier ends with a digit. Thus
+ {\tt?z6.0} has identifier {\tt"z6"} and subscript~0, while {\tt?a0.5}
+ has identifier {\tt"a0"} and subscript~5. If the identifier does not
+ end with a digit, then no dot appears and a subscript of~0 is omitted;
+ for example, {\tt?hello} has identifier {\tt"hello"} and subscript
+ zero, while {\tt?z6} has identifier {\tt"z"} and subscript~6. The same
+ conventions apply to type unknowns. The question mark is {\it not\/}
+ part of the identifier!}
+
+
+\subsection{Syntax of types and terms}
+\index{classes!built-in|bold}\index{syntax!of types and terms}
+
+Classes are denoted by identifiers; the built-in class \cldx{logic}
+contains the `logical' types. Sorts are lists of classes enclosed in
+braces~\{ and \}; singleton sorts may be abbreviated by dropping the braces.
+
+\index{types!syntax of|bold}\index{sort constraints} Types are written
+with a syntax like \ML's.
The built-in type \tydx{prop} is the type +of propositions. Type variables can be constrained to particular +classes or sorts, for example \texttt{'a::term} and \texttt{?'b::\ttlbrace + ord, arith\ttrbrace}. +\[\dquotes +\index{*:: symbol}\index{*=> symbol} +\index{{}@{\tt\ttlbrace} symbol}\index{{}@{\tt\ttrbrace} symbol} +\index{*[ symbol}\index{*] symbol} +\begin{array}{ll} + \multicolumn{2}{c}{\hbox{ASCII Notation for Types}} \\ \hline + \alpha "::" C & \hbox{class constraint} \\ + \alpha "::" "\ttlbrace" C@1 "," \ldots "," C@n "\ttrbrace" & + \hbox{sort constraint} \\ + \sigma " => " \tau & \hbox{function type } \sigma\To\tau \\ + "[" \sigma@1 "," \ldots "," \sigma@n "] => " \tau + & \hbox{$n$-argument function type} \\ + "(" \tau@1"," \ldots "," \tau@n ")" tycon & \hbox{type construction} +\end{array} +\] +Terms are those of the typed $\lambda$-calculus. +\index{terms!syntax of|bold}\index{type constraints} +\[\dquotes +\index{%@{\tt\%} symbol}\index{lambda abs@$\lambda$-abstractions} +\index{*:: symbol} +\begin{array}{ll} + \multicolumn{2}{c}{\hbox{ASCII Notation for Terms}} \\ \hline + t "::" \sigma & \hbox{type constraint} \\ + "\%" x "." t & \hbox{abstraction } \lambda x.t \\ + "\%" x@1\ldots x@n "." t & \hbox{abstraction over several arguments} \\ + t "(" u@1"," \ldots "," u@n ")" & + \hbox{application to several arguments (FOL and ZF)} \\ + t\; u@1 \ldots\; u@n & \hbox{application to several arguments (HOL)} +\end{array} +\] +Note that HOL uses its traditional ``higher-order'' syntax for application, +which differs from that used in FOL. + +The theorems and rules of an object-logic are represented by theorems in +the meta-logic, which are expressed using meta-formulae. Since the +meta-logic is higher-order, meta-formulae~$\phi$, $\psi$, $\theta$,~\ldots{} +are just terms of type~\texttt{prop}. +\index{meta-implication} +\index{meta-quantifiers}\index{meta-equality} +\index{*"!"! symbol} + +\index{["!@{\tt[\char124} symbol} %\char124 is vertical bar. We use ! because | stopped working +\index{"!]@{\tt\char124]} symbol} % so these are [| and |] + +\index{*== symbol}\index{*=?= symbol}\index{*==> symbol} +\[\dquotes + \begin{array}{l@{\quad}l@{\quad}l} + \multicolumn{3}{c}{\hbox{ASCII Notation for Meta-Formulae}} \\ \hline + a " == " b & a\equiv b & \hbox{meta-equality} \\ + a " =?= " b & a\qeq b & \hbox{flex-flex constraint} \\ + \phi " ==> " \psi & \phi\Imp \psi & \hbox{meta-implication} \\ + "[|" \phi@1 ";" \ldots ";" \phi@n "|] ==> " \psi & + \List{\phi@1;\ldots;\phi@n} \Imp \psi & \hbox{nested implication} \\ + "!!" x "." \phi & \Forall x.\phi & \hbox{meta-quantification} \\ + "!!" x@1\ldots x@n "." \phi & + \Forall x@1. \ldots x@n.\phi & \hbox{nested quantification} + \end{array} +\] +Flex-flex constraints are meta-equalities arising from unification; they +require special treatment. See~\S\ref{flexflex}. +\index{flex-flex constraints} + +\index{*Trueprop constant} +Most logics define the implicit coercion $Trueprop$ from object-formulae to +propositions. This could cause an ambiguity: in $P\Imp Q$, do the +variables $P$ and $Q$ stand for meta-formulae or object-formulae? If the +latter, $P\Imp Q$ really abbreviates $Trueprop(P)\Imp Trueprop(Q)$. To +prevent such ambiguities, Isabelle's syntax does not allow a meta-formula +to consist of a variable. 
Variables of type~\tydx{prop} are seldom
+useful, but you can make a variable stand for a meta-formula by prefixing
+it with the symbol \texttt{PROP}:\index{*PROP symbol}
+\begin{ttbox}
+PROP ?psi ==> PROP ?theta
+\end{ttbox}
+
+Symbols of object-logics are typically rendered into {\sc ascii} as
+follows:
+\[ \begin{tabular}{l@{\quad}l@{\quad}l}
+ \tt True & $\top$ & true \\
+ \tt False & $\bot$ & false \\
+ \tt $P$ \& $Q$ & $P\conj Q$ & conjunction \\
+ \tt $P$ | $Q$ & $P\disj Q$ & disjunction \\
+ \verb'~' $P$ & $\neg P$ & negation \\
+ \tt $P$ --> $Q$ & $P\imp Q$ & implication \\
+ \tt $P$ <-> $Q$ & $P\bimp Q$ & bi-implication \\
+ \tt ALL $x\,y\,z$ .\ $P$ & $\forall x\,y\,z.P$ & for all \\
+ \tt EX $x\,y\,z$ .\ $P$ & $\exists x\,y\,z.P$ & there exists
+ \end{tabular}
+\]
+To illustrate the notation, consider two axioms for first-order logic:
+$$ \List{P; Q} \Imp P\conj Q \eqno(\conj I) $$
+$$ \List{\exists x. P(x); \Forall x. P(x)\imp Q} \Imp Q \eqno(\exists E) $$
+$({\conj}I)$ translates into {\sc ascii} characters as
+\begin{ttbox}
+[| ?P; ?Q |] ==> ?P & ?Q
+\end{ttbox}
+The schematic variables let unification instantiate the rule. To avoid
+cluttering logic definitions with question marks, Isabelle converts any
+free variables in a rule to schematic variables; we normally declare
+$({\conj}I)$ as
+\begin{ttbox}
+[| P; Q |] ==> P & Q
+\end{ttbox}
+This convention for variables agrees with the treatment of variables in
+goals. Free variables in a goal remain fixed throughout the proof. After
+the proof is finished, Isabelle converts them to scheme variables in the
+resulting theorem. Scheme variables in a goal may be replaced by terms
+during the proof, supporting answer extraction, program synthesis, and so
+forth.
+
+For a final example, the rule $(\exists E)$ is rendered in {\sc ascii} as
+\begin{ttbox}
+[| EX x. P(x); !!x. P(x) ==> Q |] ==> Q
+\end{ttbox}
+
+
+\subsection{Basic operations on theorems}
+\index{theorems!basic operations on|bold}
+\index{LCF system}
+Meta-level theorems have the \ML{} type \mltydx{thm}. They represent the
+theorems and inference rules of object-logics. Isabelle's meta-logic is
+implemented using the {\sc lcf} approach: each meta-level inference rule is
+represented by a function from theorems to theorems. Object-level rules
+are taken as axioms.
+
+The main theorem printing commands are \texttt{prth}, \texttt{prths} and~{\tt
+ prthq}. Of the other operations on theorems, most useful are \texttt{RS}
+and \texttt{RSN}, which perform resolution.
+
+\index{theorems!printing of}
+\begin{ttdescription}
+\item[\ttindex{prth} {\it thm};]
+ pretty-prints {\it thm\/} at the terminal.
+
+\item[\ttindex{prths} {\it thms};]
+ pretty-prints {\it thms}, a list of theorems.
+
+\item[\ttindex{prthq} {\it thmq};]
+ pretty-prints {\it thmq}, a sequence of theorems; this is useful for
+ inspecting the output of a tactic.
+
+\item[$thm1$ RS $thm2$] \index{*RS}
+ resolves the conclusion of $thm1$ with the first premise of~$thm2$.
+
+\item[$thm1$ RSN $(i,thm2)$] \index{*RSN}
+ resolves the conclusion of $thm1$ with the $i$th premise of~$thm2$.
+
+\item[\ttindex{standard} $thm$]
+ puts $thm$ into a standard format. It also renames schematic variables
+ to have subscript zero, improving readability and reducing subscript
+ growth.
+\end{ttdescription}
+The rules of a theory are normally bound to \ML\ identifiers.
Suppose we are +running an Isabelle session containing theory~FOL, natural deduction +first-order logic.\footnote{For a listing of the FOL rules and their \ML{} + names, turn to +\iflabelundefined{fol-rules}{{\em Isabelle's Object-Logics}}% + {page~\pageref{fol-rules}}.} +Let us try an example given in~\S\ref{joining}. We +first print \tdx{mp}, which is the rule~$({\imp}E)$, then resolve it with +itself. +\begin{ttbox} +prth mp; +{\out [| ?P --> ?Q; ?P |] ==> ?Q} +{\out val it = "[| ?P --> ?Q; ?P |] ==> ?Q" : thm} +prth (mp RS mp); +{\out [| ?P1 --> ?P --> ?Q; ?P1; ?P |] ==> ?Q} +{\out val it = "[| ?P1 --> ?P --> ?Q; ?P1; ?P |] ==> ?Q" : thm} +\end{ttbox} +User input appears in {\footnotesize\tt typewriter characters}, and output +appears in{\out slanted typewriter characters}. \ML's response {\out val + }~\ldots{} is compiler-dependent and will sometimes be suppressed. This +session illustrates two formats for the display of theorems. Isabelle's +top-level displays theorems as \ML{} values, enclosed in quotes. Printing +commands like \texttt{prth} omit the quotes and the surrounding \texttt{val + \ldots :\ thm}. Ignoring their side-effects, the printing commands are +identity functions. + +To contrast \texttt{RS} with \texttt{RSN}, we resolve +\tdx{conjunct1}, which stands for~$(\conj E1)$, with~\tdx{mp}. +\begin{ttbox} +conjunct1 RS mp; +{\out val it = "[| (?P --> ?Q) & ?Q1; ?P |] ==> ?Q" : thm} +conjunct1 RSN (2,mp); +{\out val it = "[| ?P --> ?Q; ?P & ?Q1 |] ==> ?Q" : thm} +\end{ttbox} +These correspond to the following proofs: +\[ \infer[({\imp}E)]{Q}{\infer[({\conj}E1)]{P\imp Q}{(P\imp Q)\conj Q@1} & P} + \qquad + \infer[({\imp}E)]{Q}{P\imp Q & \infer[({\conj}E1)]{P}{P\conj Q@1}} +\] +% +Rules can be derived by pasting other rules together. Let us join +\tdx{spec}, which stands for~$(\forall E)$, with \texttt{mp} and {\tt + conjunct1}. In \ML{}, the identifier~\texttt{it} denotes the value just +printed. +\begin{ttbox} +spec; +{\out val it = "ALL x. ?P(x) ==> ?P(?x)" : thm} +it RS mp; +{\out val it = "[| ALL x. ?P3(x) --> ?Q2(x); ?P3(?x1) |] ==>} +{\out ?Q2(?x1)" : thm} +it RS conjunct1; +{\out val it = "[| ALL x. ?P4(x) --> ?P6(x) & ?Q5(x); ?P4(?x2) |] ==>} +{\out ?P6(?x2)" : thm} +standard it; +{\out val it = "[| ALL x. ?P(x) --> ?Pa(x) & ?Q(x); ?P(?x) |] ==>} +{\out ?Pa(?x)" : thm} +\end{ttbox} +By resolving $(\forall E)$ with (${\imp}E)$ and (${\conj}E1)$, we have +derived a destruction rule for formulae of the form $\forall x. +P(x)\imp(Q(x)\conj R(x))$. Used with destruct-resolution, such specialized +rules provide a way of referring to particular assumptions. +\index{assumptions!use of} + +\subsection{*Flex-flex constraints} \label{flexflex} +\index{flex-flex constraints|bold}\index{unknowns!function} +In higher-order unification, {\bf flex-flex} equations are those where both +sides begin with a function unknown, such as $\Var{f}(0)\qeq\Var{g}(0)$. +They admit a trivial unifier, here $\Var{f}\equiv \lambda x.\Var{a}$ and +$\Var{g}\equiv \lambda y.\Var{a}$, where $\Var{a}$ is a new unknown. They +admit many other unifiers, such as $\Var{f} \equiv \lambda x.\Var{g}(0)$ +and $\{\Var{f} \equiv \lambda x.x,\, \Var{g} \equiv \lambda x.0\}$. Huet's +procedure does not enumerate the unifiers; instead, it retains flex-flex +equations as constraints on future unifications. 
Flex-flex constraints
+occasionally become attached to a proof state; more frequently, they appear
+during use of \texttt{RS} and~\texttt{RSN}:
+\begin{ttbox}
+refl;
+{\out val it = "?a = ?a" : thm}
+exI;
+{\out val it = "?P(?x) ==> EX x. ?P(x)" : thm}
+refl RS exI;
+{\out val it = "EX x. ?a3(x) = ?a2(x)" [.] : thm}
+\end{ttbox}
+%
+The mysterious symbol \texttt{[.]} indicates that the result is subject to
+a meta-level hypothesis. We can make all such hypotheses visible by setting the
+\ttindexbold{show_hyps} flag:
+\begin{ttbox}
+set show_hyps;
+{\out val it = true : bool}
+refl RS exI;
+{\out val it = "EX x. ?a3(x) = ?a2(x)" ["?a3(?x) =?= ?a2(?x)"] : thm}
+\end{ttbox}
+
+\noindent
+Renaming variables, this is $\exists x.\Var{f}(x)=\Var{g}(x)$ with
+the constraint ${\Var{f}(\Var{u})\qeq\Var{g}(\Var{u})}$. Instances
+satisfying the constraint include $\exists x.\Var{f}(x)=\Var{f}(x)$ and
+$\exists x.x=\Var{u}$. Calling \ttindex{flexflex_rule} removes all
+constraints by applying the trivial unifier:\index{*prthq}
+\begin{ttbox}
+prthq (flexflex_rule it);
+{\out EX x. ?a4 = ?a4}
+\end{ttbox}
+Isabelle simplifies flex-flex equations to eliminate redundant bound
+variables. In $\lambda x\,y.\Var{f}(k(y),x) \qeq \lambda x\,y.\Var{g}(y)$,
+there is no bound occurrence of~$x$ on the right side; thus, there will be
+none on the left in a common instance of these terms. Choosing a new
+variable~$\Var{h}$, Isabelle assigns $\Var{f}\equiv \lambda u\,v.\Var{h}(u)$,
+simplifying the left side to $\lambda x\,y.\Var{h}(k(y))$. Dropping $x$
+from the equation leaves $\lambda y.\Var{h}(k(y)) \qeq \lambda
+y.\Var{g}(y)$. By $\eta$-conversion, this simplifies to the assignment
+$\Var{g}\equiv\lambda y.\Var{h}(k(y))$.
+
+\begin{warn}
+\ttindex{RS} and \ttindex{RSN} fail (by raising exception \texttt{THM}) unless
+the resolution delivers {\bf exactly one} resolvent. For multiple results,
+use \ttindex{RL} and \ttindex{RLN}, which operate on theorem lists. The
+following example uses \ttindex{read_instantiate} to create an instance
+of \tdx{refl} containing no schematic variables.
+\begin{ttbox}
+val reflk = read_instantiate [("a","k")] refl;
+{\out val reflk = "k = k" : thm}
+\end{ttbox}
+
+\noindent
+A flex-flex constraint is no longer possible; resolution does not find a
+unique unifier:
+\begin{ttbox}
+reflk RS exI;
+{\out uncaught exception}
+{\out THM ("RSN: multiple unifiers", 1,}
+{\out ["k = k", "?P(?x) ==> EX x. ?P(x)"])}
+\end{ttbox}
+Using \ttindex{RL} this time, we discover that there are four unifiers and
+four resolvents:
+\begin{ttbox}
+[reflk] RL [exI];
+{\out val it = ["EX x. x = x", "EX x. k = x",}
+{\out "EX x. x = k", "EX x. k = k"] : thm list}
+\end{ttbox}
+\end{warn}
+
+\index{forward proof|)}
+
+\section{Backward proof}
+Although \texttt{RS} and \texttt{RSN} are fine for simple forward reasoning,
+large proofs require tactics. Isabelle provides a suite of commands for
+conducting a backward proof using tactics.
+
+\subsection{The basic tactics}
+The tactics \texttt{assume_tac}, {\tt
+resolve_tac}, \texttt{eresolve_tac}, and \texttt{dresolve_tac} suffice for most
+single-step proofs. Although \texttt{eresolve_tac} and \texttt{dresolve_tac} are
+not strictly necessary, they simplify proofs involving elimination and
+destruction rules. All the tactics act on a subgoal designated by a
+positive integer~$i$, failing if~$i$ is out of range. The resolution
+tactics try their list of theorems in left-to-right order.
+ +\begin{ttdescription} +\item[\ttindex{assume_tac} {\it i}] \index{tactics!assumption} + is the tactic that attempts to solve subgoal~$i$ by assumption. Proof by + assumption is not a trivial step; it can falsify other subgoals by + instantiating shared variables. There may be several ways of solving the + subgoal by assumption. + +\item[\ttindex{resolve_tac} {\it thms} {\it i}]\index{tactics!resolution} + is the basic resolution tactic, used for most proof steps. The $thms$ + represent object-rules, which are resolved against subgoal~$i$ of the + proof state. For each rule, resolution forms next states by unifying the + conclusion with the subgoal and inserting instantiated premises in its + place. A rule can admit many higher-order unifiers. The tactic fails if + none of the rules generates next states. + +\item[\ttindex{eresolve_tac} {\it thms} {\it i}] \index{elim-resolution} + performs elim-resolution. Like \texttt{resolve_tac~{\it thms}~{\it i\/}} + followed by \texttt{assume_tac~{\it i}}, it applies a rule then solves its + first premise by assumption. But \texttt{eresolve_tac} additionally deletes + that assumption from any subgoals arising from the resolution. + +\item[\ttindex{dresolve_tac} {\it thms} {\it i}] + \index{forward proof}\index{destruct-resolution} + performs destruct-resolution with the~$thms$, as described + in~\S\ref{destruct}. It is useful for forward reasoning from the + assumptions. +\end{ttdescription} + +\subsection{Commands for backward proof} +\index{proofs!commands for} +Tactics are normally applied using the subgoal module, which maintains a +proof state and manages the proof construction. It allows interactive +backtracking through the proof space, going away to prove lemmas, etc.; of +its many commands, most important are the following: +\begin{ttdescription} +\item[\ttindex{Goal} {\it formula}; ] +begins a new proof, where the {\it formula\/} is written as an \ML\ string. + +\item[\ttindex{by} {\it tactic}; ] +applies the {\it tactic\/} to the current proof +state, raising an exception if the tactic fails. + +\item[\ttindex{undo}(); ] + reverts to the previous proof state. Undo can be repeated but cannot be + undone. Do not omit the parentheses; typing {\tt\ \ undo;\ \ } merely + causes \ML\ to echo the value of that function. + +\item[\ttindex{result}();] +returns the theorem just proved, in a standard format. It fails if +unproved subgoals are left, etc. + +\item[\ttindex{qed} {\it name};] is the usual way of ending a proof. + It gets the theorem using \texttt{result}, stores it in Isabelle's + theorem database and binds it to an \ML{} identifier. + +\end{ttdescription} +The commands and tactics given above are cumbersome for interactive use. +Although our examples will use the full commands, you may prefer Isabelle's +shortcuts: +\begin{center} \tt +\index{*br} \index{*be} \index{*bd} \index{*ba} +\begin{tabular}{l@{\qquad\rm abbreviates\qquad}l} + ba {\it i}; & by (assume_tac {\it i}); \\ + + br {\it thm} {\it i}; & by (resolve_tac [{\it thm}] {\it i}); \\ + + be {\it thm} {\it i}; & by (eresolve_tac [{\it thm}] {\it i}); \\ + + bd {\it thm} {\it i}; & by (dresolve_tac [{\it thm}] {\it i}); +\end{tabular} +\end{center} + +\subsection{A trivial example in propositional logic} +\index{examples!propositional} + +Directory \texttt{FOL} of the Isabelle distribution defines the theory of +first-order logic. 
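+To work through the examples that follow interactively, one would first
+start Isabelle with the \texttt{FOL} logic image loaded. A hypothetical
+invocation, assuming a conventional installation (the exact command may
+vary), is:
+\begin{ttbox}
+isabelle FOL
+\end{ttbox}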
Let us try the example from \S\ref{prop-proof}, +entering the goal $P\disj P\imp P$ in that theory.\footnote{To run these + examples, see the file \texttt{FOL/ex/intro.ML}.} +\begin{ttbox} +Goal "P|P --> P"; +{\out Level 0} +{\out P | P --> P} +{\out 1. P | P --> P} +\end{ttbox}\index{level of a proof} +Isabelle responds by printing the initial proof state, which has $P\disj +P\imp P$ as the main goal and the only subgoal. The {\bf level} of the +state is the number of \texttt{by} commands that have been applied to reach +it. We now use \ttindex{resolve_tac} to apply the rule \tdx{impI}, +or~$({\imp}I)$, to subgoal~1: +\begin{ttbox} +by (resolve_tac [impI] 1); +{\out Level 1} +{\out P | P --> P} +{\out 1. P | P ==> P} +\end{ttbox} +In the new proof state, subgoal~1 is $P$ under the assumption $P\disj P$. +(The meta-implication {\tt==>} indicates assumptions.) We apply +\tdx{disjE}, or~(${\disj}E)$, to that subgoal: +\begin{ttbox} +by (resolve_tac [disjE] 1); +{\out Level 2} +{\out P | P --> P} +{\out 1. P | P ==> ?P1 | ?Q1} +{\out 2. [| P | P; ?P1 |] ==> P} +{\out 3. [| P | P; ?Q1 |] ==> P} +\end{ttbox} +At Level~2 there are three subgoals, each provable by assumption. We +deviate from~\S\ref{prop-proof} by tackling subgoal~3 first, using +\ttindex{assume_tac}. This affects subgoal~1, updating {\tt?Q1} to~{\tt + P}. +\begin{ttbox} +by (assume_tac 3); +{\out Level 3} +{\out P | P --> P} +{\out 1. P | P ==> ?P1 | P} +{\out 2. [| P | P; ?P1 |] ==> P} +\end{ttbox} +Next we tackle subgoal~2, instantiating {\tt?P1} to~\texttt{P} in subgoal~1. +\begin{ttbox} +by (assume_tac 2); +{\out Level 4} +{\out P | P --> P} +{\out 1. P | P ==> P | P} +\end{ttbox} +Lastly we prove the remaining subgoal by assumption: +\begin{ttbox} +by (assume_tac 1); +{\out Level 5} +{\out P | P --> P} +{\out No subgoals!} +\end{ttbox} +Isabelle tells us that there are no longer any subgoals: the proof is +complete. Calling \texttt{qed} stores the theorem. +\begin{ttbox} +qed "mythm"; +{\out val mythm = "?P | ?P --> ?P" : thm} +\end{ttbox} +Isabelle has replaced the free variable~\texttt{P} by the scheme +variable~{\tt?P}\@. Free variables in the proof state remain fixed +throughout the proof. Isabelle finally converts them to scheme variables +so that the resulting theorem can be instantiated with any formula. + +As an exercise, try doing the proof as in \S\ref{prop-proof}, observing how +instantiations affect the proof state. + + +\subsection{Part of a distributive law} +\index{examples!propositional} +To demonstrate the tactics \ttindex{eresolve_tac}, \ttindex{dresolve_tac} +and the tactical \texttt{REPEAT}, let us prove part of the distributive +law +\[ (P\conj Q)\disj R \,\bimp\, (P\disj R)\conj (Q\disj R). \] +We begin by stating the goal to Isabelle and applying~$({\imp}I)$ to it: +\begin{ttbox} +Goal "(P & Q) | R --> (P | R)"; +{\out Level 0} +{\out P & Q | R --> P | R} +{\out 1. P & Q | R --> P | R} +\ttbreak +by (resolve_tac [impI] 1); +{\out Level 1} +{\out P & Q | R --> P | R} +{\out 1. P & Q | R ==> P | R} +\end{ttbox} +Previously we applied~(${\disj}E)$ using \texttt{resolve_tac}, but +\ttindex{eresolve_tac} deletes the assumption after use. The resulting proof +state is simpler. +\begin{ttbox} +by (eresolve_tac [disjE] 1); +{\out Level 2} +{\out P & Q | R --> P | R} +{\out 1. P & Q ==> P | R} +{\out 2. R ==> P | R} +\end{ttbox} +Using \ttindex{dresolve_tac}, we can apply~(${\conj}E1)$ to subgoal~1, +replacing the assumption $P\conj Q$ by~$P$. 
Normally we should apply the +rule~(${\conj}E)$, given in~\S\ref{destruct}. That is an elimination rule +and requires \texttt{eresolve_tac}; it would replace $P\conj Q$ by the two +assumptions~$P$ and~$Q$. Because the present example does not need~$Q$, we +may try out \texttt{dresolve_tac}. +\begin{ttbox} +by (dresolve_tac [conjunct1] 1); +{\out Level 3} +{\out P & Q | R --> P | R} +{\out 1. P ==> P | R} +{\out 2. R ==> P | R} +\end{ttbox} +The next two steps apply~(${\disj}I1$) and~(${\disj}I2$) in an obvious manner. +\begin{ttbox} +by (resolve_tac [disjI1] 1); +{\out Level 4} +{\out P & Q | R --> P | R} +{\out 1. P ==> P} +{\out 2. R ==> P | R} +\ttbreak +by (resolve_tac [disjI2] 2); +{\out Level 5} +{\out P & Q | R --> P | R} +{\out 1. P ==> P} +{\out 2. R ==> R} +\end{ttbox} +Two calls of \texttt{assume_tac} can finish the proof. The +tactical~\ttindex{REPEAT} here expresses a tactic that calls \texttt{assume_tac~1} +as many times as possible. We can restrict attention to subgoal~1 because +the other subgoals move up after subgoal~1 disappears. +\begin{ttbox} +by (REPEAT (assume_tac 1)); +{\out Level 6} +{\out P & Q | R --> P | R} +{\out No subgoals!} +\end{ttbox} + + +\section{Quantifier reasoning} +\index{quantifiers}\index{parameters}\index{unknowns}\index{unknowns!function} +This section illustrates how Isabelle enforces quantifier provisos. +Suppose that $x$, $y$ and~$z$ are parameters of a subgoal. Quantifier +rules create terms such as~$\Var{f}(x,z)$, where~$\Var{f}$ is a function +unknown. Instantiating $\Var{f}$ to $\lambda x\,z.t$ has the effect of +replacing~$\Var{f}(x,z)$ by~$t$, where the term~$t$ may contain free +occurrences of~$x$ and~$z$. On the other hand, no instantiation +of~$\Var{f}$ can replace~$\Var{f}(x,z)$ by a term containing free +occurrences of~$y$, since parameters are bound variables. + +\subsection{Two quantifier proofs: a success and a failure} +\index{examples!with quantifiers} +Let us contrast a proof of the theorem $\forall x.\exists y.x=y$ with an +attempted proof of the non-theorem $\exists y.\forall x.x=y$. The former +proof succeeds, and the latter fails, because of the scope of quantified +variables~\cite{paulson-found}. Unification helps even in these trivial +proofs. In $\forall x.\exists y.x=y$ the $y$ that `exists' is simply $x$, +but we need never say so. This choice is forced by the reflexive law for +equality, and happens automatically. + +\paragraph{The successful proof.} +The proof of $\forall x.\exists y.x=y$ demonstrates the introduction rules +$(\forall I)$ and~$(\exists I)$. We state the goal and apply $(\forall I)$: +\begin{ttbox} +Goal "ALL x. EX y. x=y"; +{\out Level 0} +{\out ALL x. EX y. x = y} +{\out 1. ALL x. EX y. x = y} +\ttbreak +by (resolve_tac [allI] 1); +{\out Level 1} +{\out ALL x. EX y. x = y} +{\out 1. !!x. EX y. x = y} +\end{ttbox} +The variable~\texttt{x} is no longer universally quantified, but is a +parameter in the subgoal; thus, it is universally quantified at the +meta-level. The subgoal must be proved for all possible values of~\texttt{x}. + +To remove the existential quantifier, we apply the rule $(\exists I)$: +\begin{ttbox} +by (resolve_tac [exI] 1); +{\out Level 2} +{\out ALL x. EX y. x = y} +{\out 1. !!x. x = ?y1(x)} +\end{ttbox} +The bound variable \texttt{y} has become {\tt?y1(x)}. This term consists of +the function unknown~{\tt?y1} applied to the parameter~\texttt{x}. +Instances of {\tt?y1(x)} may or may not contain~\texttt{x}. We resolve the +subgoal with the reflexivity axiom. 
+\begin{ttbox} +by (resolve_tac [refl] 1); +{\out Level 3} +{\out ALL x. EX y. x = y} +{\out No subgoals!} +\end{ttbox} +Let us consider what has happened in detail. The reflexivity axiom is +lifted over~$x$ to become $\Forall x.\Var{f}(x)=\Var{f}(x)$, which is +unified with $\Forall x.x=\Var{y@1}(x)$. The function unknowns $\Var{f}$ +and~$\Var{y@1}$ are both instantiated to the identity function, and +$x=\Var{y@1}(x)$ collapses to~$x=x$ by $\beta$-reduction. + +\paragraph{The unsuccessful proof.} +We state the goal $\exists y.\forall x.x=y$, which is not a theorem, and +try~$(\exists I)$: +\begin{ttbox} +Goal "EX y. ALL x. x=y"; +{\out Level 0} +{\out EX y. ALL x. x = y} +{\out 1. EX y. ALL x. x = y} +\ttbreak +by (resolve_tac [exI] 1); +{\out Level 1} +{\out EX y. ALL x. x = y} +{\out 1. ALL x. x = ?y} +\end{ttbox} +The unknown \texttt{?y} may be replaced by any term, but this can never +introduce another bound occurrence of~\texttt{x}. We now apply~$(\forall I)$: +\begin{ttbox} +by (resolve_tac [allI] 1); +{\out Level 2} +{\out EX y. ALL x. x = y} +{\out 1. !!x. x = ?y} +\end{ttbox} +Compare our position with the previous Level~2. Instead of {\tt?y1(x)} we +have~{\tt?y}, whose instances may not contain the bound variable~\texttt{x}. +The reflexivity axiom does not unify with subgoal~1. +\begin{ttbox} +by (resolve_tac [refl] 1); +{\out by: tactic failed} +\end{ttbox} +There can be no proof of $\exists y.\forall x.x=y$ by the soundness of +first-order logic. I have elsewhere proved the faithfulness of Isabelle's +encoding of first-order logic~\cite{paulson-found}; there could, of course, be +faults in the implementation. + + +\subsection{Nested quantifiers} +\index{examples!with quantifiers} +Multiple quantifiers create complex terms. Proving +\[ (\forall x\,y.P(x,y)) \imp (\forall z\,w.P(w,z)) \] +will demonstrate how parameters and unknowns develop. If they appear in +the wrong order, the proof will fail. + +This section concludes with a demonstration of \texttt{REPEAT} +and~\texttt{ORELSE}. +\begin{ttbox} +Goal "(ALL x y.P(x,y)) --> (ALL z w.P(w,z))"; +{\out Level 0} +{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +{\out 1. (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +\ttbreak +by (resolve_tac [impI] 1); +{\out Level 1} +{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +{\out 1. ALL x y. P(x,y) ==> ALL z w. P(w,z)} +\end{ttbox} + +\paragraph{The wrong approach.} +Using \texttt{dresolve_tac}, we apply the rule $(\forall E)$, bound to the +\ML\ identifier \tdx{spec}. Then we apply $(\forall I)$. +\begin{ttbox} +by (dresolve_tac [spec] 1); +{\out Level 2} +{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +{\out 1. ALL y. P(?x1,y) ==> ALL z w. P(w,z)} +\ttbreak +by (resolve_tac [allI] 1); +{\out Level 3} +{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +{\out 1. !!z. ALL y. P(?x1,y) ==> ALL w. P(w,z)} +\end{ttbox} +The unknown \texttt{?x1} and the parameter \texttt{z} have appeared. We again +apply $(\forall E)$ and~$(\forall I)$. +\begin{ttbox} +by (dresolve_tac [spec] 1); +{\out Level 4} +{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +{\out 1. !!z. P(?x1,?y3(z)) ==> ALL w. P(w,z)} +\ttbreak +by (resolve_tac [allI] 1); +{\out Level 5} +{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +{\out 1. !!z w. P(?x1,?y3(z)) ==> P(w,z)} +\end{ttbox} +The unknown \texttt{?y3} and the parameter \texttt{w} have appeared. 
Each +unknown is applied to the parameters existing at the time of its creation; +instances of~\texttt{?x1} cannot contain~\texttt{z} or~\texttt{w}, while instances +of {\tt?y3(z)} can only contain~\texttt{z}. Due to the restriction on~\texttt{?x1}, +proof by assumption will fail. +\begin{ttbox} +by (assume_tac 1); +{\out by: tactic failed} +{\out uncaught exception ERROR} +\end{ttbox} + +\paragraph{The right approach.} +To do this proof, the rules must be applied in the correct order. +Parameters should be created before unknowns. The +\ttindex{choplev} command returns to an earlier stage of the proof; +let us return to the result of applying~$({\imp}I)$: +\begin{ttbox} +choplev 1; +{\out Level 1} +{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +{\out 1. ALL x y. P(x,y) ==> ALL z w. P(w,z)} +\end{ttbox} +Previously we made the mistake of applying $(\forall E)$ before $(\forall I)$. +\begin{ttbox} +by (resolve_tac [allI] 1); +{\out Level 2} +{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +{\out 1. !!z. ALL x y. P(x,y) ==> ALL w. P(w,z)} +\ttbreak +by (resolve_tac [allI] 1); +{\out Level 3} +{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +{\out 1. !!z w. ALL x y. P(x,y) ==> P(w,z)} +\end{ttbox} +The parameters \texttt{z} and~\texttt{w} have appeared. We now create the +unknowns: +\begin{ttbox} +by (dresolve_tac [spec] 1); +{\out Level 4} +{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +{\out 1. !!z w. ALL y. P(?x3(z,w),y) ==> P(w,z)} +\ttbreak +by (dresolve_tac [spec] 1); +{\out Level 5} +{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +{\out 1. !!z w. P(?x3(z,w),?y4(z,w)) ==> P(w,z)} +\end{ttbox} +Both {\tt?x3(z,w)} and~{\tt?y4(z,w)} could become any terms containing {\tt +z} and~\texttt{w}: +\begin{ttbox} +by (assume_tac 1); +{\out Level 6} +{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +{\out No subgoals!} +\end{ttbox} + +\paragraph{A one-step proof using tacticals.} +\index{tacticals} \index{examples!of tacticals} + +Repeated application of rules can be effective, but the rules should be +attempted in the correct order. Let us return to the original goal using +\ttindex{choplev}: +\begin{ttbox} +choplev 0; +{\out Level 0} +{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +{\out 1. (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +\end{ttbox} +As we have just seen, \tdx{allI} should be attempted +before~\tdx{spec}, while \ttindex{assume_tac} generally can be +attempted first. Such priorities can easily be expressed +using~\ttindex{ORELSE}, and repeated using~\ttindex{REPEAT}. +\begin{ttbox} +by (REPEAT (assume_tac 1 ORELSE resolve_tac [impI,allI] 1 + ORELSE dresolve_tac [spec] 1)); +{\out Level 1} +{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} +{\out No subgoals!} +\end{ttbox} + + +\subsection{A realistic quantifier proof} +\index{examples!with quantifiers} +To see the practical use of parameters and unknowns, let us prove half of +the equivalence +\[ (\forall x. P(x) \imp Q) \,\bimp\, ((\exists x. P(x)) \imp Q). \] +We state the left-to-right half to Isabelle in the normal way. +Since $\imp$ is nested to the right, $({\imp}I)$ can be applied twice; we +use \texttt{REPEAT}: +\begin{ttbox} +Goal "(ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q"; +{\out Level 0} +{\out (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q} +{\out 1. (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q} +\ttbreak +by (REPEAT (resolve_tac [impI] 1)); +{\out Level 1} +{\out (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q} +{\out 1. [| ALL x. P(x) --> Q; EX x. P(x) |] ==> Q} +\end{ttbox} +We can eliminate the universal or the existential quantifier. 
The +existential quantifier should be eliminated first, since this creates a +parameter. The rule~$(\exists E)$ is bound to the +identifier~\tdx{exE}. +\begin{ttbox} +by (eresolve_tac [exE] 1); +{\out Level 2} +{\out (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q} +{\out 1. !!x. [| ALL x. P(x) --> Q; P(x) |] ==> Q} +\end{ttbox} +The only possibility now is $(\forall E)$, a destruction rule. We use +\ttindex{dresolve_tac}, which discards the quantified assumption; it is +only needed once. +\begin{ttbox} +by (dresolve_tac [spec] 1); +{\out Level 3} +{\out (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q} +{\out 1. !!x. [| P(x); P(?x3(x)) --> Q |] ==> Q} +\end{ttbox} +Because we applied $(\exists E)$ before $(\forall E)$, the unknown +term~{\tt?x3(x)} may depend upon the parameter~\texttt{x}. + +Although $({\imp}E)$ is a destruction rule, it works with +\ttindex{eresolve_tac} to perform backward chaining. This technique is +frequently useful. +\begin{ttbox} +by (eresolve_tac [mp] 1); +{\out Level 4} +{\out (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q} +{\out 1. !!x. P(x) ==> P(?x3(x))} +\end{ttbox} +The tactic has reduced~\texttt{Q} to~\texttt{P(?x3(x))}, deleting the +implication. The final step is trivial, thanks to the occurrence of~\texttt{x}. +\begin{ttbox} +by (assume_tac 1); +{\out Level 5} +{\out (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q} +{\out No subgoals!} +\end{ttbox} + + +\subsection{The classical reasoner} +\index{classical reasoner} +Isabelle provides enough automation to tackle substantial examples. +The classical +reasoner can be set up for any classical natural deduction logic; +see \iflabelundefined{chap:classical}{the {\em Reference Manual\/}}% + {Chap.\ts\ref{chap:classical}}. +It cannot compete with fully automatic theorem provers, but it is +competitive with tools found in other interactive provers. + +Rules are packaged into {\bf classical sets}. The classical reasoner +provides several tactics, which apply rules using naive algorithms. +Unification handles quantifiers as shown above. The most useful tactic +is~\ttindex{Blast_tac}. + +Let us solve problems~40 and~60 of Pelletier~\cite{pelletier86}. (The +backslashes~\hbox{\verb|\|\ldots\verb|\|} are an \ML{} string escape +sequence, to break the long string over two lines.) +\begin{ttbox} +Goal "(EX y. ALL x. J(y,x) <-> ~J(x,x)) \ttback +\ttback --> ~ (ALL x. EX y. ALL z. J(z,y) <-> ~ J(z,x))"; +{\out Level 0} +{\out (EX y. ALL x. J(y,x) <-> ~J(x,x)) -->} +{\out ~(ALL x. EX y. ALL z. J(z,y) <-> ~J(z,x))} +{\out 1. (EX y. ALL x. J(y,x) <-> ~J(x,x)) -->} +{\out ~(ALL x. EX y. ALL z. J(z,y) <-> ~J(z,x))} +\end{ttbox} +\ttindex{Blast_tac} proves subgoal~1 at a stroke. +\begin{ttbox} +by (Blast_tac 1); +{\out Depth = 0} +{\out Depth = 1} +{\out Level 1} +{\out (EX y. ALL x. J(y,x) <-> ~J(x,x)) -->} +{\out ~(ALL x. EX y. ALL z. J(z,y) <-> ~J(z,x))} +{\out No subgoals!} +\end{ttbox} +Sceptics may examine the proof by calling the package's single-step +tactics, such as~\texttt{step_tac}. This would take up much space, however, +so let us proceed to the next example: +\begin{ttbox} +Goal "ALL x. P(x,f(x)) <-> \ttback +\ttback (EX y. (ALL z. P(z,y) --> P(z,f(x))) & P(x,y))"; +{\out Level 0} +{\out ALL x. P(x,f(x)) <-> (EX y. (ALL z. P(z,y) --> P(z,f(x))) & P(x,y))} +{\out 1. ALL x. P(x,f(x)) <->} +{\out (EX y. (ALL z. P(z,y) --> P(z,f(x))) & P(x,y))} +\end{ttbox} +Again, subgoal~1 succumbs immediately. +\begin{ttbox} +by (Blast_tac 1); +{\out Depth = 0} +{\out Depth = 1} +{\out Level 1} +{\out ALL x. P(x,f(x)) <-> (EX y. (ALL z. 
P(z,y) --> P(z,f(x))) & P(x,y))} +{\out No subgoals!} +\end{ttbox} +The classical reasoner is not restricted to the usual logical connectives. +The natural deduction rules for unions and intersections resemble those for +disjunction and conjunction. The rules for infinite unions and +intersections resemble those for quantifiers. Given such rules, the classical +reasoner is effective for reasoning in set theory. + diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/document/root.tex --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/doc-src/Intro/document/root.tex Mon Aug 27 20:50:10 2012 +0200 @@ -0,0 +1,154 @@ +\documentclass[12pt,a4paper]{article} +\usepackage{graphicx,iman,extra,ttbox,proof,pdfsetup} + +%% run bibtex intro to prepare bibliography +%% run ../sedindex intro to prepare index file +%prth *(\(.*\)); \1; +%{\\out \(.*\)} {\\out val it = "\1" : thm} + +\title{\includegraphics[scale=0.5]{isabelle} \\[4ex] Old Introduction to Isabelle} +\author{{\em Lawrence C. Paulson}\\ + Computer Laboratory \\ University of Cambridge \\ + \texttt{lcp@cl.cam.ac.uk}\\[3ex] + With Contributions by Tobias Nipkow and Markus Wenzel +} +\makeindex + +\underscoreoff + +\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2} + +\sloppy +\binperiod %%%treat . like a binary operator + +\newcommand\qeq{\stackrel{?}{\equiv}} %for disagreement pairs in unification +\newcommand{\nand}{\mathbin{\lnot\&}} +\newcommand{\xor}{\mathbin{\#}} + +\pagenumbering{roman} +\begin{document} +\pagestyle{empty} +\begin{titlepage} +\maketitle +\emph{Note}: this document is part of the earlier Isabelle documentation, +which is largely superseded by the Isabelle/HOL +\emph{Tutorial}~\cite{isa-tutorial}. It describes the old-style theory +syntax and shows how to conduct proofs using the +ML top level. This style of interaction is largely obsolete: +most Isabelle proofs are now written using the Isar +language and the Proof General interface. However, this paper contains valuable +information that is not available elsewhere. Its examples are based +on first-order logic rather than higher-order logic. + +\thispagestyle{empty} +\vfill +{\small Copyright \copyright{} \number\year{} by Lawrence C. Paulson} +\end{titlepage} + +\pagestyle{headings} +\part*{Preface} +\index{Isabelle!overview} \index{Isabelle!object-logics supported} +Isabelle~\cite{paulson-natural,paulson-found,paulson700} is a generic theorem +prover. It has been instantiated to support reasoning in several +object-logics: +\begin{itemize} +\item first-order logic, constructive and classical versions +\item higher-order logic, similar to that of Gordon's {\sc +hol}~\cite{mgordon-hol} +\item Zermelo-Fraenkel set theory~\cite{suppes72} +\item an extensional version of Martin-L\"of's Type Theory~\cite{nordstrom90} +\item the classical first-order sequent calculus, {\sc lk} +\item the modal logics $T$, $S4$, and $S43$ +\item the Logic for Computable Functions~\cite{paulson87} +\end{itemize} +A logic's syntax and inference rules are specified declaratively; this +allows single-step proof construction. Isabelle provides control +structures for expressing search procedures. Isabelle also provides +several generic tools, such as simplifiers and classical theorem provers, +which can be applied to object-logics. + +Isabelle is a large system, but beginners can get by with a small +repertoire of commands and a basic knowledge of how Isabelle works. +The Isabelle/HOL \emph{Tutorial}~\cite{isa-tutorial} describes how to get started. 
Advanced Isabelle users will benefit from some
+knowledge of Standard~\ML{}, because Isabelle is written in \ML{};
+\index{ML}
+if you are prepared to write \ML{}
+code, you can get Isabelle to do almost anything. My book
+on~\ML{}~\cite{paulson-ml2} covers much material connected with Isabelle,
+including a simple theorem prover. Users must be familiar with logic as
+used in computer science; there are many good
+texts~\cite{galton90,reeves90}.
+
+\index{LCF}
+{\sc lcf}, developed by Robin Milner and colleagues~\cite{mgordon79}, is an
+ancestor of {\sc hol}, Nuprl, and several other systems. Isabelle borrows
+ideas from {\sc lcf}: formulae are~\ML{} values; theorems belong to an
+abstract type; tactics and tacticals support backward proof. But {\sc lcf}
+represents object-level rules by functions, while Isabelle represents them
+by terms. You may find my other writings~\cite{paulson87,paulson-handbook}
+helpful in understanding the relationship between {\sc lcf} and Isabelle.
+
+\index{Isabelle!release history} Isabelle was first distributed in 1986.
+The 1987 version introduced a higher-order meta-logic with an improved
+treatment of quantifiers. The 1988 version added limited polymorphism and
+support for natural deduction. The 1989 version included a parser and
+pretty printer generator. The 1992 version introduced type classes, to
+support many-sorted and higher-order logics. The 1994 version introduced
+greater support for theories. The most important recent change is the
+introduction of the Isar proof language, thanks to Markus Wenzel.
+Isabelle is still under
+development and will continue to change.
+
+\subsubsection*{Overview}
+This manual consists of three parts. Part~I discusses Isabelle's
+foundations. Part~II presents simple on-line sessions, starting with
+forward proof. It also covers basic tactics and tacticals, and some
+commands for invoking them. Part~III contains further examples for users
+with a bit of experience. It explains how to derive rules, define theories,
+and concludes with an extended example: a Prolog interpreter.
+
+Isabelle's \emph{Reference Manual} and \emph{Object-Logics} manual contain
+more details. They assume familiarity with the concepts presented here.
+
+
+\subsubsection*{Acknowledgements}
+Tobias Nipkow contributed most of the section on defining theories.
+Stefan Berghofer, Sara Kalvala and Viktor Kuncak suggested improvements.
+
+Tobias Nipkow has made immense contributions to Isabelle, including the parser
+generator, type classes, and the simplifier. Carsten Clasohm and Markus
+Wenzel made major contributions; Sonia Mahjoub and Karin Nimmermann also
+helped. Isabelle was developed using Dave Matthews's Standard~{\sc ml}
+compiler, Poly/{\sc ml}. Many people have contributed to Isabelle's standard
+object-logics, including Martin Coen, Philippe de Groote, and Philippe No\"el.
+The research has been funded by the EPSRC (grants GR/G53279, GR/H40570,
+GR/K57381, GR/K77051, GR/M75440) and by ESPRIT (projects 3245: Logical
+Frameworks, and 6453: Types), and by the DFG Schwerpunktprogramm
+\emph{Deduktion}.
+
+\newpage
+\pagestyle{plain} \tableofcontents
+\newpage
+
+\newfont{\sanssi}{cmssi12}
+\vspace*{2.5cm}
+\begin{quote}
+\raggedleft
+{\sanssi
+You can only find truth with logic\\
+if you have already found truth without it.}\\
+\bigskip
+
+G.K.
Chesterton, {\em The Man who was Orthodox} +\end{quote} + +\clearfirst \pagestyle{headings} +\include{foundations} +\include{getting} +\include{advanced} + +\bibliographystyle{plain} \small\raggedright\frenchspacing +\bibliography{manual} + +\printindex +\end{document} diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/foundations.tex --- a/doc-src/Intro/foundations.tex Mon Aug 27 20:19:09 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1178 +0,0 @@ -\part{Foundations} -The following sections discuss Isabelle's logical foundations in detail: -representing logical syntax in the typed $\lambda$-calculus; expressing -inference rules in Isabelle's meta-logic; combining rules by resolution. - -If you wish to use Isabelle immediately, please turn to -page~\pageref{chap:getting}. You can always read about foundations later, -either by returning to this point or by looking up particular items in the -index. - -\begin{figure} -\begin{eqnarray*} - \neg P & \hbox{abbreviates} & P\imp\bot \\ - P\bimp Q & \hbox{abbreviates} & (P\imp Q) \conj (Q\imp P) -\end{eqnarray*} -\vskip 4ex - -\(\begin{array}{c@{\qquad\qquad}c} - \infer[({\conj}I)]{P\conj Q}{P & Q} & - \infer[({\conj}E1)]{P}{P\conj Q} \qquad - \infer[({\conj}E2)]{Q}{P\conj Q} \\[4ex] - - \infer[({\disj}I1)]{P\disj Q}{P} \qquad - \infer[({\disj}I2)]{P\disj Q}{Q} & - \infer[({\disj}E)]{R}{P\disj Q & \infer*{R}{[P]} & \infer*{R}{[Q]}}\\[4ex] - - \infer[({\imp}I)]{P\imp Q}{\infer*{Q}{[P]}} & - \infer[({\imp}E)]{Q}{P\imp Q & P} \\[4ex] - - & - \infer[({\bot}E)]{P}{\bot}\\[4ex] - - \infer[({\forall}I)*]{\forall x.P}{P} & - \infer[({\forall}E)]{P[t/x]}{\forall x.P} \\[3ex] - - \infer[({\exists}I)]{\exists x.P}{P[t/x]} & - \infer[({\exists}E)*]{Q}{{\exists x.P} & \infer*{Q}{[P]} } \\[3ex] - - {t=t} \,(refl) & \vcenter{\infer[(subst)]{P[u/x]}{t=u & P[t/x]}} -\end{array} \) - -\bigskip\bigskip -*{\em Eigenvariable conditions\/}: - -$\forall I$: provided $x$ is not free in the assumptions - -$\exists E$: provided $x$ is not free in $Q$ or any assumption except $P$ -\caption{Intuitionistic first-order logic} \label{fol-fig} -\end{figure} - -\section{Formalizing logical syntax in Isabelle}\label{sec:logical-syntax} -\index{first-order logic} - -Figure~\ref{fol-fig} presents intuitionistic first-order logic, -including equality. Let us see how to formalize -this logic in Isabelle, illustrating the main features of Isabelle's -polymorphic meta-logic. - -\index{lambda calc@$\lambda$-calculus} -Isabelle represents syntax using the simply typed $\lambda$-calculus. We -declare a type for each syntactic category of the logic. We declare a -constant for each symbol of the logic, giving each $n$-place operation an -$n$-argument curried function type. Most importantly, -$\lambda$-abstraction represents variable binding in quantifiers. - -\index{types!syntax of}\index{types!function}\index{*fun type} -\index{type constructors} -Isabelle has \ML-style polymorphic types such as~$(\alpha)list$, where -$list$ is a type constructor and $\alpha$ is a type variable; for example, -$(bool)list$ is the type of lists of booleans. Function types have the -form $(\sigma,\tau)fun$ or $\sigma\To\tau$, where $\sigma$ and $\tau$ are -types. Curried function types may be abbreviated: -\[ \sigma@1\To (\cdots \sigma@n\To \tau\cdots) \quad \hbox{as} \quad -[\sigma@1, \ldots, \sigma@n] \To \tau \] - -\index{terms!syntax of} The syntax for terms is summarised below. 
-Note that there are two versions of function application syntax -available in Isabelle: either $t\,u$, which is the usual form for -higher-order languages, or $t(u)$, trying to look more like -first-order. The latter syntax is used throughout the manual. -\[ -\index{lambda abs@$\lambda$-abstractions}\index{function applications} -\begin{array}{ll} - t :: \tau & \hbox{type constraint, on a term or bound variable} \\ - \lambda x.t & \hbox{abstraction} \\ - \lambda x@1\ldots x@n.t - & \hbox{curried abstraction, $\lambda x@1. \ldots \lambda x@n.t$} \\ - t(u) & \hbox{application} \\ - t (u@1, \ldots, u@n) & \hbox{curried application, $t(u@1)\ldots(u@n)$} -\end{array} -\] - - -\subsection{Simple types and constants}\index{types!simple|bold} - -The syntactic categories of our logic (Fig.\ts\ref{fol-fig}) are {\bf - formulae} and {\bf terms}. Formulae denote truth values, so (following -tradition) let us call their type~$o$. To allow~0 and~$Suc(t)$ as terms, -let us declare a type~$nat$ of natural numbers. Later, we shall see -how to admit terms of other types. - -\index{constants}\index{*nat type}\index{*o type} -After declaring the types~$o$ and~$nat$, we may declare constants for the -symbols of our logic. Since $\bot$ denotes a truth value (falsity) and 0 -denotes a number, we put \begin{eqnarray*} - \bot & :: & o \\ - 0 & :: & nat. -\end{eqnarray*} -If a symbol requires operands, the corresponding constant must have a -function type. In our logic, the successor function -($Suc$) is from natural numbers to natural numbers, negation ($\neg$) is a -function from truth values to truth values, and the binary connectives are -curried functions taking two truth values as arguments: -\begin{eqnarray*} - Suc & :: & nat\To nat \\ - {\neg} & :: & o\To o \\ - \conj,\disj,\imp,\bimp & :: & [o,o]\To o -\end{eqnarray*} -The binary connectives can be declared as infixes, with appropriate -precedences, so that we write $P\conj Q\disj R$ instead of -$\disj(\conj(P,Q), R)$. - -Section~\ref{sec:defining-theories} below describes the syntax of Isabelle -theory files and illustrates it by extending our logic with mathematical -induction. - - -\subsection{Polymorphic types and constants} \label{polymorphic} -\index{types!polymorphic|bold} -\index{equality!polymorphic} -\index{constants!polymorphic} - -Which type should we assign to the equality symbol? If we tried -$[nat,nat]\To o$, then equality would be restricted to the natural -numbers; we should have to declare different equality symbols for each -type. Isabelle's type system is polymorphic, so we could declare -\begin{eqnarray*} - {=} & :: & [\alpha,\alpha]\To o, -\end{eqnarray*} -where the type variable~$\alpha$ ranges over all types. -But this is also wrong. The declaration is too polymorphic; $\alpha$ -includes types like~$o$ and $nat\To nat$. Thus, it admits -$\bot=\neg(\bot)$ and $Suc=Suc$ as formulae, which is acceptable in -higher-order logic but not in first-order logic. - -Isabelle's {\bf type classes}\index{classes} control -polymorphism~\cite{nipkow-prehofer}. Each type variable belongs to a -class, which denotes a set of types. Classes are partially ordered by the -subclass relation, which is essentially the subset relation on the sets of -types. They closely resemble the classes of the functional language -Haskell~\cite{haskell-tutorial,haskell-report}. - -\index{*logic class}\index{*term class} -Isabelle provides the built-in class $logic$, which consists of the logical -types: the ones we want to reason about. 
Let us declare a class $term$, to -consist of all legal types of terms in our logic. The subclass structure -is now $term\le logic$. - -\index{*nat type} -We put $nat$ in class $term$ by declaring $nat{::}term$. We declare the -equality constant by -\begin{eqnarray*} - {=} & :: & [\alpha{::}term,\alpha]\To o -\end{eqnarray*} -where $\alpha{::}term$ constrains the type variable~$\alpha$ to class -$term$. Such type variables resemble Standard~\ML's equality type -variables. - -We give~$o$ and function types the class $logic$ rather than~$term$, since -they are not legal types for terms. We may introduce new types of class -$term$ --- for instance, type $string$ or $real$ --- at any time. We can -even declare type constructors such as~$list$, and state that type -$(\tau)list$ belongs to class~$term$ provided $\tau$ does; equality -applies to lists of natural numbers but not to lists of formulae. We may -summarize this paragraph by a set of {\bf arity declarations} for type -constructors:\index{arities!declaring} -\begin{eqnarray*}\index{*o type}\index{*fun type} - o & :: & logic \\ - fun & :: & (logic,logic)logic \\ - nat, string, real & :: & term \\ - list & :: & (term)term -\end{eqnarray*} -(Recall that $fun$ is the type constructor for function types.) -In \rmindex{higher-order logic}, equality does apply to truth values and -functions; this requires the arity declarations ${o::term}$ -and ${fun::(term,term)term}$. The class system can also handle -overloading.\index{overloading|bold} We could declare $arith$ to be the -subclass of $term$ consisting of the `arithmetic' types, such as~$nat$. -Then we could declare the operators -\begin{eqnarray*} - {+},{-},{\times},{/} & :: & [\alpha{::}arith,\alpha]\To \alpha -\end{eqnarray*} -If we declare new types $real$ and $complex$ of class $arith$, then we -in effect have three sets of operators: -\begin{eqnarray*} - {+},{-},{\times},{/} & :: & [nat,nat]\To nat \\ - {+},{-},{\times},{/} & :: & [real,real]\To real \\ - {+},{-},{\times},{/} & :: & [complex,complex]\To complex -\end{eqnarray*} -Isabelle will regard these as distinct constants, each of which can be defined -separately. We could even introduce the type $(\alpha)vector$ and declare -its arity as $(arith)arith$. Then we could declare the constant -\begin{eqnarray*} - {+} & :: & [(\alpha)vector,(\alpha)vector]\To (\alpha)vector -\end{eqnarray*} -and specify it in terms of ${+} :: [\alpha,\alpha]\To \alpha$. - -A type variable may belong to any finite number of classes. Suppose that -we had declared yet another class $ord \le term$, the class of all -`ordered' types, and a constant -\begin{eqnarray*} - {\le} & :: & [\alpha{::}ord,\alpha]\To o. -\end{eqnarray*} -In this context the variable $x$ in $x \le (x+x)$ will be assigned type -$\alpha{::}\{arith,ord\}$, which means $\alpha$ belongs to both $arith$ and -$ord$. Semantically the set $\{arith,ord\}$ should be understood as the -intersection of the sets of types represented by $arith$ and $ord$. Such -intersections of classes are called \bfindex{sorts}. The empty -intersection of classes, $\{\}$, contains all types and is thus the {\bf - universal sort}. - -Even with overloading, each term has a unique, most general type. For this -to be possible, the class and type declarations must satisfy certain -technical constraints; see -\iflabelundefined{sec:ref-defining-theories}% - {Sect.\ Defining Theories in the {\em Reference Manual}}% - {\S\ref{sec:ref-defining-theories}}. 
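-To fix ideas, here is how some of the declarations above might look in the
-theory-file syntax of \S\ref{sec:defining-theories}. This is only a sketch:
-the theory name \texttt{Arith}, its parent \texttt{FOL}, and the operator
-precedences are invented for illustration.
-\begin{ttbox}
-Arith = FOL +
-classes arith < term
-arities nat  :: arith               (*nat admits arithmetic*)
-        list :: (term)term          (*lists of terms are terms*)
-consts  "+"  :: ['a::arith, 'a] => 'a   (infixl 60)
-        "*"  :: ['a::arith, 'a] => 'a   (infixl 70)
-end
-\end{ttbox}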
- - -\subsection{Higher types and quantifiers} -\index{types!higher|bold}\index{quantifiers} -Quantifiers are regarded as operations upon functions. Ignoring polymorphism -for the moment, consider the formula $\forall x. P(x)$, where $x$ ranges -over type~$nat$. This is true if $P(x)$ is true for all~$x$. Abstracting -$P(x)$ into a function, this is the same as saying that $\lambda x.P(x)$ -returns true for all arguments. Thus, the universal quantifier can be -represented by a constant -\begin{eqnarray*} - \forall & :: & (nat\To o) \To o, -\end{eqnarray*} -which is essentially an infinitary truth table. The representation of $\forall -x. P(x)$ is $\forall(\lambda x. P(x))$. - -The existential quantifier is treated -in the same way. Other binding operators are also easily handled; for -instance, the summation operator $\Sigma@{k=i}^j f(k)$ can be represented as -$\Sigma(i,j,\lambda k.f(k))$, where -\begin{eqnarray*} - \Sigma & :: & [nat,nat, nat\To nat] \To nat. -\end{eqnarray*} -Quantifiers may be polymorphic. We may define $\forall$ and~$\exists$ over -all legal types of terms, not just the natural numbers, and -allow summations over all arithmetic types: -\begin{eqnarray*} - \forall,\exists & :: & (\alpha{::}term\To o) \To o \\ - \Sigma & :: & [nat,nat, nat\To \alpha{::}arith] \To \alpha -\end{eqnarray*} -Observe that the index variables still have type $nat$, while the values -being summed may belong to any arithmetic type. - - -\section{Formalizing logical rules in Isabelle} -\index{meta-implication|bold} -\index{meta-quantifiers|bold} -\index{meta-equality|bold} - -Object-logics are formalized by extending Isabelle's -meta-logic~\cite{paulson-found}, which is intuitionistic higher-order logic. -The meta-level connectives are {\bf implication}, the {\bf universal - quantifier}, and {\bf equality}. -\begin{itemize} - \item The implication \(\phi\Imp \psi\) means `\(\phi\) implies -\(\psi\)', and expresses logical {\bf entailment}. - - \item The quantification \(\Forall x.\phi\) means `\(\phi\) is true for -all $x$', and expresses {\bf generality} in rules and axiom schemes. - -\item The equality \(a\equiv b\) means `$a$ equals $b$', for expressing - {\bf definitions} (see~\S\ref{definitions}).\index{definitions} - Equalities left over from the unification process, so called {\bf - flex-flex constraints},\index{flex-flex constraints} are written $a\qeq - b$. The two equality symbols have the same logical meaning. - -\end{itemize} -The syntax of the meta-logic is formalized in the same manner -as object-logics, using the simply typed $\lambda$-calculus. Analogous to -type~$o$ above, there is a built-in type $prop$ of meta-level truth values. -Meta-level formulae will have this type. Type $prop$ belongs to -class~$logic$; also, $\sigma\To\tau$ belongs to $logic$ provided $\sigma$ -and $\tau$ do. Here are the types of the built-in connectives: -\begin{eqnarray*}\index{*prop type}\index{*logic class} - \Imp & :: & [prop,prop]\To prop \\ - \Forall & :: & (\alpha{::}logic\To prop) \To prop \\ - {\equiv} & :: & [\alpha{::}\{\},\alpha]\To prop \\ - \qeq & :: & [\alpha{::}\{\},\alpha]\To prop -\end{eqnarray*} -The polymorphism in $\Forall$ is restricted to class~$logic$ to exclude -certain types, those used just for parsing. The type variable -$\alpha{::}\{\}$ ranges over the universal sort. - -In our formalization of first-order logic, we declared a type~$o$ of -object-level truth values, rather than using~$prop$ for this purpose. 
If -we declared the object-level connectives to have types such as -${\neg}::prop\To prop$, then these connectives would be applicable to -meta-level formulae. Keeping $prop$ and $o$ as separate types maintains -the distinction between the meta-level and the object-level. To formalize -the inference rules, we shall need to relate the two levels; accordingly, -we declare the constant -\index{*Trueprop constant} -\begin{eqnarray*} - Trueprop & :: & o\To prop. -\end{eqnarray*} -We may regard $Trueprop$ as a meta-level predicate, reading $Trueprop(P)$ as -`$P$ is true at the object-level.' Put another way, $Trueprop$ is a coercion -from $o$ to $prop$. - - -\subsection{Expressing propositional rules} -\index{rules!propositional} -We shall illustrate the use of the meta-logic by formalizing the rules of -Fig.\ts\ref{fol-fig}. Each object-level rule is expressed as a meta-level -axiom. - -One of the simplest rules is $(\conj E1)$. Making -everything explicit, its formalization in the meta-logic is -$$ -\Forall P\;Q. Trueprop(P\conj Q) \Imp Trueprop(P). \eqno(\conj E1) -$$ -This may look formidable, but it has an obvious reading: for all object-level -truth values $P$ and~$Q$, if $P\conj Q$ is true then so is~$P$. The -reading is correct because the meta-logic has simple models, where -types denote sets and $\Forall$ really means `for all.' - -\index{*Trueprop constant} -Isabelle adopts notational conventions to ease the writing of rules. We may -hide the occurrences of $Trueprop$ by making it an implicit coercion. -Outer universal quantifiers may be dropped. Finally, the nested implication -\index{meta-implication} -\[ \phi@1\Imp(\cdots \phi@n\Imp\psi\cdots) \] -may be abbreviated as $\List{\phi@1; \ldots; \phi@n} \Imp \psi$, which -formalizes a rule of $n$~premises. - -Using these conventions, the conjunction rules become the following axioms. -These fully specify the properties of~$\conj$: -$$ \List{P; Q} \Imp P\conj Q \eqno(\conj I) $$ -$$ P\conj Q \Imp P \qquad P\conj Q \Imp Q \eqno(\conj E1,2) $$ - -\noindent -Next, consider the disjunction rules. The discharge of assumption in -$(\disj E)$ is expressed using $\Imp$: -\index{assumptions!discharge of}% -$$ P \Imp P\disj Q \qquad Q \Imp P\disj Q \eqno(\disj I1,2) $$ -$$ \List{P\disj Q; P\Imp R; Q\Imp R} \Imp R \eqno(\disj E) $$ -% -To understand this treatment of assumptions in natural -deduction, look at implication. The rule $({\imp}I)$ is the classic -example of natural deduction: to prove that $P\imp Q$ is true, assume $P$ -is true and show that $Q$ must then be true. More concisely, if $P$ -implies $Q$ (at the meta-level), then $P\imp Q$ is true (at the -object-level). Showing the coercion explicitly, this is formalized as -\[ (Trueprop(P)\Imp Trueprop(Q)) \Imp Trueprop(P\imp Q). \] -The rule $({\imp}E)$ is straightforward; hiding $Trueprop$, the axioms to -specify $\imp$ are -$$ (P \Imp Q) \Imp P\imp Q \eqno({\imp}I) $$ -$$ \List{P\imp Q; P} \Imp Q. \eqno({\imp}E) $$ - -\noindent -Finally, the intuitionistic contradiction rule is formalized as the axiom -$$ \bot \Imp P. \eqno(\bot E) $$ - -\begin{warn} -Earlier versions of Isabelle, and certain -papers~\cite{paulson-found,paulson700}, use $\List{P}$ to mean $Trueprop(P)$. 
-\end{warn} - -\subsection{Quantifier rules and substitution} -\index{quantifiers}\index{rules!quantifier}\index{substitution|bold} -\index{variables!bound}\index{lambda abs@$\lambda$-abstractions} -\index{function applications} - -Isabelle expresses variable binding using $\lambda$-abstraction; for instance, -$\forall x.P$ is formalized as $\forall(\lambda x.P)$. Recall that $F(t)$ -is Isabelle's syntax for application of the function~$F$ to the argument~$t$; -it is not a meta-notation for substitution. On the other hand, a substitution -will take place if $F$ has the form $\lambda x.P$; Isabelle transforms -$(\lambda x.P)(t)$ to~$P[t/x]$ by $\beta$-conversion. Thus, we can express -inference rules that involve substitution for bound variables. - -\index{parameters|bold}\index{eigenvariables|see{parameters}} -A logic may attach provisos to certain of its rules, especially quantifier -rules. We cannot hope to formalize arbitrary provisos. Fortunately, those -typical of quantifier rules always have the same form, namely `$x$ not free in -\ldots {\it (some set of formulae)},' where $x$ is a variable (called a {\bf -parameter} or {\bf eigenvariable}) in some premise. Isabelle treats -provisos using~$\Forall$, its inbuilt notion of `for all'. -\index{meta-quantifiers} - -The purpose of the proviso `$x$ not free in \ldots' is -to ensure that the premise may not make assumptions about the value of~$x$, -and therefore holds for all~$x$. We formalize $(\forall I)$ by -\[ \left(\Forall x. Trueprop(P(x))\right) \Imp Trueprop(\forall x.P(x)). \] -This means, `if $P(x)$ is true for all~$x$, then $\forall x.P(x)$ is true.' -The $\forall E$ rule exploits $\beta$-conversion. Hiding $Trueprop$, the -$\forall$ axioms are -$$ \left(\Forall x. P(x)\right) \Imp \forall x.P(x) \eqno(\forall I) $$ -$$ (\forall x.P(x)) \Imp P(t). \eqno(\forall E) $$ - -\noindent -We have defined the object-level universal quantifier~($\forall$) -using~$\Forall$. But we do not require meta-level counterparts of all the -connectives of the object-logic! Consider the existential quantifier: -$$ P(t) \Imp \exists x.P(x) \eqno(\exists I) $$ -$$ \List{\exists x.P(x);\; \Forall x. P(x)\Imp Q} \Imp Q \eqno(\exists E) $$ -Let us verify $(\exists E)$ semantically. Suppose that the premises -hold; since $\exists x.P(x)$ is true, we may choose an~$a$ such that $P(a)$ is -true. Instantiating $\Forall x. P(x)\Imp Q$ with $a$ yields $P(a)\Imp Q$, and -we obtain the desired conclusion, $Q$. - -The treatment of substitution deserves mention. The rule -\[ \infer{P[u/t]}{t=u & P} \] -would be hard to formalize in Isabelle. It calls for replacing~$t$ by $u$ -throughout~$P$, which cannot be expressed using $\beta$-conversion. Our -rule~$(subst)$ uses~$P$ as a template for substitution, inferring $P[u/x]$ -from~$P[t/x]$. When we formalize this as an axiom, the template becomes a -function variable: -$$ \List{t=u; P(t)} \Imp P(u). \eqno(subst) $$ - - -\subsection{Signatures and theories} -\index{signatures|bold} - -A {\bf signature} contains the information necessary for type-checking, -parsing and pretty printing a term. It specifies type classes and their -relationships, types and their arities, constants and their types, etc. It -also contains grammar rules, specified using mixfix declarations. - -Two signatures can be merged provided their specifications are compatible --- -they must not, for example, assign different types to the same constant. -Under similar conditions, a signature can be extended. 
Signatures are -managed internally by Isabelle; users seldom encounter them. - -\index{theories|bold} A {\bf theory} consists of a signature plus a collection -of axioms. The Pure theory contains only the meta-logic. Theories can be -combined provided their signatures are compatible. A theory definition -extends an existing theory with further signature specifications --- classes, -types, constants and mixfix declarations --- plus lists of axioms and -definitions etc., expressed as strings to be parsed. A theory can formalize a -small piece of mathematics, such as lists and their operations, or an entire -logic. A mathematical development typically involves many theories in a -hierarchy. For example, the Pure theory could be extended to form a theory -for Fig.\ts\ref{fol-fig}; this could be extended in two separate ways to form -a theory for natural numbers and a theory for lists; the union of these two -could be extended into a theory defining the length of a list: -\begin{tt} -\[ -\begin{array}{c@{}c@{}c@{}c@{}c} - {} & {} &\hbox{Pure}& {} & {} \\ - {} & {} & \downarrow & {} & {} \\ - {} & {} &\hbox{FOL} & {} & {} \\ - {} & \swarrow & {} & \searrow & {} \\ - \hbox{Nat} & {} & {} & {} & \hbox{List} \\ - {} & \searrow & {} & \swarrow & {} \\ - {} & {} &\hbox{Nat}+\hbox{List}& {} & {} \\ - {} & {} & \downarrow & {} & {} \\ - {} & {} & \hbox{Length} & {} & {} -\end{array} -\] -\end{tt}% -Each Isabelle proof typically works within a single theory, which is -associated with the proof state. However, many different theories may -coexist at the same time, and you may work in each of these during a single -session. - -\begin{warn}\index{constants!clashes with variables}% - Confusing problems arise if you work in the wrong theory. Each theory - defines its own syntax. An identifier may be regarded in one theory as a - constant and in another as a variable, for example. -\end{warn} - -\section{Proof construction in Isabelle} -I have elsewhere described the meta-logic and demonstrated it by -formalizing first-order logic~\cite{paulson-found}. There is a one-to-one -correspondence between meta-level proofs and object-level proofs. To each -use of a meta-level axiom, such as $(\forall I)$, there is a use of the -corresponding object-level rule. Object-level assumptions and parameters -have meta-level counterparts. The meta-level formalization is {\bf - faithful}, admitting no incorrect object-level inferences, and {\bf - adequate}, admitting all correct object-level inferences. These -properties must be demonstrated separately for each object-logic. - -The meta-logic is defined by a collection of inference rules, including -equational rules for the $\lambda$-calculus and logical rules. The rules -for~$\Imp$ and~$\Forall$ resemble those for~$\imp$ and~$\forall$ in -Fig.\ts\ref{fol-fig}. Proofs performed using the primitive meta-rules -would be lengthy; Isabelle proofs normally use certain derived rules. -{\bf Resolution}, in particular, is convenient for backward proof. - -Unification is central to theorem proving. It supports quantifier -reasoning by allowing certain `unknown' terms to be instantiated later, -possibly in stages. When proving that the time required to sort $n$ -integers is proportional to~$n^2$, we need not state the constant of -proportionality; when proving that a hardware adder will deliver the sum of -its inputs, we need not state how many clock ticks will be required. Such -quantities often emerge from the proof. 
- -Isabelle provides {\bf schematic variables}, or {\bf - unknowns},\index{unknowns} for unification. Logically, unknowns are free -variables. But while ordinary variables remain fixed, unification may -instantiate unknowns. Unknowns are written with a ?\ prefix and are -frequently subscripted: $\Var{a}$, $\Var{a@1}$, $\Var{a@2}$, \ldots, -$\Var{P}$, $\Var{P@1}$, \ldots. - -Recall that an inference rule of the form -\[ \infer{\phi}{\phi@1 & \ldots & \phi@n} \] -is formalized in Isabelle's meta-logic as the axiom -$\List{\phi@1; \ldots; \phi@n} \Imp \phi$.\index{resolution} -Such axioms resemble Prolog's Horn clauses, and can be combined by -resolution --- Isabelle's principal proof method. Resolution yields both -forward and backward proof. Backward proof works by unifying a goal with -the conclusion of a rule, whose premises become new subgoals. Forward proof -works by unifying theorems with the premises of a rule, deriving a new theorem. - -Isabelle formulae require an extended notion of resolution. -They differ from Horn clauses in two major respects: -\begin{itemize} - \item They are written in the typed $\lambda$-calculus, and therefore must be -resolved using higher-order unification. - -\item The constituents of a clause need not be atomic formulae. Any - formula of the form $Trueprop(\cdots)$ is atomic, but axioms such as - ${\imp}I$ and $\forall I$ contain non-atomic formulae. -\end{itemize} -Isabelle has little in common with classical resolution theorem provers -such as Otter~\cite{wos-bledsoe}. At the meta-level, Isabelle proves -theorems in their positive form, not by refutation. However, an -object-logic that includes a contradiction rule may employ a refutation -proof procedure. - - -\subsection{Higher-order unification} -\index{unification!higher-order|bold} -Unification is equation solving. The solution of $f(\Var{x},c) \qeq -f(d,\Var{y})$ is $\Var{x}\equiv d$ and $\Var{y}\equiv c$. {\bf -Higher-order unification} is equation solving for typed $\lambda$-terms. -To handle $\beta$-conversion, it must reduce $(\lambda x.t)u$ to $t[u/x]$. -That is easy --- in the typed $\lambda$-calculus, all reduction sequences -terminate at a normal form. But it must guess the unknown -function~$\Var{f}$ in order to solve the equation -\begin{equation} \label{hou-eqn} - \Var{f}(t) \qeq g(u@1,\ldots,u@k). -\end{equation} -Huet's~\cite{huet75} search procedure solves equations by imitation and -projection. {\bf Imitation} makes~$\Var{f}$ apply the leading symbol (if a -constant) of the right-hand side. To solve equation~(\ref{hou-eqn}), it -guesses -\[ \Var{f} \equiv \lambda x. g(\Var{h@1}(x),\ldots,\Var{h@k}(x)), \] -where $\Var{h@1}$, \ldots, $\Var{h@k}$ are new unknowns. Assuming there are no -other occurrences of~$\Var{f}$, equation~(\ref{hou-eqn}) simplifies to the -set of equations -\[ \Var{h@1}(t)\qeq u@1 \quad\ldots\quad \Var{h@k}(t)\qeq u@k. \] -If the procedure solves these equations, instantiating $\Var{h@1}$, \ldots, -$\Var{h@k}$, then it yields an instantiation for~$\Var{f}$. - -{\bf Projection} makes $\Var{f}$ apply one of its arguments. To solve -equation~(\ref{hou-eqn}), if $t$ expects~$m$ arguments and delivers a -result of suitable type, it guesses -\[ \Var{f} \equiv \lambda x. x(\Var{h@1}(x),\ldots,\Var{h@m}(x)), \] -where $\Var{h@1}$, \ldots, $\Var{h@m}$ are new unknowns. Assuming there are no -other occurrences of~$\Var{f}$, equation~(\ref{hou-eqn}) simplifies to the -equation -\[ t(\Var{h@1}(t),\ldots,\Var{h@m}(t)) \qeq g(u@1,\ldots,u@k). 
\] - -\begin{warn}\index{unification!incompleteness of}% -Huet's unification procedure is complete. Isabelle's polymorphic version, -which solves for type unknowns as well as for term unknowns, is incomplete. -The problem is that projection requires type information. In -equation~(\ref{hou-eqn}), if the type of~$t$ is unknown, then projections -are possible for all~$m\geq0$, and the types of the $\Var{h@i}$ will be -similarly unconstrained. Therefore, Isabelle never attempts such -projections, and may fail to find unifiers where a type unknown turns out -to be a function type. -\end{warn} - -\index{unknowns!function|bold} -Given $\Var{f}(t@1,\ldots,t@n)\qeq u$, Huet's procedure could make up to -$n+1$ guesses. The search tree and set of unifiers may be infinite. But -higher-order unification can work effectively, provided you are careful -with {\bf function unknowns}: -\begin{itemize} - \item Equations with no function unknowns are solved using first-order -unification, extended to treat bound variables. For example, $\lambda x.x -\qeq \lambda x.\Var{y}$ has no solution because $\Var{y}\equiv x$ would -capture the free variable~$x$. - - \item An occurrence of the term $\Var{f}(x,y,z)$, where the arguments are -distinct bound variables, causes no difficulties. Its projections can only -match the corresponding variables. - - \item Even an equation such as $\Var{f}(a)\qeq a+a$ is all right. It has -four solutions, but Isabelle evaluates them lazily, trying projection before -imitation. The first solution is usually the one desired: -\[ \Var{f}\equiv \lambda x. x+x \quad - \Var{f}\equiv \lambda x. a+x \quad - \Var{f}\equiv \lambda x. x+a \quad - \Var{f}\equiv \lambda x. a+a \] - \item Equations such as $\Var{f}(\Var{x},\Var{y})\qeq t$ and -$\Var{f}(\Var{g}(x))\qeq t$ admit vast numbers of unifiers, and must be -avoided. -\end{itemize} -In problematic cases, you may have to instantiate some unknowns before -invoking unification. - - -\subsection{Joining rules by resolution} \label{joining} -\index{resolution|bold} -Let $\List{\psi@1; \ldots; \psi@m} \Imp \psi$ and $\List{\phi@1; \ldots; -\phi@n} \Imp \phi$ be two Isabelle theorems, representing object-level rules. -Choosing some~$i$ from~1 to~$n$, suppose that $\psi$ and $\phi@i$ have a -higher-order unifier. Writing $Xs$ for the application of substitution~$s$ to -expression~$X$, this means there is some~$s$ such that $\psi s\equiv \phi@i s$. -By resolution, we may conclude -\[ (\List{\phi@1; \ldots; \phi@{i-1}; \psi@1; \ldots; \psi@m; - \phi@{i+1}; \ldots; \phi@n} \Imp \phi)s. -\] -The substitution~$s$ may instantiate unknowns in both rules. In short, -resolution is the following rule: -\[ \infer[(\psi s\equiv \phi@i s)] - {(\List{\phi@1; \ldots; \phi@{i-1}; \psi@1; \ldots; \psi@m; - \phi@{i+1}; \ldots; \phi@n} \Imp \phi)s} - {\List{\psi@1; \ldots; \psi@m} \Imp \psi & & - \List{\phi@1; \ldots; \phi@n} \Imp \phi} -\] -It operates at the meta-level, on Isabelle theorems, and is justified by -the properties of $\Imp$ and~$\Forall$. It takes the number~$i$ (for -$1\leq i\leq n$) as a parameter and may yield infinitely many conclusions, -one for each unifier of $\psi$ with $\phi@i$. Isabelle returns these -conclusions as a sequence (lazy list). - -Resolution expects the rules to have no outer quantifiers~($\Forall$). -It may rename or instantiate any schematic variables, but leaves free -variables unchanged. 
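- -For instance (a small worked case; the renaming of unknowns used here is explained next), let the first rule be $({\conj}I)$ with its unknowns renamed apart, $\List{\Var{P@1}; \Var{Q@1}} \Imp \Var{P@1}\conj\Var{Q@1}$, and the second be $(\disj I)$, namely $\Var{P} \Imp \Var{P}\disj\Var{Q}$. Unifying the first rule's conclusion with the second rule's premise instantiates $\Var{P}$ to $\Var{P@1}\conj\Var{Q@1}$, and resolution yields -\[ \List{\Var{P@1}; \Var{Q@1}} \Imp (\Var{P@1}\conj\Var{Q@1})\disj\Var{Q}, \] -a derived rule that concludes $(P\conj Q)\disj R$ from the premises $P$ and~$Q$.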
When constructing a theory, Isabelle puts the -rules into a standard form with all free variables converted into -schematic ones; for instance, $({\imp}E)$ becomes -\[ \List{\Var{P}\imp \Var{Q}; \Var{P}} \Imp \Var{Q}. -\] -When resolving two rules, the unknowns in the first rule are renamed, by -subscripting, to make them distinct from the unknowns in the second rule. To -resolve $({\imp}E)$ with itself, the first copy of the rule becomes -\[ \List{\Var{P@1}\imp \Var{Q@1}; \Var{P@1}} \Imp \Var{Q@1}. \] -Resolving this with $({\imp}E)$ in the first premise, unifying $\Var{Q@1}$ with -$\Var{P}\imp \Var{Q}$, is the meta-level inference -\[ \infer{\List{\Var{P@1}\imp (\Var{P}\imp \Var{Q}); \Var{P@1}; \Var{P}} - \Imp\Var{Q}.} - {\List{\Var{P@1}\imp \Var{Q@1}; \Var{P@1}} \Imp \Var{Q@1} & & - \List{\Var{P}\imp \Var{Q}; \Var{P}} \Imp \Var{Q}} -\] -Renaming the unknowns in the resolvent, we have derived the -object-level rule\index{rules!derived} -\[ \infer{Q.}{R\imp (P\imp Q) & R & P} \] -Joining rules in this fashion is a simple way of proving theorems. The -derived rules are conservative extensions of the object-logic, and may permit -simpler proofs. Let us consider another example. Suppose we have the axiom -$$ \forall x\,y. Suc(x)=Suc(y)\imp x=y. \eqno (inject) $$ - -\noindent -The standard form of $(\forall E)$ is -$\forall x.\Var{P}(x) \Imp \Var{P}(\Var{t})$. -Resolving $(inject)$ with $(\forall E)$ replaces $\Var{P}$ by -$\lambda x. \forall y. Suc(x)=Suc(y)\imp x=y$ and leaves $\Var{t}$ -unchanged, yielding -\[ \forall y. Suc(\Var{t})=Suc(y)\imp \Var{t}=y. \] -Resolving this with $(\forall E)$ puts a subscript on~$\Var{t}$ -and yields -\[ Suc(\Var{t@1})=Suc(\Var{t})\imp \Var{t@1}=\Var{t}. \] -Resolving this with $({\imp}E)$ increases the subscripts and yields -\[ Suc(\Var{t@2})=Suc(\Var{t@1})\Imp \Var{t@2}=\Var{t@1}. -\] -We have derived the rule -\[ \infer{m=n,}{Suc(m)=Suc(n)} \] -which goes directly from $Suc(m)=Suc(n)$ to $m=n$. It is handy for simplifying -an equation like $Suc(Suc(Suc(m)))=Suc(Suc(Suc(0)))$. - - -\section{Lifting a rule into a context} -The rules $({\imp}I)$ and $(\forall I)$ may seem unsuitable for -resolution. They have non-atomic premises, namely $P\Imp Q$ and $\Forall -x.P(x)$, while the conclusions of all the rules are atomic (they have the form -$Trueprop(\cdots)$). Isabelle gets round the problem through a meta-inference -called \bfindex{lifting}. Let us consider how to construct proofs such as -\[ \infer[({\imp}I)]{P\imp(Q\imp R)} - {\infer[({\imp}I)]{Q\imp R} - {\infer*{R}{[P,Q]}}} - \qquad - \infer[(\forall I)]{\forall x\,y.P(x,y)} - {\infer[(\forall I)]{\forall y.P(x,y)}{P(x,y)}} -\] - -\subsection{Lifting over assumptions} -\index{assumptions!lifting over} -Lifting over $\theta\Imp{}$ is the following meta-inference rule: -\[ \infer{\List{\theta\Imp\phi@1; \ldots; \theta\Imp\phi@n} \Imp - (\theta \Imp \phi)} - {\List{\phi@1; \ldots; \phi@n} \Imp \phi} \] -This is clearly sound: if $\List{\phi@1; \ldots; \phi@n} \Imp \phi$ is true and -$\theta\Imp\phi@1$, \ldots, $\theta\Imp\phi@n$ and $\theta$ are all true then -$\phi$ must be true. Iterated lifting over a series of meta-formulae -$\theta@k$, \ldots, $\theta@1$ yields an object-rule whose conclusion is -$\List{\theta@1; \ldots; \theta@k} \Imp \phi$. Typically the $\theta@i$ are -the assumptions in a natural deduction proof; lifting copies them into a rule's -premises and conclusion. - -When resolving two rules, Isabelle lifts the first one if necessary. 
The -standard form of $({\imp}I)$ is -\[ (\Var{P} \Imp \Var{Q}) \Imp \Var{P}\imp \Var{Q}. \] -To resolve this rule with itself, Isabelle modifies one copy as follows: it -renames the unknowns to $\Var{P@1}$ and $\Var{Q@1}$, then lifts the rule over -$\Var{P}\Imp{}$ to obtain -\[ (\Var{P}\Imp (\Var{P@1} \Imp \Var{Q@1})) \Imp (\Var{P} \Imp - (\Var{P@1}\imp \Var{Q@1})). \] -Using the $\List{\cdots}$ abbreviation, this can be written as -\[ \List{\List{\Var{P}; \Var{P@1}} \Imp \Var{Q@1}; \Var{P}} - \Imp \Var{P@1}\imp \Var{Q@1}. \] -Unifying $\Var{P}\Imp \Var{P@1}\imp\Var{Q@1}$ with $\Var{P} \Imp -\Var{Q}$ instantiates $\Var{Q}$ to ${\Var{P@1}\imp\Var{Q@1}}$. -Resolution yields -\[ (\List{\Var{P}; \Var{P@1}} \Imp \Var{Q@1}) \Imp -\Var{P}\imp(\Var{P@1}\imp\Var{Q@1}). \] -This represents the derived rule -\[ \infer{P\imp(Q\imp R).}{\infer*{R}{[P,Q]}} \] - -\subsection{Lifting over parameters} -\index{parameters!lifting over} -An analogous form of lifting handles premises of the form $\Forall x\ldots\,$. -Here, lifting prefixes an object-rule's premises and conclusion with $\Forall -x$. At the same time, lifting introduces a dependence upon~$x$. It replaces -each unknown $\Var{a}$ in the rule by $\Var{a'}(x)$, where $\Var{a'}$ is a new -unknown (by subscripting) of suitable type --- necessarily a function type. In -short, lifting is the meta-inference -\[ \infer{\List{\Forall x.\phi@1^x; \ldots; \Forall x.\phi@n^x} - \Imp \Forall x.\phi^x,} - {\List{\phi@1; \ldots; \phi@n} \Imp \phi} \] -% -where $\phi^x$ stands for the result of lifting unknowns over~$x$ in -$\phi$. It is not hard to verify that this meta-inference is sound. If -$\phi\Imp\psi$ then $\phi^x\Imp\psi^x$ for all~$x$; so if $\phi^x$ is true -for all~$x$ then so is $\psi^x$. Thus, from $\phi\Imp\psi$ we conclude -$(\Forall x.\phi^x) \Imp (\Forall x.\psi^x)$. - -For example, $(\disj I)$ might be lifted to -\[ (\Forall x.\Var{P@1}(x)) \Imp (\Forall x. \Var{P@1}(x)\disj \Var{Q@1}(x))\] -and $(\forall I)$ to -\[ (\Forall x\,y.\Var{P@1}(x,y)) \Imp (\Forall x. \forall y.\Var{P@1}(x,y)). \] -Isabelle has renamed a bound variable in $(\forall I)$ from $x$ to~$y$, -avoiding a clash. Resolving the above with $(\forall I)$ is the meta-inference -\[ \infer{(\Forall x\,y.\Var{P@1}(x,y)) \Imp \forall x\,y.\Var{P@1}(x,y)} - {(\Forall x\,y.\Var{P@1}(x,y)) \Imp - (\Forall x. \forall y.\Var{P@1}(x,y)) & - (\Forall x.\Var{P}(x)) \Imp (\forall x.\Var{P}(x))} \] -Here, $\Var{P}$ is replaced by $\lambda x.\forall y.\Var{P@1}(x,y)$; the -resolvent expresses the derived rule -\[ \vcenter{ \infer{\forall x\,y.Q(x,y)}{Q(x,y)} } - \quad\hbox{provided $x$, $y$ not free in the assumptions} -\] -I discuss lifting and parameters at length elsewhere~\cite{paulson-found}. -Miller goes into even greater detail~\cite{miller-mixed}. - - -\section{Backward proof by resolution} -\index{resolution!in backward proof} - -Resolution is convenient for deriving simple rules and for reasoning -forward from facts. It can also support backward proof, where we start -with a goal and refine it to progressively simpler subgoals until all have -been solved. {\sc lcf} and its descendants {\sc hol} and Nuprl provide -tactics and tacticals, which constitute a sophisticated language for -expressing proof searches. {\bf Tactics} refine subgoals while {\bf - tacticals} combine tactics. - -\index{LCF system} -Isabelle's tactics and tacticals work differently from {\sc lcf}'s. An -Isabelle rule is bidirectional: there is no distinction between -inputs and outputs.
{\sc lcf} has a separate tactic for each rule; -Isabelle performs refinement by any rule in a uniform fashion, using -resolution. - -Isabelle works with meta-level theorems of the form -\( \List{\phi@1; \ldots; \phi@n} \Imp \phi \). -We have viewed this as the {\bf rule} with premises -$\phi@1$,~\ldots,~$\phi@n$ and conclusion~$\phi$. It can also be viewed as -the {\bf proof state}\index{proof state} -with subgoals $\phi@1$,~\ldots,~$\phi@n$ and main -goal~$\phi$. - -To prove the formula~$\phi$, take $\phi\Imp \phi$ as the initial proof -state. This assertion is, trivially, a theorem. At a later stage in the -backward proof, a typical proof state is $\List{\phi@1; \ldots; \phi@n} -\Imp \phi$. This proof state is a theorem, ensuring that the subgoals -$\phi@1$,~\ldots,~$\phi@n$ imply~$\phi$. If $n=0$ then we have -proved~$\phi$ outright. If $\phi$ contains unknowns, they may become -instantiated during the proof; a proof state may be $\List{\phi@1; \ldots; -\phi@n} \Imp \phi'$, where $\phi'$ is an instance of~$\phi$. - -\subsection{Refinement by resolution} -To refine subgoal~$i$ of a proof state by a rule, perform the following -resolution: -\[ \infer{\hbox{new proof state}}{\hbox{rule} & & \hbox{proof state}} \] -Suppose the rule is $\List{\psi'@1; \ldots; \psi'@m} \Imp \psi'$ after -lifting over subgoal~$i$'s assumptions and parameters. If the proof state -is $\List{\phi@1; \ldots; \phi@n} \Imp \phi$, then the new proof state is -(for~$1\leq i\leq n$) -\[ (\List{\phi@1; \ldots; \phi@{i-1}; \psi'@1; - \ldots; \psi'@m; \phi@{i+1}; \ldots; \phi@n} \Imp \phi)s. \] -Substitution~$s$ unifies $\psi'$ with~$\phi@i$. In the proof state, -subgoal~$i$ is replaced by $m$ new subgoals, the rule's instantiated premises. -If some of the rule's unknowns are left un-instantiated, they become new -unknowns in the proof state. Refinement by~$(\exists I)$, namely -\[ \Var{P}(\Var{t}) \Imp \exists x. \Var{P}(x), \] -inserts a new unknown derived from~$\Var{t}$ by subscripting and lifting. -We do not have to specify an `existential witness' when -applying~$(\exists I)$. Further resolutions may instantiate unknowns in -the proof state. - -\subsection{Proof by assumption} -\index{assumptions!use of} -In the course of a natural deduction proof, parameters $x@1$, \ldots,~$x@l$ and -assumptions $\theta@1$, \ldots, $\theta@k$ accumulate, forming a context for -each subgoal. Repeated lifting steps can lift a rule into any context. To -aid readability, Isabelle puts contexts into a normal form, gathering the -parameters at the front: -\begin{equation} \label{context-eqn} -\Forall x@1 \ldots x@l. \List{\theta@1; \ldots; \theta@k}\Imp\theta. -\end{equation} -Under the usual reading of the connectives, this expresses that $\theta$ -follows from $\theta@1$,~\ldots,~$\theta@k$ for arbitrary -$x@1$,~\ldots,~$x@l$. It is trivially true if $\theta$ equals any of -$\theta@1$,~\ldots,~$\theta@k$, or is unifiable with any of them. This -models proof by assumption in natural deduction. - -Isabelle automates the meta-inference for proof by assumption. Its arguments -are the meta-theorem $\List{\phi@1; \ldots; \phi@n} \Imp \phi$, and some~$i$ -from~1 to~$n$, where $\phi@i$ has the form~(\ref{context-eqn}). Its results -are meta-theorems of the form -\[ (\List{\phi@1; \ldots; \phi@{i-1}; \phi@{i+1}; \ldots; \phi@n} \Imp \phi)s \] -for each $s$ and~$j$ such that $s$ unifies $\lambda x@1 \ldots x@l. \theta@j$ -with $\lambda x@1 \ldots x@l. \theta$.
Isabelle supplies the parameters -$x@1$,~\ldots,~$x@l$ to higher-order unification as bound variables, which -regards them as unique constants with a limited scope --- this enforces -parameter provisos~\cite{paulson-found}. - -The premise represents a proof state with~$n$ subgoals, of which the~$i$th -is to be solved by assumption. Isabelle searches the subgoal's context for -an assumption~$\theta@j$ that can solve it. For each unifier, the -meta-inference returns an instantiated proof state from which the $i$th -subgoal has been removed. Isabelle searches for a unifying assumption; for -readability and robustness, proofs do not refer to assumptions by number. - -Consider the proof state -\[ (\List{P(a); P(b)} \Imp P(\Var{x})) \Imp Q(\Var{x}). \] -Proof by assumption (with $i=1$, the only possibility) yields two results: -\begin{itemize} - \item $Q(a)$, instantiating $\Var{x}\equiv a$ - \item $Q(b)$, instantiating $\Var{x}\equiv b$ -\end{itemize} -Here, proof by assumption affects the main goal. It could also affect -other subgoals; if we also had the subgoal ${\List{P(b); P(c)} \Imp - P(\Var{x})}$, then $\Var{x}\equiv a$ would transform it to ${\List{P(b); - P(c)} \Imp P(a)}$, which might be unprovable. - - -\subsection{A propositional proof} \label{prop-proof} -\index{examples!propositional} -Our first example avoids quantifiers. Given the main goal $P\disj P\imp -P$, Isabelle creates the initial state -\[ (P\disj P\imp P)\Imp (P\disj P\imp P). \] -% -Bear in mind that every proof state we derive will be a meta-theorem, -expressing that the subgoals imply the main goal. Our aim is to reach the -state $P\disj P\imp P$; this meta-theorem is the desired result. - -The first step is to refine subgoal~1 by (${\imp}I)$, creating a new state -where $P\disj P$ is an assumption: -\[ (P\disj P\Imp P)\Imp (P\disj P\imp P) \] -The next step is $(\disj E)$, which replaces subgoal~1 by three new subgoals. -Because of lifting, each subgoal contains a copy of the context --- the -assumption $P\disj P$. (In fact, this assumption is now redundant; we shall -shortly see how to get rid of it!) The new proof state is the following -meta-theorem, laid out for clarity: -\[ \begin{array}{l@{}l@{\qquad\qquad}l} - \lbrakk\;& P\disj P\Imp \Var{P@1}\disj\Var{Q@1}; & \hbox{(subgoal 1)} \\ - & \List{P\disj P; \Var{P@1}} \Imp P; & \hbox{(subgoal 2)} \\ - & \List{P\disj P; \Var{Q@1}} \Imp P & \hbox{(subgoal 3)} \\ - \rbrakk\;& \Imp (P\disj P\imp P) & \hbox{(main goal)} - \end{array} -\] -Notice the unknowns in the proof state. Because we have applied $(\disj E)$, -we must prove some disjunction, $\Var{P@1}\disj\Var{Q@1}$. Of course, -subgoal~1 is provable by assumption. This instantiates both $\Var{P@1}$ and -$\Var{Q@1}$ to~$P$ throughout the proof state: -\[ \begin{array}{l@{}l@{\qquad\qquad}l} - \lbrakk\;& \List{P\disj P; P} \Imp P; & \hbox{(subgoal 1)} \\ - & \List{P\disj P; P} \Imp P & \hbox{(subgoal 2)} \\ - \rbrakk\;& \Imp (P\disj P\imp P) & \hbox{(main goal)} - \end{array} \] -Both of the remaining subgoals can be proved by assumption. After two such -steps, the proof state is $P\disj P\imp P$. - - -\subsection{A quantifier proof} -\index{examples!with quantifiers} -To illustrate quantifiers and $\Forall$-lifting, let us prove -$(\exists x.P(f(x)))\imp(\exists x.P(x))$. The initial proof -state is the trivial meta-theorem -\[ (\exists x.P(f(x)))\imp(\exists x.P(x)) \Imp - (\exists x.P(f(x)))\imp(\exists x.P(x)). 
\] -As above, the first step is refinement by (${\imp}I)$: -\[ (\exists x.P(f(x))\Imp \exists x.P(x)) \Imp - (\exists x.P(f(x)))\imp(\exists x.P(x)) -\] -The next step is $(\exists E)$, which replaces subgoal~1 by two new subgoals. -Both have the assumption $\exists x.P(f(x))$. The new proof -state is the meta-theorem -\[ \begin{array}{l@{}l@{\qquad\qquad}l} - \lbrakk\;& \exists x.P(f(x)) \Imp \exists x.\Var{P@1}(x); & \hbox{(subgoal 1)} \\ - & \Forall x.\List{\exists x.P(f(x)); \Var{P@1}(x)} \Imp - \exists x.P(x) & \hbox{(subgoal 2)} \\ - \rbrakk\;& \Imp (\exists x.P(f(x)))\imp(\exists x.P(x)) & \hbox{(main goal)} - \end{array} -\] -The unknown $\Var{P@1}$ appears in both subgoals. Because we have applied -$(\exists E)$, we must prove $\exists x.\Var{P@1}(x)$, where $\Var{P@1}(x)$ may -become any formula possibly containing~$x$. Proving subgoal~1 by assumption -instantiates $\Var{P@1}$ to~$\lambda x.P(f(x))$: -\[ \left(\Forall x.\List{\exists x.P(f(x)); P(f(x))} \Imp - \exists x.P(x)\right) - \Imp (\exists x.P(f(x)))\imp(\exists x.P(x)) -\] -The next step is refinement by $(\exists I)$. The rule is lifted into the -context of the parameter~$x$ and the assumption $P(f(x))$. This copies -the context to the subgoal and allows the existential witness to -depend upon~$x$: -\[ \left(\Forall x.\List{\exists x.P(f(x)); P(f(x))} \Imp - P(\Var{x@2}(x))\right) - \Imp (\exists x.P(f(x)))\imp(\exists x.P(x)) -\] -The existential witness, $\Var{x@2}(x)$, consists of an unknown -applied to a parameter. Proof by assumption unifies $\lambda x.P(f(x))$ -with $\lambda x.P(\Var{x@2}(x))$, instantiating $\Var{x@2}$ to $f$. The final -proof state contains no subgoals: $(\exists x.P(f(x)))\imp(\exists x.P(x))$. - - -\subsection{Tactics and tacticals} -\index{tactics|bold}\index{tacticals|bold} -{\bf Tactics} perform backward proof. Isabelle tactics differ from those -of {\sc lcf}, {\sc hol} and Nuprl by operating on entire proof states, -rather than on individual subgoals. An Isabelle tactic is a function that -takes a proof state and returns a sequence (lazy list) of possible -successor states. Lazy lists are coded in ML as functions, a standard -technique~\cite{paulson-ml2}. Isabelle represents proof states by theorems. - -Basic tactics execute the meta-rules described above, operating on a -given subgoal. The {\bf resolution tactics} take a list of rules and -return next states for each combination of rule and unifier. The {\bf -assumption tactic} examines the subgoal's assumptions and returns next -states for each combination of assumption and unifier. Lazy lists are -essential because higher-order resolution may return infinitely many -unifiers. If there are no matching rules or assumptions then no next -states are generated; a tactic application that returns an empty list is -said to {\bf fail}. - -Sequences realize their full potential with {\bf tacticals} --- operators -for combining tactics. Depth-first search, breadth-first search and -best-first search (where a heuristic function selects the best state to -explore) return their outcomes as a sequence. Isabelle provides such -procedures in the form of tacticals. Simpler procedures can be expressed -directly using the basic tacticals {\tt THEN}, {\tt ORELSE} and {\tt REPEAT}: -\begin{ttdescription} -\item[$tac1$ THEN $tac2$] is a tactic for sequential composition. Applied -to a proof state, it returns all states reachable in two steps by applying -$tac1$ followed by~$tac2$. - -\item[$tac1$ ORELSE $tac2$] is a choice tactic. 
Applied to a state, it -tries~$tac1$ and returns the result if non-empty; otherwise, it uses~$tac2$. - -\item[REPEAT $tac$] is a repetition tactic. Applied to a state, it -returns all states reachable by applying~$tac$ as long as possible --- until -it would fail. -\end{ttdescription} -For instance, this tactic repeatedly applies $tac1$ and~$tac2$, giving -$tac1$ priority: -\begin{center} \tt -REPEAT($tac1$ ORELSE $tac2$) -\end{center} - - -\section{Variations on resolution} -In principle, resolution and proof by assumption suffice to prove all -theorems. However, specialized forms of resolution are helpful for working -with elimination rules. Elim-resolution applies an elimination rule to an -assumption; destruct-resolution is similar, but applies a rule in a forward -style. - -The last part of the section shows how the techniques for proving theorems -can also serve to derive rules. - -\subsection{Elim-resolution} -\index{elim-resolution|bold}\index{assumptions!deleting} - -Consider proving the theorem $((R\disj R)\disj R)\disj R\imp R$. By -$({\imp}I)$, we prove~$R$ from the assumption $((R\disj R)\disj R)\disj R$. -Applying $(\disj E)$ to this assumption yields two subgoals, one that -assumes~$R$ (and is therefore trivial) and one that assumes $(R\disj -R)\disj R$. This subgoal admits another application of $(\disj E)$. Since -natural deduction never discards assumptions, we eventually generate a -subgoal containing much that is redundant: -\[ \List{((R\disj R)\disj R)\disj R; (R\disj R)\disj R; R\disj R; R} \Imp R. \] -In general, using $(\disj E)$ on the assumption $P\disj Q$ creates two new -subgoals with the additional assumption $P$ or~$Q$. In these subgoals, -$P\disj Q$ is redundant. Other elimination rules behave -similarly. In first-order logic, only universally quantified -assumptions are sometimes needed more than once --- say, to prove -$P(f(f(a)))$ from the assumptions $\forall x.P(x)\imp P(f(x))$ and~$P(a)$. - -Many logics can be formulated as sequent calculi that delete redundant -assumptions after use. The rule $(\disj E)$ might become -\[ \infer[\disj\hbox{-left}] - {\Gamma,P\disj Q,\Delta \turn \Theta} - {\Gamma,P,\Delta \turn \Theta && \Gamma,Q,\Delta \turn \Theta} \] -In backward proof, a goal containing $P\disj Q$ on the left of the~$\turn$ -(that is, as an assumption) splits into two subgoals, replacing $P\disj Q$ -by $P$ or~$Q$. But the sequent calculus, with its explicit handling of -assumptions, can be tiresome to use. - -Elim-resolution is Isabelle's way of getting sequent calculus behaviour -from natural deduction rules. It lets an elimination rule consume an -assumption. Elim-resolution combines two meta-theorems: -\begin{itemize} - \item a rule $\List{\psi@1; \ldots; \psi@m} \Imp \psi$ - \item a proof state $\List{\phi@1; \ldots; \phi@n} \Imp \phi$ -\end{itemize} -The rule must have at least one premise, thus $m>0$. Write the rule's -lifted form as $\List{\psi'@1; \ldots; \psi'@m} \Imp \psi'$. Suppose we -wish to change subgoal number~$i$. - -Ordinary resolution would attempt to reduce~$\phi@i$, -replacing subgoal~$i$ by $m$ new ones. Elim-resolution tries -simultaneously to reduce~$\phi@i$ and to solve~$\psi'@1$ by assumption; it -returns a sequence of next states. Each of these replaces subgoal~$i$ by -instances of $\psi'@2$, \ldots, $\psi'@m$ from which the selected -assumption has been deleted. Suppose $\phi@i$ has the parameter~$x$ and -assumptions $\theta@1$,~\ldots,~$\theta@k$. 
Then $\psi'@1$, the rule's first -premise after lifting, will be -\( \Forall x. \List{\theta@1; \ldots; \theta@k}\Imp \psi^{x}@1 \). -Elim-resolution tries to unify $\psi'\qeq\phi@i$ and -$\lambda x. \theta@j \qeq \lambda x. \psi^{x}@1$ simultaneously, for -$j=1$,~\ldots,~$k$. - -Let us redo the example from~\S\ref{prop-proof}. The elimination rule -is~$(\disj E)$, -\[ \List{\Var{P}\disj \Var{Q};\; \Var{P}\Imp \Var{R};\; \Var{Q}\Imp \Var{R}} - \Imp \Var{R} \] -and the proof state is $(P\disj P\Imp P)\Imp (P\disj P\imp P)$. The -lifted rule is -\[ \begin{array}{l@{}l} - \lbrakk\;& P\disj P \Imp \Var{P@1}\disj\Var{Q@1}; \\ - & \List{P\disj P ;\; \Var{P@1}} \Imp \Var{R@1}; \\ - & \List{P\disj P ;\; \Var{Q@1}} \Imp \Var{R@1} \\ - \rbrakk\;& \Imp (P\disj P \Imp \Var{R@1}) - \end{array} -\] -Unification takes the simultaneous equations -$P\disj P \qeq \Var{P@1}\disj\Var{Q@1}$ and $\Var{R@1} \qeq P$, yielding -$\Var{P@1}\equiv\Var{Q@1}\equiv\Var{R@1} \equiv P$. The new proof state -is simply -\[ \List{P \Imp P;\; P \Imp P} \Imp (P\disj P\imp P). -\] -Elim-resolution's simultaneous unification gives better control -than ordinary resolution. Recall the substitution rule: -$$ \List{\Var{t}=\Var{u}; \Var{P}(\Var{t})} \Imp \Var{P}(\Var{u}) -\eqno(subst) $$ -Unsuitable for ordinary resolution because $\Var{P}(\Var{u})$ admits many -unifiers, $(subst)$ works well with elim-resolution. It deletes some -assumption of the form $x=y$ and replaces every~$y$ by~$x$ in the subgoal -formula. The simultaneous unification instantiates $\Var{u}$ to~$y$; if -$y$ is not an unknown, then $\Var{P}(y)$ can easily be unified with another -formula. - -In logical parlance, the premise containing the connective to be eliminated -is called the \bfindex{major premise}. Elim-resolution expects the major -premise to come first. The order of the premises is significant in -Isabelle. - -\subsection{Destruction rules} \label{destruct} -\index{rules!destruction}\index{rules!elimination} -\index{forward proof} - -Looking back to Fig.\ts\ref{fol-fig}, notice that there are two kinds of -elimination rule. The rules $({\conj}E1)$, $({\conj}E2)$, $({\imp}E)$ and -$({\forall}E)$ extract the conclusion from the major premise. In Isabelle -parlance, such rules are called {\bf destruction rules}; they are readable -and easy to use in forward proof. The rules $({\disj}E)$, $({\bot}E)$ and -$({\exists}E)$ work by discharging assumptions; they support backward proof -in a style reminiscent of the sequent calculus. - -The latter style is the most general form of elimination rule. In natural -deduction, there is no way to recast $({\disj}E)$, $({\bot}E)$ or -$({\exists}E)$ as destruction rules. But we can write general elimination -rules for $\conj$, $\imp$ and~$\forall$: -\[ -\infer{R}{P\conj Q & \infer*{R}{[P,Q]}} \qquad -\infer{R}{P\imp Q & P & \infer*{R}{[Q]}} \qquad -\infer{Q}{\forall x.P & \infer*{Q}{[P[t/x]]}} -\] -Because they are concise, destruction rules are simpler to derive than the -corresponding elimination rules. To facilitate their use in backward -proof, Isabelle provides a means of transforming a destruction rule such as -\[ \infer[\quad\hbox{to the elimination rule}\quad]{Q}{P@1 & \ldots & P@m} - \infer{R.}{P@1 & \ldots & P@m & \infer*{R}{[Q]}} -\] -{\bf Destruct-resolution}\index{destruct-resolution} combines this -transformation with elim-resolution. It applies a destruction rule to some -assumption of a subgoal. 
Given the rule above, it replaces the -assumption~$P@1$ by~$Q$, with new subgoals of showing instances of $P@2$, -\ldots,~$P@m$. Destruct-resolution works forward from a subgoal's -assumptions. Ordinary resolution performs forward reasoning from theorems, -as illustrated in~\S\ref{joining}. - - -\subsection{Deriving rules by resolution} \label{deriving} -\index{rules!derived|bold}\index{meta-assumptions!syntax of} -The meta-logic, itself a form of the predicate calculus, is defined by a -system of natural deduction rules. Each theorem may depend upon -meta-assumptions. The theorem that~$\phi$ follows from the assumptions -$\phi@1$, \ldots, $\phi@n$ is written -\[ \phi \quad [\phi@1,\ldots,\phi@n]. \] -A more conventional notation might be $\phi@1,\ldots,\phi@n \turn \phi$, -but Isabelle's notation is more readable with large formulae. - -Meta-level natural deduction provides a convenient mechanism for deriving -new object-level rules. To derive the rule -\[ \infer{\phi,}{\theta@1 & \ldots & \theta@k} \] -assume the premises $\theta@1$,~\ldots,~$\theta@k$ at the -meta-level. Then prove $\phi$, possibly using these assumptions. -Starting with a proof state $\phi\Imp\phi$, assumptions may accumulate, -reaching a final state such as -\[ \phi \quad [\theta@1,\ldots,\theta@k]. \] -The meta-rule for $\Imp$ introduction discharges an assumption. -Discharging them in the order $\theta@k,\ldots,\theta@1$ yields the -meta-theorem $\List{\theta@1; \ldots; \theta@k} \Imp \phi$, with no -assumptions. This represents the desired rule. -Let us derive the general $\conj$ elimination rule: -$$ \infer{R}{P\conj Q & \infer*{R}{[P,Q]}} \eqno(\conj E) $$ -We assume $P\conj Q$ and $\List{P;Q}\Imp R$, and commence backward proof in -the state $R\Imp R$. Resolving this with the second assumption yields the -state -\[ \phantom{\List{P\conj Q;\; P\conj Q}} - \llap{$\List{P;Q}$}\Imp R \quad [\,\List{P;Q}\Imp R\,]. \] -Resolving subgoals~1 and~2 with~$({\conj}E1)$ and~$({\conj}E2)$, -respectively, yields the state -\[ \List{P\conj \Var{Q@1};\; \Var{P@2}\conj Q}\Imp R - \quad [\,\List{P;Q}\Imp R\,]. -\] -The unknowns $\Var{Q@1}$ and~$\Var{P@2}$ arise from unconstrained -subformulae in the premises of~$({\conj}E1)$ and~$({\conj}E2)$. Resolving -both subgoals with the assumption $P\conj Q$ instantiates the unknowns to yield -\[ R \quad [\, \List{P;Q}\Imp R, P\conj Q \,]. \] -The proof may use the meta-assumptions in any order, and as often as -necessary; when finished, we discharge them in the correct order to -obtain the desired form: -\[ \List{P\conj Q;\; \List{P;Q}\Imp R} \Imp R \] -We have derived the rule using free variables, which prevents their -premature instantiation during the proof; we may now replace them by -schematic variables. - -\begin{warn} - Schematic variables are not allowed in meta-assumptions, for a variety of - reasons. Meta-assumptions remain fixed throughout a proof. 
-\end{warn} - diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/gate.thy --- a/doc-src/Intro/gate.thy Mon Aug 27 20:19:09 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,5 +0,0 @@ -Gate = FOL + -consts nand,xor :: "[o,o] => o" -rules nand_def "nand(P,Q) == ~(P & Q)" - xor_def "xor(P,Q) == P & ~Q | ~P & Q" -end diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/gate2.thy --- a/doc-src/Intro/gate2.thy Mon Aug 27 20:19:09 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,8 +0,0 @@ -Gate2 = FOL + -consts "~&" :: "[o,o] => o" (infixl 35) - "#" :: "[o,o] => o" (infixl 30) - If :: "[o,o,o] => o" ("if _ then _ else _") -rules nand_def "P ~& Q == ~(P & Q)" - xor_def "P # Q == P & ~Q | ~P & Q" - If_def "if P then Q else R == P&Q | ~P&R" -end diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/getting.tex --- a/doc-src/Intro/getting.tex Mon Aug 27 20:19:09 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,956 +0,0 @@ -\part{Using Isabelle from the ML Top-Level}\label{chap:getting} - -Most Isabelle users write proof scripts using the Isar language, as described in the \emph{Tutorial}, and debug them through the Proof General user interface~\cite{proofgeneral}. Isabelle's original user interface --- based on the \ML{} top-level --- is still available, however. -Proofs are conducted by -applying certain \ML{} functions, which update a stored proof state. -All syntax can be expressed using plain {\sc ascii} -characters, but Isabelle can support -alternative syntaxes, for example using mathematical symbols from a -special screen font. The meta-logic and main object-logics already -provide such fancy output as an option. - -Object-logics are built upon Pure Isabelle, which implements the -meta-logic and provides certain fundamental data structures: types, -terms, signatures, theorems and theories, tactics and tacticals. -These data structures have the corresponding \ML{} types \texttt{typ}, -\texttt{term}, \texttt{Sign.sg}, \texttt{thm}, \texttt{theory} and \texttt{tactic}; -tacticals have function types such as \texttt{tactic->tactic}. Isabelle -users can operate on these data structures by writing \ML{} programs. - - -\section{Forward proof}\label{sec:forward} \index{forward proof|(} -This section describes the concrete syntax for types, terms and theorems, -and demonstrates forward proof. The examples are set in first-order logic. -The command to start Isabelle running first-order logic is -\begin{ttbox} -isabelle FOL -\end{ttbox} -Note that just typing \texttt{isabelle} usually brings up higher-order logic -(HOL) by default. - - -\subsection{Lexical matters} -\index{identifiers}\index{reserved words} -An {\bf identifier} is a string of letters, digits, underscores~(\verb|_|) -and single quotes~({\tt'}), beginning with a letter. Single quotes are -regarded as primes; for instance \texttt{x'} is read as~$x'$. Identifiers are -separated by white space and special characters. {\bf Reserved words} are -identifiers that appear in Isabelle syntax definitions. - -An Isabelle theory can declare symbols composed of special characters, such -as {\tt=}, {\tt==}, {\tt=>} and {\tt==>}. (The latter three are part of -the syntax of the meta-logic.) Such symbols may be run together; thus if -\verb|}| and \verb|{| are used for set brackets then \verb|{{a},{a,b}}| is -valid notation for a set of sets --- but only if \verb|}}| and \verb|{{| -have not been declared as symbols! The parser resolves any ambiguity by -taking the longest possible symbol that has been declared. 
Thus the string -{\tt==>} is read as a single symbol. But \hbox{\tt= =>} is read as two -symbols. - -Identifiers that are not reserved words may serve as free variables or -constants. A {\bf type identifier} consists of an identifier prefixed by a -prime, for example {\tt'a} and \hbox{\tt'hello}. Type identifiers stand -for (free) type variables, which remain fixed during a proof. -\index{type identifiers} - -An {\bf unknown}\index{unknowns} (or type unknown) consists of a question -mark, an identifier (or type identifier), and a subscript. The subscript, -a non-negative integer, -allows the renaming of unknowns prior to unification.% -\footnote{The subscript may appear after the identifier, separated by a - dot; this prevents ambiguity when the identifier ends with a digit. Thus - {\tt?z6.0} has identifier {\tt"z6"} and subscript~0, while {\tt?a0.5} - has identifier {\tt"a0"} and subscript~5. If the identifier does not - end with a digit, then no dot appears and a subscript of~0 is omitted; - for example, {\tt?hello} has identifier {\tt"hello"} and subscript - zero, while {\tt?z6} has identifier {\tt"z"} and subscript~6. The same - conventions apply to type unknowns. The question mark is {\it not\/} - part of the identifier!} - - -\subsection{Syntax of types and terms} -\index{classes!built-in|bold}\index{syntax!of types and terms} - -Classes are denoted by identifiers; the built-in class \cldx{logic} -contains the `logical' types. Sorts are lists of classes enclosed in -braces~\{ and \}; singleton sorts may be abbreviated by dropping the braces. - -\index{types!syntax of|bold}\index{sort constraints} Types are written -with a syntax like \ML's. The built-in type \tydx{prop} is the type -of propositions. Type variables can be constrained to particular -classes or sorts, for example \texttt{'a::term} and \texttt{?'b::\ttlbrace - ord, arith\ttrbrace}. -\[\dquotes -\index{*:: symbol}\index{*=> symbol} -\index{{}@{\tt\ttlbrace} symbol}\index{{}@{\tt\ttrbrace} symbol} -\index{*[ symbol}\index{*] symbol} -\begin{array}{ll} - \multicolumn{2}{c}{\hbox{ASCII Notation for Types}} \\ \hline - \alpha "::" C & \hbox{class constraint} \\ - \alpha "::" "\ttlbrace" C@1 "," \ldots "," C@n "\ttrbrace" & - \hbox{sort constraint} \\ - \sigma " => " \tau & \hbox{function type } \sigma\To\tau \\ - "[" \sigma@1 "," \ldots "," \sigma@n "] => " \tau - & \hbox{$n$-argument function type} \\ - "(" \tau@1"," \ldots "," \tau@n ")" tycon & \hbox{type construction} -\end{array} -\] -Terms are those of the typed $\lambda$-calculus. -\index{terms!syntax of|bold}\index{type constraints} -\[\dquotes -\index{%@{\tt\%} symbol}\index{lambda abs@$\lambda$-abstractions} -\index{*:: symbol} -\begin{array}{ll} - \multicolumn{2}{c}{\hbox{ASCII Notation for Terms}} \\ \hline - t "::" \sigma & \hbox{type constraint} \\ - "\%" x "." t & \hbox{abstraction } \lambda x.t \\ - "\%" x@1\ldots x@n "." t & \hbox{abstraction over several arguments} \\ - t "(" u@1"," \ldots "," u@n ")" & - \hbox{application to several arguments (FOL and ZF)} \\ - t\; u@1 \ldots\; u@n & \hbox{application to several arguments (HOL)} -\end{array} -\] -Note that HOL uses its traditional ``higher-order'' syntax for application, -which differs from that used in FOL. - -The theorems and rules of an object-logic are represented by theorems in -the meta-logic, which are expressed using meta-formulae. Since the -meta-logic is higher-order, meta-formulae~$\phi$, $\psi$, $\theta$,~\ldots{} -are just terms of type~\texttt{prop}.
-\index{meta-implication} -\index{meta-quantifiers}\index{meta-equality} -\index{*"!"! symbol} - -\index{["!@{\tt[\char124} symbol} %\char124 is vertical bar. We use ! because | stopped working -\index{"!]@{\tt\char124]} symbol} % so these are [| and |] - -\index{*== symbol}\index{*=?= symbol}\index{*==> symbol} -\[\dquotes - \begin{array}{l@{\quad}l@{\quad}l} - \multicolumn{3}{c}{\hbox{ASCII Notation for Meta-Formulae}} \\ \hline - a " == " b & a\equiv b & \hbox{meta-equality} \\ - a " =?= " b & a\qeq b & \hbox{flex-flex constraint} \\ - \phi " ==> " \psi & \phi\Imp \psi & \hbox{meta-implication} \\ - "[|" \phi@1 ";" \ldots ";" \phi@n "|] ==> " \psi & - \List{\phi@1;\ldots;\phi@n} \Imp \psi & \hbox{nested implication} \\ - "!!" x "." \phi & \Forall x.\phi & \hbox{meta-quantification} \\ - "!!" x@1\ldots x@n "." \phi & - \Forall x@1. \ldots x@n.\phi & \hbox{nested quantification} - \end{array} -\] -Flex-flex constraints are meta-equalities arising from unification; they -require special treatment. See~\S\ref{flexflex}. -\index{flex-flex constraints} - -\index{*Trueprop constant} -Most logics define the implicit coercion $Trueprop$ from object-formulae to -propositions. This could cause an ambiguity: in $P\Imp Q$, do the -variables $P$ and $Q$ stand for meta-formulae or object-formulae? If the -latter, $P\Imp Q$ really abbreviates $Trueprop(P)\Imp Trueprop(Q)$. To -prevent such ambiguities, Isabelle's syntax does not allow a meta-formula -to consist of a variable. Variables of type~\tydx{prop} are seldom -useful, but you can make a variable stand for a meta-formula by prefixing -it with the symbol \texttt{PROP}:\index{*PROP symbol} -\begin{ttbox} -PROP ?psi ==> PROP ?theta -\end{ttbox} - -Symbols of object-logics are typically rendered into {\sc ascii} as -follows: -\[ \begin{tabular}{l@{\quad}l@{\quad}l} - \tt True & $\top$ & true \\ - \tt False & $\bot$ & false \\ - \tt $P$ \& $Q$ & $P\conj Q$ & conjunction \\ - \tt $P$ | $Q$ & $P\disj Q$ & disjunction \\ - \verb'~' $P$ & $\neg P$ & negation \\ - \tt $P$ --> $Q$ & $P\imp Q$ & implication \\ - \tt $P$ <-> $Q$ & $P\bimp Q$ & bi-implication \\ - \tt ALL $x\,y\,z$ .\ $P$ & $\forall x\,y\,z.P$ & for all \\ - \tt EX $x\,y\,z$ .\ $P$ & $\exists x\,y\,z.P$ & there exists - \end{tabular} -\] -To illustrate the notation, consider two axioms for first-order logic: -$$ \List{P; Q} \Imp P\conj Q \eqno(\conj I) $$ -$$ \List{\exists x. P(x); \Forall x. P(x)\Imp Q} \Imp Q \eqno(\exists E) $$ -$({\conj}I)$ translates into {\sc ascii} characters as -\begin{ttbox} -[| ?P; ?Q |] ==> ?P & ?Q -\end{ttbox} -The schematic variables let unification instantiate the rule. To avoid -cluttering logic definitions with question marks, Isabelle converts any -free variables in a rule to schematic variables; we normally declare -$({\conj}I)$ as -\begin{ttbox} -[| P; Q |] ==> P & Q -\end{ttbox} -This convention for variables agrees with the treatment of variables in goals. -Free variables in a goal remain fixed throughout the proof. After the -proof is finished, Isabelle converts them to scheme variables in the -resulting theorem. Scheme variables in a goal may be replaced by terms -during the proof, supporting answer extraction, program synthesis, and so -forth. - -For a final example, the rule $(\exists E)$ is rendered in {\sc ascii} as -\begin{ttbox} -[| EX x. P(x); !!x. P(x) ==> Q |] ==> Q -\end{ttbox} - - -\subsection{Basic operations on theorems} -\index{theorems!basic operations on|bold} -\index{LCF system} -Meta-level theorems have the \ML{} type \mltydx{thm}.
They represent the -theorems and inference rules of object-logics. Isabelle's meta-logic is -implemented using the {\sc lcf} approach: each meta-level inference rule is -represented by a function from theorems to theorems. Object-level rules -are taken as axioms. - -The main theorem printing commands are \texttt{prth}, \texttt{prths} and~{\tt - prthq}. Of the other operations on theorems, most useful are \texttt{RS} -and \texttt{RSN}, which perform resolution. - -\index{theorems!printing of} -\begin{ttdescription} -\item[\ttindex{prth} {\it thm};] - pretty-prints {\it thm\/} at the terminal. - -\item[\ttindex{prths} {\it thms};] - pretty-prints {\it thms}, a list of theorems. - -\item[\ttindex{prthq} {\it thmq};] - pretty-prints {\it thmq}, a sequence of theorems; this is useful for - inspecting the output of a tactic. - -\item[$thm1$ RS $thm2$] \index{*RS} - resolves the conclusion of $thm1$ with the first premise of~$thm2$. - -\item[$thm1$ RSN $(i,thm2)$] \index{*RSN} - resolves the conclusion of $thm1$ with the $i$th premise of~$thm2$. - -\item[\ttindex{standard} $thm$] - puts $thm$ into a standard format. It also renames schematic variables - to have subscript zero, improving readability and reducing subscript - growth. -\end{ttdescription} -The rules of a theory are normally bound to \ML\ identifiers. Suppose we are -running an Isabelle session containing theory~FOL, natural deduction -first-order logic.\footnote{For a listing of the FOL rules and their \ML{} - names, turn to -\iflabelundefined{fol-rules}{{\em Isabelle's Object-Logics}}% - {page~\pageref{fol-rules}}.} -Let us try an example given in~\S\ref{joining}. We -first print \tdx{mp}, which is the rule~$({\imp}E)$, then resolve it with -itself. -\begin{ttbox} -prth mp; -{\out [| ?P --> ?Q; ?P |] ==> ?Q} -{\out val it = "[| ?P --> ?Q; ?P |] ==> ?Q" : thm} -prth (mp RS mp); -{\out [| ?P1 --> ?P --> ?Q; ?P1; ?P |] ==> ?Q} -{\out val it = "[| ?P1 --> ?P --> ?Q; ?P1; ?P |] ==> ?Q" : thm} -\end{ttbox} -User input appears in {\footnotesize\tt typewriter characters}, and output -appears in{\out slanted typewriter characters}. \ML's response {\out val - }~\ldots{} is compiler-dependent and will sometimes be suppressed. This -session illustrates two formats for the display of theorems. Isabelle's -top-level displays theorems as \ML{} values, enclosed in quotes. Printing -commands like \texttt{prth} omit the quotes and the surrounding \texttt{val - \ldots :\ thm}. Ignoring their side-effects, the printing commands are -identity functions. - -To contrast \texttt{RS} with \texttt{RSN}, we resolve -\tdx{conjunct1}, which stands for~$(\conj E1)$, with~\tdx{mp}. -\begin{ttbox} -conjunct1 RS mp; -{\out val it = "[| (?P --> ?Q) & ?Q1; ?P |] ==> ?Q" : thm} -conjunct1 RSN (2,mp); -{\out val it = "[| ?P --> ?Q; ?P & ?Q1 |] ==> ?Q" : thm} -\end{ttbox} -These correspond to the following proofs: -\[ \infer[({\imp}E)]{Q}{\infer[({\conj}E1)]{P\imp Q}{(P\imp Q)\conj Q@1} & P} - \qquad - \infer[({\imp}E)]{Q}{P\imp Q & \infer[({\conj}E1)]{P}{P\conj Q@1}} -\] -% -Rules can be derived by pasting other rules together. Let us join -\tdx{spec}, which stands for~$(\forall E)$, with \texttt{mp} and {\tt - conjunct1}. In \ML{}, the identifier~\texttt{it} denotes the value just -printed. -\begin{ttbox} -spec; -{\out val it = "ALL x. ?P(x) ==> ?P(?x)" : thm} -it RS mp; -{\out val it = "[| ALL x. ?P3(x) --> ?Q2(x); ?P3(?x1) |] ==>} -{\out ?Q2(?x1)" : thm} -it RS conjunct1; -{\out val it = "[| ALL x. 
?P4(x) --> ?P6(x) & ?Q5(x); ?P4(?x2) |] ==>} -{\out ?P6(?x2)" : thm} -standard it; -{\out val it = "[| ALL x. ?P(x) --> ?Pa(x) & ?Q(x); ?P(?x) |] ==>} -{\out ?Pa(?x)" : thm} -\end{ttbox} -By resolving $(\forall E)$ with (${\imp}E)$ and (${\conj}E1)$, we have -derived a destruction rule for formulae of the form $\forall x. -P(x)\imp(Q(x)\conj R(x))$. Used with destruct-resolution, such specialized -rules provide a way of referring to particular assumptions. -\index{assumptions!use of} - -\subsection{*Flex-flex constraints} \label{flexflex} -\index{flex-flex constraints|bold}\index{unknowns!function} -In higher-order unification, {\bf flex-flex} equations are those where both -sides begin with a function unknown, such as $\Var{f}(0)\qeq\Var{g}(0)$. -They admit a trivial unifier, here $\Var{f}\equiv \lambda x.\Var{a}$ and -$\Var{g}\equiv \lambda y.\Var{a}$, where $\Var{a}$ is a new unknown. They -admit many other unifiers, such as $\Var{f} \equiv \lambda x.\Var{g}(0)$ -and $\{\Var{f} \equiv \lambda x.x,\, \Var{g} \equiv \lambda x.0\}$. Huet's -procedure does not enumerate the unifiers; instead, it retains flex-flex -equations as constraints on future unifications. Flex-flex constraints -occasionally become attached to a proof state; more frequently, they appear -during use of \texttt{RS} and~\texttt{RSN}: -\begin{ttbox} -refl; -{\out val it = "?a = ?a" : thm} -exI; -{\out val it = "?P(?x) ==> EX x. ?P(x)" : thm} -refl RS exI; -{\out val it = "EX x. ?a3(x) = ?a2(x)" [.] : thm} -\end{ttbox} -% -The mysterious symbol \texttt{[.]} indicates that the result is subject to -a meta-level hypothesis. We can make all such hypotheses visible by setting the -\ttindexbold{show_hyps} flag: -\begin{ttbox} -set show_hyps; -{\out val it = true : bool} -refl RS exI; -{\out val it = "EX x. ?a3(x) = ?a2(x)" ["?a3(?x) =?= ?a2(?x)"] : thm} -\end{ttbox} - -\noindent -Renaming variables, this is $\exists x.\Var{f}(x)=\Var{g}(x)$ with -the constraint ${\Var{f}(\Var{u})\qeq\Var{g}(\Var{u})}$. Instances -satisfying the constraint include $\exists x.\Var{f}(x)=\Var{f}(x)$ and -$\exists x.x=\Var{u}$. Calling \ttindex{flexflex_rule} removes all -constraints by applying the trivial unifier:\index{*prthq} -\begin{ttbox} -prthq (flexflex_rule it); -{\out EX x. ?a4 = ?a4} -\end{ttbox} -Isabelle simplifies flex-flex equations to eliminate redundant bound -variables. In $\lambda x\,y.\Var{f}(k(y),x) \qeq \lambda x\,y.\Var{g}(y)$, -there is no bound occurrence of~$x$ on the right side; thus, there will be -none on the left in a common instance of these terms. Choosing a new -variable~$\Var{h}$, Isabelle assigns $\Var{f}\equiv \lambda u\,v.?h(u)$, -simplifying the left side to $\lambda x\,y.\Var{h}(k(y))$. Dropping $x$ -from the equation leaves $\lambda y.\Var{h}(k(y)) \qeq \lambda -y.\Var{g}(y)$. By $\eta$-conversion, this simplifies to the assignment -$\Var{g}\equiv\lambda y.?h(k(y))$. - -\begin{warn} -\ttindex{RS} and \ttindex{RSN} fail (by raising exception \texttt{THM}) unless -the resolution delivers {\bf exactly one} resolvent. For multiple results, -use \ttindex{RL} and \ttindex{RLN}, which operate on theorem lists. The -following example uses \ttindex{read_instantiate} to create an instance -of \tdx{refl} containing no schematic variables. 
-\begin{ttbox} -val reflk = read_instantiate [("a","k")] refl; -{\out val reflk = "k = k" : thm} -\end{ttbox} - -\noindent -A flex-flex constraint is no longer possible; resolution does not find a -unique unifier: -\begin{ttbox} -reflk RS exI; -{\out uncaught exception} -{\out THM ("RSN: multiple unifiers", 1,} -{\out ["k = k", "?P(?x) ==> EX x. ?P(x)"])} -\end{ttbox} -Using \ttindex{RL} this time, we discover that there are four unifiers, and -four resolvents: -\begin{ttbox} -[reflk] RL [exI]; -{\out val it = ["EX x. x = x", "EX x. k = x",} -{\out "EX x. x = k", "EX x. k = k"] : thm list} -\end{ttbox} -\end{warn} - -\index{forward proof|)} - -\section{Backward proof} -Although \texttt{RS} and \texttt{RSN} are fine for simple forward reasoning, -large proofs require tactics. Isabelle provides a suite of commands for -conducting a backward proof using tactics. - -\subsection{The basic tactics} -The tactics \texttt{assume_tac}, {\tt -resolve_tac}, \texttt{eresolve_tac}, and \texttt{dresolve_tac} suffice for most -single-step proofs. Although \texttt{eresolve_tac} and \texttt{dresolve_tac} are -not strictly necessary, they simplify proofs involving elimination and -destruction rules. All the tactics act on a subgoal designated by a -positive integer~$i$, failing if~$i$ is out of range. The resolution -tactics try their list of theorems in left-to-right order. - -\begin{ttdescription} -\item[\ttindex{assume_tac} {\it i}] \index{tactics!assumption} - is the tactic that attempts to solve subgoal~$i$ by assumption. Proof by - assumption is not a trivial step; it can falsify other subgoals by - instantiating shared variables. There may be several ways of solving the - subgoal by assumption. - -\item[\ttindex{resolve_tac} {\it thms} {\it i}]\index{tactics!resolution} - is the basic resolution tactic, used for most proof steps. The $thms$ - represent object-rules, which are resolved against subgoal~$i$ of the - proof state. For each rule, resolution forms next states by unifying the - conclusion with the subgoal and inserting instantiated premises in its - place. A rule can admit many higher-order unifiers. The tactic fails if - none of the rules generates next states. - -\item[\ttindex{eresolve_tac} {\it thms} {\it i}] \index{elim-resolution} - performs elim-resolution. Like \texttt{resolve_tac~{\it thms}~{\it i\/}} - followed by \texttt{assume_tac~{\it i}}, it applies a rule then solves its - first premise by assumption. But \texttt{eresolve_tac} additionally deletes - that assumption from any subgoals arising from the resolution. - -\item[\ttindex{dresolve_tac} {\it thms} {\it i}] - \index{forward proof}\index{destruct-resolution} - performs destruct-resolution with the~$thms$, as described - in~\S\ref{destruct}. It is useful for forward reasoning from the - assumptions. -\end{ttdescription} - -\subsection{Commands for backward proof} -\index{proofs!commands for} -Tactics are normally applied using the subgoal module, which maintains a -proof state and manages the proof construction. It allows interactive -backtracking through the proof space, going away to prove lemmas, etc.; of -its many commands, most important are the following: -\begin{ttdescription} -\item[\ttindex{Goal} {\it formula}; ] -begins a new proof, where the {\it formula\/} is written as an \ML\ string. - -\item[\ttindex{by} {\it tactic}; ] -applies the {\it tactic\/} to the current proof -state, raising an exception if the tactic fails. - -\item[\ttindex{undo}(); ] - reverts to the previous proof state. 
Undo can be repeated but cannot be - undone. Do not omit the parentheses; typing {\tt\ \ undo;\ \ } merely - causes \ML\ to echo the value of that function. - -\item[\ttindex{result}();] -returns the theorem just proved, in a standard format. It fails if -unproved subgoals are left, etc. - -\item[\ttindex{qed} {\it name};] is the usual way of ending a proof. - It gets the theorem using \texttt{result}, stores it in Isabelle's - theorem database and binds it to an \ML{} identifier. - -\end{ttdescription} -The commands and tactics given above are cumbersome for interactive use. -Although our examples will use the full commands, you may prefer Isabelle's -shortcuts: -\begin{center} \tt -\index{*br} \index{*be} \index{*bd} \index{*ba} -\begin{tabular}{l@{\qquad\rm abbreviates\qquad}l} - ba {\it i}; & by (assume_tac {\it i}); \\ - - br {\it thm} {\it i}; & by (resolve_tac [{\it thm}] {\it i}); \\ - - be {\it thm} {\it i}; & by (eresolve_tac [{\it thm}] {\it i}); \\ - - bd {\it thm} {\it i}; & by (dresolve_tac [{\it thm}] {\it i}); -\end{tabular} -\end{center} - -\subsection{A trivial example in propositional logic} -\index{examples!propositional} - -Directory \texttt{FOL} of the Isabelle distribution defines the theory of -first-order logic. Let us try the example from \S\ref{prop-proof}, -entering the goal $P\disj P\imp P$ in that theory.\footnote{To run these - examples, see the file \texttt{FOL/ex/intro.ML}.} -\begin{ttbox} -Goal "P|P --> P"; -{\out Level 0} -{\out P | P --> P} -{\out 1. P | P --> P} -\end{ttbox}\index{level of a proof} -Isabelle responds by printing the initial proof state, which has $P\disj -P\imp P$ as the main goal and the only subgoal. The {\bf level} of the -state is the number of \texttt{by} commands that have been applied to reach -it. We now use \ttindex{resolve_tac} to apply the rule \tdx{impI}, -or~$({\imp}I)$, to subgoal~1: -\begin{ttbox} -by (resolve_tac [impI] 1); -{\out Level 1} -{\out P | P --> P} -{\out 1. P | P ==> P} -\end{ttbox} -In the new proof state, subgoal~1 is $P$ under the assumption $P\disj P$. -(The meta-implication {\tt==>} indicates assumptions.) We apply -\tdx{disjE}, or~(${\disj}E)$, to that subgoal: -\begin{ttbox} -by (resolve_tac [disjE] 1); -{\out Level 2} -{\out P | P --> P} -{\out 1. P | P ==> ?P1 | ?Q1} -{\out 2. [| P | P; ?P1 |] ==> P} -{\out 3. [| P | P; ?Q1 |] ==> P} -\end{ttbox} -At Level~2 there are three subgoals, each provable by assumption. We -deviate from~\S\ref{prop-proof} by tackling subgoal~3 first, using -\ttindex{assume_tac}. This affects subgoal~1, updating {\tt?Q1} to~{\tt - P}. -\begin{ttbox} -by (assume_tac 3); -{\out Level 3} -{\out P | P --> P} -{\out 1. P | P ==> ?P1 | P} -{\out 2. [| P | P; ?P1 |] ==> P} -\end{ttbox} -Next we tackle subgoal~2, instantiating {\tt?P1} to~\texttt{P} in subgoal~1. -\begin{ttbox} -by (assume_tac 2); -{\out Level 4} -{\out P | P --> P} -{\out 1. P | P ==> P | P} -\end{ttbox} -Lastly we prove the remaining subgoal by assumption: -\begin{ttbox} -by (assume_tac 1); -{\out Level 5} -{\out P | P --> P} -{\out No subgoals!} -\end{ttbox} -Isabelle tells us that there are no longer any subgoals: the proof is -complete. Calling \texttt{qed} stores the theorem. -\begin{ttbox} -qed "mythm"; -{\out val mythm = "?P | ?P --> ?P" : thm} -\end{ttbox} -Isabelle has replaced the free variable~\texttt{P} by the scheme -variable~{\tt?P}\@. Free variables in the proof state remain fixed -throughout the proof. 
Isabelle finally converts them to scheme variables -so that the resulting theorem can be instantiated with any formula. - -As an exercise, try doing the proof as in \S\ref{prop-proof}, observing how -instantiations affect the proof state. - - -\subsection{Part of a distributive law} -\index{examples!propositional} -To demonstrate the tactics \ttindex{eresolve_tac}, \ttindex{dresolve_tac} -and the tactical \texttt{REPEAT}, let us prove part of the distributive -law -\[ (P\conj Q)\disj R \,\bimp\, (P\disj R)\conj (Q\disj R). \] -We begin by stating the goal to Isabelle and applying~$({\imp}I)$ to it: -\begin{ttbox} -Goal "(P & Q) | R --> (P | R)"; -{\out Level 0} -{\out P & Q | R --> P | R} -{\out 1. P & Q | R --> P | R} -\ttbreak -by (resolve_tac [impI] 1); -{\out Level 1} -{\out P & Q | R --> P | R} -{\out 1. P & Q | R ==> P | R} -\end{ttbox} -Previously we applied~(${\disj}E)$ using \texttt{resolve_tac}, but -\ttindex{eresolve_tac} deletes the assumption after use. The resulting proof -state is simpler. -\begin{ttbox} -by (eresolve_tac [disjE] 1); -{\out Level 2} -{\out P & Q | R --> P | R} -{\out 1. P & Q ==> P | R} -{\out 2. R ==> P | R} -\end{ttbox} -Using \ttindex{dresolve_tac}, we can apply~(${\conj}E1)$ to subgoal~1, -replacing the assumption $P\conj Q$ by~$P$. Normally we should apply the -rule~(${\conj}E)$, given in~\S\ref{destruct}. That is an elimination rule -and requires \texttt{eresolve_tac}; it would replace $P\conj Q$ by the two -assumptions~$P$ and~$Q$. Because the present example does not need~$Q$, we -may try out \texttt{dresolve_tac}. -\begin{ttbox} -by (dresolve_tac [conjunct1] 1); -{\out Level 3} -{\out P & Q | R --> P | R} -{\out 1. P ==> P | R} -{\out 2. R ==> P | R} -\end{ttbox} -The next two steps apply~(${\disj}I1$) and~(${\disj}I2$) in an obvious manner. -\begin{ttbox} -by (resolve_tac [disjI1] 1); -{\out Level 4} -{\out P & Q | R --> P | R} -{\out 1. P ==> P} -{\out 2. R ==> P | R} -\ttbreak -by (resolve_tac [disjI2] 2); -{\out Level 5} -{\out P & Q | R --> P | R} -{\out 1. P ==> P} -{\out 2. R ==> R} -\end{ttbox} -Two calls of \texttt{assume_tac} can finish the proof. The -tactical~\ttindex{REPEAT} here expresses a tactic that calls \texttt{assume_tac~1} -as many times as possible. We can restrict attention to subgoal~1 because -the other subgoals move up after subgoal~1 disappears. -\begin{ttbox} -by (REPEAT (assume_tac 1)); -{\out Level 6} -{\out P & Q | R --> P | R} -{\out No subgoals!} -\end{ttbox} - - -\section{Quantifier reasoning} -\index{quantifiers}\index{parameters}\index{unknowns}\index{unknowns!function} -This section illustrates how Isabelle enforces quantifier provisos. -Suppose that $x$, $y$ and~$z$ are parameters of a subgoal. Quantifier -rules create terms such as~$\Var{f}(x,z)$, where~$\Var{f}$ is a function -unknown. Instantiating $\Var{f}$ to $\lambda x\,z.t$ has the effect of -replacing~$\Var{f}(x,z)$ by~$t$, where the term~$t$ may contain free -occurrences of~$x$ and~$z$. On the other hand, no instantiation -of~$\Var{f}$ can replace~$\Var{f}(x,z)$ by a term containing free -occurrences of~$y$, since parameters are bound variables. - -\subsection{Two quantifier proofs: a success and a failure} -\index{examples!with quantifiers} -Let us contrast a proof of the theorem $\forall x.\exists y.x=y$ with an -attempted proof of the non-theorem $\exists y.\forall x.x=y$. The former -proof succeeds, and the latter fails, because of the scope of quantified -variables~\cite{paulson-found}. Unification helps even in these trivial -proofs. 
In $\forall x.\exists y.x=y$ the $y$ that `exists' is simply $x$, -but we need never say so. This choice is forced by the reflexive law for -equality, and happens automatically. - -\paragraph{The successful proof.} -The proof of $\forall x.\exists y.x=y$ demonstrates the introduction rules -$(\forall I)$ and~$(\exists I)$. We state the goal and apply $(\forall I)$: -\begin{ttbox} -Goal "ALL x. EX y. x=y"; -{\out Level 0} -{\out ALL x. EX y. x = y} -{\out 1. ALL x. EX y. x = y} -\ttbreak -by (resolve_tac [allI] 1); -{\out Level 1} -{\out ALL x. EX y. x = y} -{\out 1. !!x. EX y. x = y} -\end{ttbox} -The variable~\texttt{x} is no longer universally quantified, but is a -parameter in the subgoal; thus, it is universally quantified at the -meta-level. The subgoal must be proved for all possible values of~\texttt{x}. - -To remove the existential quantifier, we apply the rule $(\exists I)$: -\begin{ttbox} -by (resolve_tac [exI] 1); -{\out Level 2} -{\out ALL x. EX y. x = y} -{\out 1. !!x. x = ?y1(x)} -\end{ttbox} -The bound variable \texttt{y} has become {\tt?y1(x)}. This term consists of -the function unknown~{\tt?y1} applied to the parameter~\texttt{x}. -Instances of {\tt?y1(x)} may or may not contain~\texttt{x}. We resolve the -subgoal with the reflexivity axiom. -\begin{ttbox} -by (resolve_tac [refl] 1); -{\out Level 3} -{\out ALL x. EX y. x = y} -{\out No subgoals!} -\end{ttbox} -Let us consider what has happened in detail. The reflexivity axiom is -lifted over~$x$ to become $\Forall x.\Var{f}(x)=\Var{f}(x)$, which is -unified with $\Forall x.x=\Var{y@1}(x)$. The function unknowns $\Var{f}$ -and~$\Var{y@1}$ are both instantiated to the identity function, and -$x=\Var{y@1}(x)$ collapses to~$x=x$ by $\beta$-reduction. - -\paragraph{The unsuccessful proof.} -We state the goal $\exists y.\forall x.x=y$, which is not a theorem, and -try~$(\exists I)$: -\begin{ttbox} -Goal "EX y. ALL x. x=y"; -{\out Level 0} -{\out EX y. ALL x. x = y} -{\out 1. EX y. ALL x. x = y} -\ttbreak -by (resolve_tac [exI] 1); -{\out Level 1} -{\out EX y. ALL x. x = y} -{\out 1. ALL x. x = ?y} -\end{ttbox} -The unknown \texttt{?y} may be replaced by any term, but this can never -introduce another bound occurrence of~\texttt{x}. We now apply~$(\forall I)$: -\begin{ttbox} -by (resolve_tac [allI] 1); -{\out Level 2} -{\out EX y. ALL x. x = y} -{\out 1. !!x. x = ?y} -\end{ttbox} -Compare our position with the previous Level~2. Instead of {\tt?y1(x)} we -have~{\tt?y}, whose instances may not contain the bound variable~\texttt{x}. -The reflexivity axiom does not unify with subgoal~1. -\begin{ttbox} -by (resolve_tac [refl] 1); -{\out by: tactic failed} -\end{ttbox} -There can be no proof of $\exists y.\forall x.x=y$ by the soundness of -first-order logic. I have elsewhere proved the faithfulness of Isabelle's -encoding of first-order logic~\cite{paulson-found}; there could, of course, be -faults in the implementation. - - -\subsection{Nested quantifiers} -\index{examples!with quantifiers} -Multiple quantifiers create complex terms. Proving -\[ (\forall x\,y.P(x,y)) \imp (\forall z\,w.P(w,z)) \] -will demonstrate how parameters and unknowns develop. If they appear in -the wrong order, the proof will fail. - -This section concludes with a demonstration of \texttt{REPEAT} -and~\texttt{ORELSE}. -\begin{ttbox} -Goal "(ALL x y.P(x,y)) --> (ALL z w.P(w,z))"; -{\out Level 0} -{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} -{\out 1. (ALL x y. P(x,y)) --> (ALL z w. 
P(w,z))} -\ttbreak -by (resolve_tac [impI] 1); -{\out Level 1} -{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} -{\out 1. ALL x y. P(x,y) ==> ALL z w. P(w,z)} -\end{ttbox} - -\paragraph{The wrong approach.} -Using \texttt{dresolve_tac}, we apply the rule $(\forall E)$, bound to the -\ML\ identifier \tdx{spec}. Then we apply $(\forall I)$. -\begin{ttbox} -by (dresolve_tac [spec] 1); -{\out Level 2} -{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} -{\out 1. ALL y. P(?x1,y) ==> ALL z w. P(w,z)} -\ttbreak -by (resolve_tac [allI] 1); -{\out Level 3} -{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} -{\out 1. !!z. ALL y. P(?x1,y) ==> ALL w. P(w,z)} -\end{ttbox} -The unknown \texttt{?x1} and the parameter \texttt{z} have appeared. We again -apply $(\forall E)$ and~$(\forall I)$. -\begin{ttbox} -by (dresolve_tac [spec] 1); -{\out Level 4} -{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} -{\out 1. !!z. P(?x1,?y3(z)) ==> ALL w. P(w,z)} -\ttbreak -by (resolve_tac [allI] 1); -{\out Level 5} -{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} -{\out 1. !!z w. P(?x1,?y3(z)) ==> P(w,z)} -\end{ttbox} -The unknown \texttt{?y3} and the parameter \texttt{w} have appeared. Each -unknown is applied to the parameters existing at the time of its creation; -instances of~\texttt{?x1} cannot contain~\texttt{z} or~\texttt{w}, while instances -of {\tt?y3(z)} can only contain~\texttt{z}. Due to the restriction on~\texttt{?x1}, -proof by assumption will fail. -\begin{ttbox} -by (assume_tac 1); -{\out by: tactic failed} -{\out uncaught exception ERROR} -\end{ttbox} - -\paragraph{The right approach.} -To do this proof, the rules must be applied in the correct order. -Parameters should be created before unknowns. The -\ttindex{choplev} command returns to an earlier stage of the proof; -let us return to the result of applying~$({\imp}I)$: -\begin{ttbox} -choplev 1; -{\out Level 1} -{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} -{\out 1. ALL x y. P(x,y) ==> ALL z w. P(w,z)} -\end{ttbox} -Previously we made the mistake of applying $(\forall E)$ before $(\forall I)$. -\begin{ttbox} -by (resolve_tac [allI] 1); -{\out Level 2} -{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} -{\out 1. !!z. ALL x y. P(x,y) ==> ALL w. P(w,z)} -\ttbreak -by (resolve_tac [allI] 1); -{\out Level 3} -{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} -{\out 1. !!z w. ALL x y. P(x,y) ==> P(w,z)} -\end{ttbox} -The parameters \texttt{z} and~\texttt{w} have appeared. We now create the -unknowns: -\begin{ttbox} -by (dresolve_tac [spec] 1); -{\out Level 4} -{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} -{\out 1. !!z w. ALL y. P(?x3(z,w),y) ==> P(w,z)} -\ttbreak -by (dresolve_tac [spec] 1); -{\out Level 5} -{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} -{\out 1. !!z w. P(?x3(z,w),?y4(z,w)) ==> P(w,z)} -\end{ttbox} -Both {\tt?x3(z,w)} and~{\tt?y4(z,w)} could become any terms containing {\tt -z} and~\texttt{w}: -\begin{ttbox} -by (assume_tac 1); -{\out Level 6} -{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} -{\out No subgoals!} -\end{ttbox} - -\paragraph{A one-step proof using tacticals.} -\index{tacticals} \index{examples!of tacticals} - -Repeated application of rules can be effective, but the rules should be -attempted in the correct order. Let us return to the original goal using -\ttindex{choplev}: -\begin{ttbox} -choplev 0; -{\out Level 0} -{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} -{\out 1. (ALL x y. P(x,y)) --> (ALL z w. 
P(w,z))} -\end{ttbox} -As we have just seen, \tdx{allI} should be attempted -before~\tdx{spec}, while \ttindex{assume_tac} generally can be -attempted first. Such priorities can easily be expressed -using~\ttindex{ORELSE}, and repeated using~\ttindex{REPEAT}. -\begin{ttbox} -by (REPEAT (assume_tac 1 ORELSE resolve_tac [impI,allI] 1 - ORELSE dresolve_tac [spec] 1)); -{\out Level 1} -{\out (ALL x y. P(x,y)) --> (ALL z w. P(w,z))} -{\out No subgoals!} -\end{ttbox} - - -\subsection{A realistic quantifier proof} -\index{examples!with quantifiers} -To see the practical use of parameters and unknowns, let us prove half of -the equivalence -\[ (\forall x. P(x) \imp Q) \,\bimp\, ((\exists x. P(x)) \imp Q). \] -We state the left-to-right half to Isabelle in the normal way. -Since $\imp$ is nested to the right, $({\imp}I)$ can be applied twice; we -use \texttt{REPEAT}: -\begin{ttbox} -Goal "(ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q"; -{\out Level 0} -{\out (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q} -{\out 1. (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q} -\ttbreak -by (REPEAT (resolve_tac [impI] 1)); -{\out Level 1} -{\out (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q} -{\out 1. [| ALL x. P(x) --> Q; EX x. P(x) |] ==> Q} -\end{ttbox} -We can eliminate the universal or the existential quantifier. The -existential quantifier should be eliminated first, since this creates a -parameter. The rule~$(\exists E)$ is bound to the -identifier~\tdx{exE}. -\begin{ttbox} -by (eresolve_tac [exE] 1); -{\out Level 2} -{\out (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q} -{\out 1. !!x. [| ALL x. P(x) --> Q; P(x) |] ==> Q} -\end{ttbox} -The only possibility now is $(\forall E)$, a destruction rule. We use -\ttindex{dresolve_tac}, which discards the quantified assumption; it is -only needed once. -\begin{ttbox} -by (dresolve_tac [spec] 1); -{\out Level 3} -{\out (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q} -{\out 1. !!x. [| P(x); P(?x3(x)) --> Q |] ==> Q} -\end{ttbox} -Because we applied $(\exists E)$ before $(\forall E)$, the unknown -term~{\tt?x3(x)} may depend upon the parameter~\texttt{x}. - -Although $({\imp}E)$ is a destruction rule, it works with -\ttindex{eresolve_tac} to perform backward chaining. This technique is -frequently useful. -\begin{ttbox} -by (eresolve_tac [mp] 1); -{\out Level 4} -{\out (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q} -{\out 1. !!x. P(x) ==> P(?x3(x))} -\end{ttbox} -The tactic has reduced~\texttt{Q} to~\texttt{P(?x3(x))}, deleting the -implication. The final step is trivial, thanks to the occurrence of~\texttt{x}. -\begin{ttbox} -by (assume_tac 1); -{\out Level 5} -{\out (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q} -{\out No subgoals!} -\end{ttbox} - - -\subsection{The classical reasoner} -\index{classical reasoner} -Isabelle provides enough automation to tackle substantial examples. -The classical -reasoner can be set up for any classical natural deduction logic; -see \iflabelundefined{chap:classical}{the {\em Reference Manual\/}}% - {Chap.\ts\ref{chap:classical}}. -It cannot compete with fully automatic theorem provers, but it is -competitive with tools found in other interactive provers. - -Rules are packaged into {\bf classical sets}. The classical reasoner -provides several tactics, which apply rules using naive algorithms. -Unification handles quantifiers as shown above. The most useful tactic -is~\ttindex{Blast_tac}. - -Let us solve problems~40 and~60 of Pelletier~\cite{pelletier86}. 
(The -backslashes~\hbox{\verb|\|\ldots\verb|\|} are an \ML{} string escape -sequence, to break the long string over two lines.) -\begin{ttbox} -Goal "(EX y. ALL x. J(y,x) <-> ~J(x,x)) \ttback -\ttback --> ~ (ALL x. EX y. ALL z. J(z,y) <-> ~ J(z,x))"; -{\out Level 0} -{\out (EX y. ALL x. J(y,x) <-> ~J(x,x)) -->} -{\out ~(ALL x. EX y. ALL z. J(z,y) <-> ~J(z,x))} -{\out 1. (EX y. ALL x. J(y,x) <-> ~J(x,x)) -->} -{\out ~(ALL x. EX y. ALL z. J(z,y) <-> ~J(z,x))} -\end{ttbox} -\ttindex{Blast_tac} proves subgoal~1 at a stroke. -\begin{ttbox} -by (Blast_tac 1); -{\out Depth = 0} -{\out Depth = 1} -{\out Level 1} -{\out (EX y. ALL x. J(y,x) <-> ~J(x,x)) -->} -{\out ~(ALL x. EX y. ALL z. J(z,y) <-> ~J(z,x))} -{\out No subgoals!} -\end{ttbox} -Sceptics may examine the proof by calling the package's single-step -tactics, such as~\texttt{step_tac}. This would take up much space, however, -so let us proceed to the next example: -\begin{ttbox} -Goal "ALL x. P(x,f(x)) <-> \ttback -\ttback (EX y. (ALL z. P(z,y) --> P(z,f(x))) & P(x,y))"; -{\out Level 0} -{\out ALL x. P(x,f(x)) <-> (EX y. (ALL z. P(z,y) --> P(z,f(x))) & P(x,y))} -{\out 1. ALL x. P(x,f(x)) <->} -{\out (EX y. (ALL z. P(z,y) --> P(z,f(x))) & P(x,y))} -\end{ttbox} -Again, subgoal~1 succumbs immediately. -\begin{ttbox} -by (Blast_tac 1); -{\out Depth = 0} -{\out Depth = 1} -{\out Level 1} -{\out ALL x. P(x,f(x)) <-> (EX y. (ALL z. P(z,y) --> P(z,f(x))) & P(x,y))} -{\out No subgoals!} -\end{ttbox} -The classical reasoner is not restricted to the usual logical connectives. -The natural deduction rules for unions and intersections resemble those for -disjunction and conjunction. The rules for infinite unions and -intersections resemble those for quantifiers. Given such rules, the classical -reasoner is effective for reasoning in set theory. - diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/intro.tex --- a/doc-src/Intro/intro.tex Mon Aug 27 20:19:09 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,154 +0,0 @@ -\documentclass[12pt,a4paper]{article} -\usepackage{graphicx,../iman,../extra,../ttbox,../proof,../pdfsetup} - -%% run bibtex intro to prepare bibliography -%% run ../sedindex intro to prepare index file -%prth *(\(.*\)); \1; -%{\\out \(.*\)} {\\out val it = "\1" : thm} - -\title{\includegraphics[scale=0.5]{isabelle} \\[4ex] Old Introduction to Isabelle} -\author{{\em Lawrence C. Paulson}\\ - Computer Laboratory \\ University of Cambridge \\ - \texttt{lcp@cl.cam.ac.uk}\\[3ex] - With Contributions by Tobias Nipkow and Markus Wenzel -} -\makeindex - -\underscoreoff - -\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2} - -\sloppy -\binperiod %%%treat . like a binary operator - -\newcommand\qeq{\stackrel{?}{\equiv}} %for disagreement pairs in unification -\newcommand{\nand}{\mathbin{\lnot\&}} -\newcommand{\xor}{\mathbin{\#}} - -\pagenumbering{roman} -\begin{document} -\pagestyle{empty} -\begin{titlepage} -\maketitle -\emph{Note}: this document is part of the earlier Isabelle documentation, -which is largely superseded by the Isabelle/HOL -\emph{Tutorial}~\cite{isa-tutorial}. It describes the old-style theory -syntax and shows how to conduct proofs using the -ML top level. This style of interaction is largely obsolete: -most Isabelle proofs are now written using the Isar -language and the Proof General interface. However, this paper contains valuable -information that is not available elsewhere. Its examples are based -on first-order logic rather than higher-order logic. 
-
-\thispagestyle{empty}
-\vfill
-{\small Copyright \copyright{} \number\year{} by Lawrence C. Paulson}
-\end{titlepage}
-
-\pagestyle{headings}
-\part*{Preface}
-\index{Isabelle!overview} \index{Isabelle!object-logics supported}
-Isabelle~\cite{paulson-natural,paulson-found,paulson700} is a generic theorem
-prover.  It has been instantiated to support reasoning in several
-object-logics:
-\begin{itemize}
-\item first-order logic, constructive and classical versions
-\item higher-order logic, similar to that of Gordon's {\sc
-hol}~\cite{mgordon-hol}
-\item Zermelo-Fraenkel set theory~\cite{suppes72}
-\item an extensional version of Martin-L\"of's Type Theory~\cite{nordstrom90}
-\item the classical first-order sequent calculus, {\sc lk}
-\item the modal logics $T$, $S4$, and $S43$
-\item the Logic for Computable Functions~\cite{paulson87}
-\end{itemize}
-A logic's syntax and inference rules are specified declaratively; this
-allows single-step proof construction.  Isabelle provides control
-structures for expressing search procedures.  Isabelle also provides
-several generic tools, such as simplifiers and classical theorem provers,
-which can be applied to object-logics.
-
-Isabelle is a large system, but beginners can get by with a small
-repertoire of commands and a basic knowledge of how Isabelle works.
-The Isabelle/HOL \emph{Tutorial}~\cite{isa-tutorial} describes how to get started. Advanced Isabelle users will benefit from some
-knowledge of Standard~\ML{}, because Isabelle is written in \ML{};
-\index{ML}
-if you are prepared to write \ML{}
-code, you can get Isabelle to do almost anything.  My book
-on~\ML{}~\cite{paulson-ml2} covers much material connected with Isabelle,
-including a simple theorem prover.  Users must be familiar with logic as
-used in computer science; there are many good
-texts~\cite{galton90,reeves90}.
-
-\index{LCF}
-{\sc lcf}, developed by Robin Milner and colleagues~\cite{mgordon79}, is an
-ancestor of {\sc hol}, Nuprl, and several other systems.  Isabelle borrows
-ideas from {\sc lcf}: formulae are~\ML{} values; theorems belong to an
-abstract type; tactics and tacticals support backward proof.  But {\sc lcf}
-represents object-level rules by functions, while Isabelle represents them
-by terms.  You may find my other writings~\cite{paulson87,paulson-handbook}
-helpful in understanding the relationship between {\sc lcf} and Isabelle.
-
-\index{Isabelle!release history} Isabelle was first distributed in 1986.
-The 1987 version introduced a higher-order meta-logic with an improved
-treatment of quantifiers.  The 1988 version added limited polymorphism and
-support for natural deduction.  The 1989 version included a parser and
-pretty printer generator.  The 1992 version introduced type classes, to
-support many-sorted and higher-order logics.  The 1994 version introduced
-greater support for theories.  The most important recent change is the
-introduction of the Isar proof language, thanks to Markus Wenzel.
-Isabelle is still under
-development and will continue to change.
-
-\subsubsection*{Overview}
-This manual consists of three parts.  Part~I discusses Isabelle's
-foundations.  Part~II presents simple on-line sessions, starting with
-forward proof.  It also covers basic tactics and tacticals, and some
-commands for invoking them.  Part~III contains further examples for users
-with a bit of experience.  It explains how to derive rules and define
-theories, and concludes with an extended example: a Prolog interpreter. 
-
-Isabelle's Reference Manual and Object-Logics manual contain more details.
-They assume familiarity with the concepts presented here.
-
-
-\subsubsection*{Acknowledgements}
-Tobias Nipkow contributed most of the section on defining theories.
-Stefan Berghofer, Sara Kalvala and Viktor Kuncak suggested improvements.
-
-Tobias Nipkow has made immense contributions to Isabelle, including the parser
-generator, type classes, and the simplifier.  Carsten Clasohm and Markus
-Wenzel made major contributions; Sonia Mahjoub and Karin Nimmermann also
-helped.  Isabelle was developed using Dave Matthews's Standard~{\sc ml}
-compiler, Poly/{\sc ml}.  Many people have contributed to Isabelle's standard
-object-logics, including Martin Coen, Philippe de Groote and Philippe No\"el.
-The research has been funded by the EPSRC (grants GR/G53279, GR/H40570,
-GR/K57381, GR/K77051, GR/M75440) and by ESPRIT (projects 3245: Logical
-Frameworks, and 6453: Types), and by the DFG Schwerpunktprogramm
-\emph{Deduktion}.
-
-\newpage
-\pagestyle{plain} \tableofcontents
-\newpage
-
-\newfont{\sanssi}{cmssi12}
-\vspace*{2.5cm}
-\begin{quote}
-\raggedleft
-{\sanssi
-You can only find truth with logic\\
-if you have already found truth without it.}\\
-\bigskip
-
-G.K. Chesterton, {\em The Man who was Orthodox}
-\end{quote}
-
-\clearfirst \pagestyle{headings}
-\include{foundations}
-\include{getting}
-\include{advanced}
-
-\bibliographystyle{plain} \small\raggedright\frenchspacing
-\bibliography{../manual}
-
-\printindex
-\end{document}
diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/list.thy
--- a/doc-src/Intro/list.thy	Mon Aug 27 20:19:09 2012 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,6 +0,0 @@
-List = FOL +
-types list 1
-arities list :: (term)term
-consts Nil :: "'a list"
-       Cons :: "['a, 'a list] => 'a list"
-end
diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/prod.thy
--- a/doc-src/Intro/prod.thy	Mon Aug 27 20:19:09 2012 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,9 +0,0 @@
-Prod = FOL +
-types "*" 2 (infixl 20)
-arities "*" :: (term,term)term
-consts fst :: "'a * 'b => 'a"
-       snd :: "'a * 'b => 'b"
-       Pair :: "['a,'b] => 'a * 'b" ("(1<_,/_>)")
-rules fst "fst(<a,b>) = a"
-      snd "snd(<a,b>) = b"
-end
diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/quant.txt
--- a/doc-src/Intro/quant.txt	Mon Aug 27 20:19:09 2012 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,244 +0,0 @@
-(**** quantifier examples -- process using Doc/tout quant.txt ****)
-
-Pretty.setmargin 72; (*existing macros just allow this margin*)
-print_depth 0;
-
-
-goal Int_Rule.thy "(ALL x y.P(x,y)) --> (ALL z w.P(w,z))";
-by (resolve_tac [impI] 1);
-by (dresolve_tac [spec] 1);
-by (resolve_tac [allI] 1);
-by (dresolve_tac [spec] 1);
-by (resolve_tac [allI] 1);
-by (assume_tac 1);
-choplev 1;
-by (resolve_tac [allI] 1);
-by (resolve_tac [allI] 1);
-by (dresolve_tac [spec] 1);
-by (dresolve_tac [spec] 1);
-by (assume_tac 1);
-
-choplev 0;
-by (REPEAT (assume_tac 1
-     ORELSE resolve_tac [impI,allI] 1
-     ORELSE dresolve_tac [spec] 1));
-
-
-- goal Int_Rule.thy "(ALL x y.P(x,y)) --> (ALL z w.P(w,z))";
-Level 0
-(ALL x y. P(x,y)) --> (ALL z w. P(w,z))
- 1. (ALL x y. P(x,y)) --> (ALL z w. P(w,z))
-val it = [] : thm list
-- by (resolve_tac [impI] 1);
-Level 1
-(ALL x y. P(x,y)) --> (ALL z w. P(w,z))
- 1. ALL x y. P(x,y) ==> ALL z w. P(w,z)
-val it = () : unit
-- by (dresolve_tac [spec] 1);
-Level 2
-(ALL x y. P(x,y)) --> (ALL z w. P(w,z))
- 1. ALL y. P(?x1,y) ==> ALL z w. 
P(w,z) -val it = () : unit -- by (resolve_tac [allI] 1); -Level 3 -(ALL x y. P(x,y)) --> (ALL z w. P(w,z)) - 1. !!z. ALL y. P(?x1,y) ==> ALL w. P(w,z) -val it = () : unit -- by (dresolve_tac [spec] 1); -Level 4 -(ALL x y. P(x,y)) --> (ALL z w. P(w,z)) - 1. !!z. P(?x1,?y3(z)) ==> ALL w. P(w,z) -val it = () : unit -- by (resolve_tac [allI] 1); -Level 5 -(ALL x y. P(x,y)) --> (ALL z w. P(w,z)) - 1. !!z w. P(?x1,?y3(z)) ==> P(w,z) -val it = () : unit -- by (assume_tac 1); -by: tactic returned no results - -uncaught exception ERROR -- choplev 1; -Level 1 -(ALL x y. P(x,y)) --> (ALL z w. P(w,z)) - 1. ALL x y. P(x,y) ==> ALL z w. P(w,z) -val it = () : unit -- by (resolve_tac [allI] 1); -Level 2 -(ALL x y. P(x,y)) --> (ALL z w. P(w,z)) - 1. !!z. ALL x y. P(x,y) ==> ALL w. P(w,z) -val it = () : unit -- by (resolve_tac [allI] 1); -Level 3 -(ALL x y. P(x,y)) --> (ALL z w. P(w,z)) - 1. !!z w. ALL x y. P(x,y) ==> P(w,z) -val it = () : unit -- by (dresolve_tac [spec] 1); -Level 4 -(ALL x y. P(x,y)) --> (ALL z w. P(w,z)) - 1. !!z w. ALL y. P(?x3(z,w),y) ==> P(w,z) -val it = () : unit -- by (dresolve_tac [spec] 1); -Level 5 -(ALL x y. P(x,y)) --> (ALL z w. P(w,z)) - 1. !!z w. P(?x3(z,w),?y4(z,w)) ==> P(w,z) -val it = () : unit -- by (assume_tac 1); -Level 6 -(ALL x y. P(x,y)) --> (ALL z w. P(w,z)) -No subgoals! - -> choplev 0; -Level 0 -(ALL x y. P(x,y)) --> (ALL z w. P(w,z)) - 1. (ALL x y. P(x,y)) --> (ALL z w. P(w,z)) -> by (REPEAT (assume_tac 1 -# ORELSE resolve_tac [impI,allI] 1 -# ORELSE dresolve_tac [spec] 1)); -Level 1 -(ALL x y. P(x,y)) --> (ALL z w. P(w,z)) -No subgoals! - - - -goal FOL_thy "ALL x. EX y. x=y"; -by (resolve_tac [allI] 1); -by (resolve_tac [exI] 1); -by (resolve_tac [refl] 1); - -- goal Int_Rule.thy "ALL x. EX y. x=y"; -Level 0 -ALL x. EX y. x = y - 1. ALL x. EX y. x = y -val it = [] : thm list -- by (resolve_tac [allI] 1); -Level 1 -ALL x. EX y. x = y - 1. !!x. EX y. x = y -val it = () : unit -- by (resolve_tac [exI] 1); -Level 2 -ALL x. EX y. x = y - 1. !!x. x = ?y1(x) -val it = () : unit -- by (resolve_tac [refl] 1); -Level 3 -ALL x. EX y. x = y -No subgoals! -val it = () : unit -- - -goal FOL_thy "EX y. ALL x. x=y"; -by (resolve_tac [exI] 1); -by (resolve_tac [allI] 1); -by (resolve_tac [refl] 1); - -- goal Int_Rule.thy "EX y. ALL x. x=y"; -Level 0 -EX y. ALL x. x = y - 1. EX y. ALL x. x = y -val it = [] : thm list -- by (resolve_tac [exI] 1); -Level 1 -EX y. ALL x. x = y - 1. ALL x. x = ?y -val it = () : unit -- by (resolve_tac [allI] 1); -Level 2 -EX y. ALL x. x = y - 1. !!x. x = ?y -val it = () : unit -- by (resolve_tac [refl] 1); -by: tactic returned no results - -uncaught exception ERROR - - - -goal FOL_thy "EX u. ALL x. EX v. ALL y. EX w. P(u,x,v,y,w)"; -by (resolve_tac [exI, allI] 1); -by (resolve_tac [exI, allI] 1); -by (resolve_tac [exI, allI] 1); -by (resolve_tac [exI, allI] 1); -by (resolve_tac [exI, allI] 1); - -- goal Int_Rule.thy "EX u. ALL x. EX v. ALL y. EX w. P(u,x,v,y,w)"; -Level 0 -EX u. ALL x. EX v. ALL y. EX w. P(u,x,v,y,w) - 1. EX u. ALL x. EX v. ALL y. EX w. P(u,x,v,y,w) -val it = [] : thm list -- by (resolve_tac [exI, allI] 1); -Level 1 -EX u. ALL x. EX v. ALL y. EX w. P(u,x,v,y,w) - 1. ALL x. EX v. ALL y. EX w. P(?u,x,v,y,w) -val it = () : unit -- by (resolve_tac [exI, allI] 1); -Level 2 -EX u. ALL x. EX v. ALL y. EX w. P(u,x,v,y,w) - 1. !!x. EX v. ALL y. EX w. P(?u,x,v,y,w) -val it = () : unit -- by (resolve_tac [exI, allI] 1); -Level 3 -EX u. ALL x. EX v. ALL y. EX w. P(u,x,v,y,w) - 1. !!x. ALL y. EX w. 
P(?u,x,?v2(x),y,w) -val it = () : unit -- by (resolve_tac [exI, allI] 1); -Level 4 -EX u. ALL x. EX v. ALL y. EX w. P(u,x,v,y,w) - 1. !!x y. EX w. P(?u,x,?v2(x),y,w) -val it = () : unit -- by (resolve_tac [exI, allI] 1); -Level 5 -EX u. ALL x. EX v. ALL y. EX w. P(u,x,v,y,w) - 1. !!x y. P(?u,x,?v2(x),y,?w4(x,y)) -val it = () : unit - - -goal FOL_thy "(ALL x.P(x) --> Q) --> (EX x.P(x))-->Q"; -by (REPEAT (resolve_tac [impI] 1)); -by (eresolve_tac [exE] 1); -by (dresolve_tac [spec] 1); -by (eresolve_tac [mp] 1); -by (assume_tac 1); - -- goal Int_Rule.thy "(ALL x.P(x) --> Q) --> (EX x.P(x))-->Q"; -Level 0 -(ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q - 1. (ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q -val it = [] : thm list -- by (REPEAT (resolve_tac [impI] 1)); -Level 1 -(ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q - 1. [| ALL x. P(x) --> Q; EX x. P(x) |] ==> Q -val it = () : unit -- by (eresolve_tac [exE] 1); -Level 2 -(ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q - 1. !!x. [| ALL x. P(x) --> Q; P(x) |] ==> Q -val it = () : unit -- by (dresolve_tac [spec] 1); -Level 3 -(ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q - 1. !!x. [| P(x); P(?x3(x)) --> Q |] ==> Q -val it = () : unit -- by (eresolve_tac [mp] 1); -Level 4 -(ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q - 1. !!x. P(x) ==> P(?x3(x)) -val it = () : unit -- by (assume_tac 1); -Level 5 -(ALL x. P(x) --> Q) --> (EX x. P(x)) --> Q -No subgoals! - - -goal FOL_thy "((EX x.P(x)) --> Q) --> (ALL x.P(x)-->Q)"; -by (REPEAT (resolve_tac [impI] 1)); - - -goal FOL_thy "(EX x.P(x) --> Q) --> (ALL x.P(x))-->Q"; -by (REPEAT (resolve_tac [impI] 1)); -by (eresolve_tac [exE] 1); -by (eresolve_tac [mp] 1); -by (eresolve_tac [spec] 1); - diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/theorems-out.txt --- a/doc-src/Intro/theorems-out.txt Mon Aug 27 20:19:09 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,65 +0,0 @@ -> goal Nat.thy "(k+m)+n = k+(m+n)"; -Level 0 -k + m + n = k + (m + n) - 1. k + m + n = k + (m + n) -val it = [] : thm list -> by (resolve_tac [induct] 1); -Level 1 -k + m + n = k + (m + n) - 1. k + m + n = 0 - 2. !!x. k + m + n = x ==> k + m + n = Suc(x) -val it = () : unit -> back(); -Level 1 -k + m + n = k + (m + n) - 1. k + m + n = k + 0 - 2. !!x. k + m + n = k + x ==> k + m + n = k + Suc(x) -val it = () : unit -> back(); -Level 1 -k + m + n = k + (m + n) - 1. k + m + 0 = k + (m + 0) - 2. !!x. k + m + x = k + (m + x) ==> k + m + Suc(x) = k + (m + Suc(x)) -val it = () : unit -> back(); -Level 1 -k + m + n = k + (m + n) - 1. k + m + n = k + (m + 0) - 2. !!x. k + m + n = k + (m + x) ==> k + m + n = k + (m + Suc(x)) -val it = () : unit - -> val nat_congs = prths (mk_congs Nat.thy ["Suc", "op +"]); -?Xa = ?Ya ==> Suc(?Xa) = Suc(?Ya) - -[| ?Xa = ?Ya; ?Xb = ?Yb |] ==> ?Xa + ?Xb = ?Ya + ?Yb - -?Xa = ?Ya ==> Suc(?Xa) = Suc(?Ya) -[| ?Xa = ?Ya; ?Xb = ?Yb |] ==> ?Xa + ?Xb = ?Ya + ?Yb -val nat_congs = [, ] : thm list -> val add_ss = FOL_ss addcongs nat_congs -# addrews [add_0, add_Suc]; -val add_ss = ? : simpset -> goal Nat.thy "(k+m)+n = k+(m+n)"; -Level 0 -k + m + n = k + (m + n) - 1. k + m + n = k + (m + n) -val it = [] : thm list -> by (res_inst_tac [("n","k")] induct 1); -Level 1 -k + m + n = k + (m + n) - 1. 0 + m + n = 0 + (m + n) - 2. !!x. x + m + n = x + (m + n) ==> Suc(x) + m + n = Suc(x) + (m + n) -val it = () : unit -> by (SIMP_TAC add_ss 1); -Level 2 -k + m + n = k + (m + n) - 1. !!x. 
x + m + n = x + (m + n) ==> Suc(x) + m + n = Suc(x) + (m + n) -val it = () : unit -> by (ASM_SIMP_TAC add_ss 1); -Level 3 -k + m + n = k + (m + n) -No subgoals! -val it = () : unit -> val add_assoc = result(); -?k + ?m + ?n = ?k + (?m + ?n) -val add_assoc = : thm diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/Intro/theorems.txt --- a/doc-src/Intro/theorems.txt Mon Aug 27 20:19:09 2012 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,122 +0,0 @@ -Pretty.setmargin 72; (*existing macros just allow this margin*) -print_depth 0; - -(*operations for "thm"*) - -prth mp; - -prth (mp RS mp); - -prth (conjunct1 RS mp); -prth (conjunct1 RSN (2,mp)); - -prth (mp RS conjunct1); -prth (spec RS it); -prth (standard it); - -prth spec; -prth (it RS mp); -prth (it RS conjunct1); -prth (standard it); - -- prth spec; -ALL x. ?P(x) ==> ?P(?x) -- prth (it RS mp); -[| ALL x. ?P3(x) --> ?Q2(x); ?P3(?x1) |] ==> ?Q2(?x1) -- prth (it RS conjunct1); -[| ALL x. ?P4(x) --> ?P6(x) & ?Q5(x); ?P4(?x2) |] ==> ?P6(?x2) -- prth (standard it); -[| ALL x. ?P(x) --> ?Pa(x) & ?Q(x); ?P(?x) |] ==> ?Pa(?x) - -(*flexflex pairs*) -- prth refl; -?a = ?a -- prth exI; -?P(?x) ==> EX x. ?P(x) -- prth (refl RS exI); -?a3(?x) == ?a2(?x) ==> EX x. ?a3(x) = ?a2(x) -- prthq (flexflex_rule it); -EX x. ?a4 = ?a4 - -(*Usage of RL*) -- val reflk = prth (read_instantiate [("a","k")] refl); -k = k -val reflk = Thm {hyps=#,maxidx=#,prop=#,sign=#} : thm -- prth (reflk RS exI); - -uncaught exception THM -- prths ([reflk] RL [exI]); -EX x. x = x - -EX x. k = x - -EX x. x = k - -EX x. k = k - - - -(*tactics *) - -goal FOL.thy "P|P --> P"; -by (resolve_tac [impI] 1); -by (resolve_tac [disjE] 1); -by (assume_tac 3); -by (assume_tac 2); -by (assume_tac 1); -val mythm = prth(result()); - - -goal FOL.thy "(P & Q) | R --> (P | R)"; -by (resolve_tac [impI] 1); -by (eresolve_tac [disjE] 1); -by (dresolve_tac [conjunct1] 1); -by (resolve_tac [disjI1] 1); -by (resolve_tac [disjI2] 2); -by (REPEAT (assume_tac 1)); - - -- goal FOL.thy "(P & Q) | R --> (P | R)"; -Level 0 -P & Q | R --> P | R - 1. P & Q | R --> P | R -- by (resolve_tac [impI] 1); -Level 1 -P & Q | R --> P | R - 1. P & Q | R ==> P | R -- by (eresolve_tac [disjE] 1); -Level 2 -P & Q | R --> P | R - 1. P & Q ==> P | R - 2. R ==> P | R -- by (dresolve_tac [conjunct1] 1); -Level 3 -P & Q | R --> P | R - 1. P ==> P | R - 2. R ==> P | R -- by (resolve_tac [disjI1] 1); -Level 4 -P & Q | R --> P | R - 1. P ==> P - 2. R ==> P | R -- by (resolve_tac [disjI2] 2); -Level 5 -P & Q | R --> P | R - 1. P ==> P - 2. R ==> R -- by (REPEAT (assume_tac 1)); -Level 6 -P & Q | R --> P | R -No subgoals! - - -goal FOL.thy "(P | Q) | R --> P | (Q | R)"; -by (resolve_tac [impI] 1); -by (eresolve_tac [disjE] 1); -by (eresolve_tac [disjE] 1); -by (resolve_tac [disjI1] 1); -by (resolve_tac [disjI2] 2); -by (resolve_tac [disjI1] 2); -by (resolve_tac [disjI2] 3); -by (resolve_tac [disjI2] 3); -by (REPEAT (assume_tac 1)); diff -r f0d87c6b7a2e -r fbf60999dc31 doc-src/ROOT --- a/doc-src/ROOT Mon Aug 27 20:19:09 2012 +0200 +++ b/doc-src/ROOT Mon Aug 27 20:50:10 2012 +0200 @@ -48,6 +48,18 @@ "document/root.tex" "document/style.sty" +session Intro (doc) in "Intro" = Pure + + options [document_variants = "intro"] + theories + files + "../iman.sty" + "../extra.sty" + "../ttbox.sty" + "../proof.sty" + "../manual.bib" + "document/build" + "document/root.tex" + session "HOL-IsarRef" (doc) in "IsarRef/Thy" = HOL + options [browser_info = false, document = false, document_dump = document, document_dump_mode = "tex",