src/Doc/Prog_Prove/Bool_nat_list.thy
changeset 69505 cc2d676d5395
parent 68919 027219002f32
child 69597 ff784d5a5bfb
begin
(*>*)

text\<open>
\vspace{-4ex}
\section{\texorpdfstring{Types @{typ bool}, @{typ nat} and \<open>list\<close>}{Types bool, nat and list}}

These are the most important predefined types. We go through them one by one.
Based on examples we learn how to define (possibly recursive) functions and
prove theorems about them by induction and simplification.

\subsection{Type \indexed{@{typ bool}}{bool}}

The type of boolean values is a predefined datatype
@{datatype[display] bool}
with the two values \indexed{@{const True}}{True} and \indexed{@{const False}}{False} and
with many predefined functions:  \<open>\<not>\<close>, \<open>\<and>\<close>, \<open>\<or>\<close>, \<open>\<longrightarrow>\<close>, etc. Here is how conjunction could be defined by pattern matching:
\<close>

fun conj :: "bool \<Rightarrow> bool \<Rightarrow> bool" where
"conj True True = True" |
"conj _ _ = False"
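
text\<open>For a quick check we can evaluate the new function on concrete arguments,
using the \isacom{value} command that is introduced properly further below:\<close>

value "conj True False"

text\<open>which yields @{const False} by the second defining equation.\<close>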
\subsection{Type \indexed{@{typ nat}}{nat}}

Natural numbers are another predefined datatype:
@{datatype[display] nat}\index{Suc@@{const Suc}}
All values of type @{typ nat} are generated by the constructors
\<open>0\<close> and @{const Suc}. Thus the values of type @{typ nat} are
\<open>0\<close>, @{term"Suc 0"}, @{term"Suc(Suc 0)"}, etc.
There are many predefined functions: \<open>+\<close>, \<open>*\<close>, \<open>\<le>\<close>, etc. Here is how you could define your own addition:
\<close>

fun add :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
"add 0 n = n" |
"add (Suc m) n = Suc(add m n)"
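
text\<open>A quick check by evaluation:\<close>

value "add (Suc 0) (Suc (Suc 0))"

text\<open>which yields \<open>Suc (Suc (Suc 0))\<close>, i.e., \<open>3\<close>.\<close>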
(*<*)
lemma "add m 0 = m"
apply(induction m)
(*>*)
txt\<open>The \isacom{lemma} command starts the proof and gives the lemma
a name, \<open>add_02\<close>. Properties of recursively defined functions
need to be established by induction in most cases.
Command \isacom{apply}\<open>(induction m)\<close> instructs Isabelle to
start a proof by induction on \<open>m\<close>. In response, it will show the
following proof state\ifsem\footnote{See page \pageref{proof-state} for how to
display the proof state.}\fi:
@{subgoals[display,indent=0]}
The numbered lines are known as \emph{subgoals}.
The first subgoal is the base case, the second one the induction step.
The prefix \<open>\<And>m.\<close> is Isabelle's way of saying ``for an arbitrary but fixed \<open>m\<close>''. The \<open>\<Longrightarrow>\<close> separates assumptions from the conclusion.
The command \isacom{apply}\<open>(auto)\<close> instructs Isabelle to try
and prove all subgoals automatically, essentially by simplifying them.
Because both subgoals are easy, Isabelle can do it.
The base case @{prop"add 0 0 = 0"} holds by definition of @{const add},
and the induction step is almost as simple:
\<open>add\<^latex>\<open>~\<close>(Suc m) 0 = Suc(add m 0) = Suc m\<close>
using first the definition of @{const add} and then the induction hypothesis.
In summary, both subproofs rely on simplification with function definitions and
the induction hypothesis.
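
Assembled from the commands just discussed, the complete proof script reads as follows:
\begin{isabelle}
\isacom{lemma} \<open>add_02: "add m 0 = m"\<close>\\
\isacom{apply}\<open>(induction m)\<close>\\
\isacom{apply}\<open>(auto)\<close>\\
\isacom{done}
\end{isabelle}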
As a result of that final \isacom{done}, Isabelle associates the lemma
just proved with its name. You can now inspect the lemma with the command
\<close>

thm add_02

txt\<open>which displays @{thm[show_question_marks,display] add_02} The free
variable \<open>m\<close> has been replaced by the \concept{unknown}
\<open>?m\<close>. There is no logical difference between the two but there is an
operational one: unknowns can be instantiated, which is what you want after
some lemma has been proved.

Note that there is also a proof method \<open>induct\<close>, which behaves almost
like \<open>induction\<close>; the difference is explained in \autoref{ch:Isar}.

\begin{warn}
Terminology: We use \concept{lemma}, \concept{theorem} and \concept{rule}
interchangeably for propositions that have been proved.
\end{warn}
\begin{warn}
  Numerals (\<open>0\<close>, \<open>1\<close>, \<open>2\<close>, \dots) and most of the standard
  arithmetic operations (\<open>+\<close>, \<open>-\<close>, \<open>*\<close>, \<open>\<le>\<close>,
  \<open><\<close>, etc.) are overloaded: they are available
  not just for natural numbers but for other types as well.
  For example, given the goal \<open>x + 0 = x\<close>, there is nothing to indicate
  that you are talking about natural numbers. Hence Isabelle can only infer
  that @{term x} is of some arbitrary type where \<open>0\<close> and \<open>+\<close>
  exist. As a consequence, you will be unable to prove the goal.
%  To alert you to such pitfalls, Isabelle flags numerals without a
%  fixed type in its output: @ {prop"x+0 = x"}.
  In this particular example, you need to include
  an explicit type constraint, for example \<open>x+0 = (x::nat)\<close>. If there
  is enough contextual information this may not be necessary: @{prop"Suc x =
  x"} automatically implies \<open>x::nat\<close> because @{term Suc} is not
  overloaded.
\end{warn}

\subsubsection{An Informal Proof}

\noindent
\textbf{Lemma} @{prop"add m 0 = m"}

\noindent
\textbf{Proof} by induction on \<open>m\<close>.
\begin{itemize}
\item Case \<open>0\<close> (the base case): @{prop"add 0 0 = 0"}
  holds by definition of @{const add}.
\item Case @{term"Suc m"} (the induction step):
  We assume @{prop"add m 0 = m"}, the induction hypothesis (IH),
  and we need to show \<open>add (Suc m) 0 = Suc m\<close>.
  The proof is as follows:\smallskip

  \begin{tabular}{@ {}rcl@ {\quad}l@ {}}
  @{term "add (Suc m) 0"} &\<open>=\<close>& @{term"Suc(add m 0)"}
  & by definition of \<open>add\<close>\\
              &\<open>=\<close>& @{term "Suc m"} & by IH
  \end{tabular}
\end{itemize}
Throughout this book, \concept{IH} will stand for ``induction hypothesis''.

We have now seen three proofs of @{prop"add m 0 = m"}: the Isabelle one, the
as informal text comprehensible to a reader familiar with traditional
mathematical proofs. Later on we will introduce an Isabelle proof language
that is closer to traditional informal mathematical language and is often
directly readable.

\subsection{Type \indexed{\<open>list\<close>}{list}}

Although lists are already predefined, we define our own copy for
demonstration purposes:
\<close>
(*<*)

fun rev :: "'a list \<Rightarrow> 'a list" where
"rev Nil = Nil" |
"rev (Cons x xs) = app (rev xs) (Cons x Nil)"

text\<open>By default, variables \<open>xs\<close>, \<open>ys\<close> and \<open>zs\<close> are of
\<open>list\<close> type.

Command \indexed{\isacommand{value}}{value} evaluates a term. For example,\<close>

value "rev(Cons True (Cons False Nil))"

text\<open>yields @{value "rev(Cons a (Cons b Nil))"}.
\medskip

Figure~\ref{fig:MyList} shows the theory created so far.
Because \<open>list\<close>, @{const Nil}, @{const Cons}, etc.\ are already predefined,
 Isabelle prints qualified (long) names when executing this theory, for example, \<open>MyList.Nil\<close>
 instead of @{const Nil}.
 To suppress the qualified names you can insert the command
 \texttt{declare [[names\_short]]}.
 This is not recommended in general but is convenient for this unusual example.
% Notice where the

\subsubsection{Structural Induction for Lists}

Just as for natural numbers, there is a proof principle of induction for
lists. Induction over a list is essentially induction over the length of
the list, although the length remains implicit. To prove that some property
\<open>P\<close> holds for all lists \<open>xs\<close>, i.e., \mbox{@{prop"P(xs)"}},
you need to prove
\begin{enumerate}
\item the base case @{prop"P(Nil)"} and
\item the inductive case @{prop"P(Cons x xs)"} under the assumption @{prop"P(xs)"}, for some arbitrary but fixed \<open>x\<close> and \<open>xs\<close>.
\end{enumerate}
This is often called \concept{structural induction} for lists.

\subsection{The Proof Process}


theorem rev_rev [simp]: "rev(rev xs) = xs"

txt\<open>Commands \isacom{theorem} and \isacom{lemma} are
interchangeable and merely indicate the importance we attach to a
proposition. Via the bracketed attribute \<open>simp\<close> we also tell Isabelle
to make the eventual theorem a \conceptnoidx{simplification rule}: future proofs
involving simplification will replace occurrences of @{term"rev(rev xs)"} by
@{term"xs"}. The proof is by induction:\<close>

apply(induction xs)
(*<*)
oops
(*>*)
lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"

txt\<open>There are two variables that we could induct on: \<open>xs\<close> and
\<open>ys\<close>. Because @{const app} is defined by recursion on
the first argument, \<open>xs\<close> is the correct one:
\<close>

apply(induction xs)

txt\<open>This time not even the base case is solved automatically:\<close>
lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"
apply(induction xs)
apply(auto)

txt\<open>
We find that this time \<open>auto\<close> solves the base case, but the
induction step merely simplifies to
@{subgoals[display,indent=0,goals_limit=1]}
The missing lemma is associativity of @{const app},
which we insert in front of the failed lemma \<open>rev_app\<close>.

\subsubsection{Associativity of @{const app}}

The canonical proof procedure succeeds without further ado:
\<close>
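
txt\<open>A sketch of that canonical script is the following, where the name \<open>app_assoc\<close> is
ours and the \<open>simp\<close> attribute makes the lemma available to the subsequent proof of
\<open>rev_app\<close>:
\begin{isabelle}
\isacom{lemma} \<open>app_assoc [simp]: "app (app xs ys) zs = app xs (app ys zs)"\<close>\\
\isacom{apply}\<open>(induction xs)\<close>\\
\isacom{apply}\<open>(auto)\<close>\\
\isacom{done}
\end{isabelle}
\<close>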

\noindent
\textbf{Lemma} @{prop"app (app xs ys) zs = app xs (app ys zs)"}

\noindent
\textbf{Proof} by induction on \<open>xs\<close>.
\begin{itemize}
\item Case \<open>Nil\<close>: \ @{prop"app (app Nil ys) zs = app ys zs"} \<open>=\<close>
  \mbox{@{term"app Nil (app ys zs)"}} \ holds by definition of \<open>app\<close>.
\item Case \<open>Cons x xs\<close>: We assume
  \begin{center} \hfill @{term"app (app xs ys) zs"} \<open>=\<close>
  @{term"app xs (app ys zs)"} \hfill (IH) \end{center}
  and we need to show
  \begin{center} @{prop"app (app (Cons x xs) ys) zs = app (Cons x xs) (app ys zs)"}.\end{center}
  The proof is as follows:\smallskip

  \begin{tabular}{@ {}l@ {\quad}l@ {}}
  @{term"app (app (Cons x xs) ys) zs"}\\
  \<open>= app (Cons x (app xs ys)) zs\<close> & by definition of \<open>app\<close>\\
  \<open>= Cons x (app (app xs ys) zs)\<close> & by definition of \<open>app\<close>\\
  \<open>= Cons x (app xs (app ys zs))\<close> & by IH\\
  \<open>= app (Cons x xs) (app ys zs)\<close> & by definition of \<open>app\<close>
  \end{tabular}
\end{itemize}
\medskip

\noindent Didn't we say earlier that all proofs are by simplification? But
in both cases, going from left to right, the last equality step is not a
simplification at all! In the base case it is @{prop"app ys zs = app Nil (app
ys zs)"}. It appears almost mysterious because we suddenly complicate the
term by appending \<open>Nil\<close> on the left. What is really going on is this:
when proving some equality \mbox{@{prop"s = t"}}, both \<open>s\<close> and \<open>t\<close> are
simplified until they ``meet in the middle''. This heuristic for equality proofs
works well for a functional programming context like ours. In the base case
both @{term"app (app Nil ys) zs"} and @{term"app Nil (app
ys zs)"} are simplified to @{term"app ys zs"}, the term in the middle.

\label{sec:predeflists}

Isabelle's predefined lists are the same as the ones above, but with
more syntactic sugar:
\begin{itemize}
\item \<open>[]\<close> is \indexed{@{const Nil}}{Nil},
\item @{term"x # xs"} is @{term"Cons x xs"}\index{Cons@@{const Cons}},
\item \<open>[x\<^sub>1, \<dots>, x\<^sub>n]\<close> is \<open>x\<^sub>1 # \<dots> # x\<^sub>n # []\<close>, and
\item @{term "xs @ ys"} is @{term"app xs ys"}.
\end{itemize}
There is also a large library of predefined functions.
The most important ones are the length function
\<open>length :: 'a list \<Rightarrow> nat\<close>\index{length@@{const length}} (with the obvious definition),
and the \indexed{@{const map}}{map} function that applies a function to all elements of a list:
\begin{isabelle}
\isacom{fun} @{const map} \<open>::\<close> @{typ[source] "('a \<Rightarrow> 'b) \<Rightarrow> 'a list \<Rightarrow> 'b list"} \isacom{where}\\
\<open>"\<close>@{thm list.map(1) [of f]}\<open>" |\<close>\\
\<open>"\<close>@{thm list.map(2) [of f x xs]}\<open>"\<close>
\end{isabelle}
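For example, \<open>map Suc [0, 1, 2] = [1, 2, 3]\<close>; in general, @{const map} leaves the
length of a list unchanged.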

\ifsem
Also useful are the \concept{head} of a list, its first element,
and the \concept{tail}, the rest of the list:
\begin{isabelle}\index{hd@@{const hd}}
\isacom{fun} \<open>hd :: 'a list \<Rightarrow> 'a\<close>\\
@{prop"hd(x#xs) = x"}
\end{isabelle}
\begin{isabelle}\index{tl@@{const tl}}
\isacom{fun} \<open>tl :: 'a list \<Rightarrow> 'a list\<close>\\
@{prop"tl [] = []"} \<open>|\<close>\\
@{prop"tl(x#xs) = xs"}
\end{isabelle}
Note that since HOL is a logic of total functions, @{term"hd []"} is defined,
but we do not know what the result is. That is, @{term"hd []"} is not undefined
but underdefined.
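For example, \<open>hd [1, 2, 3] = 1\<close> and \<open>tl [1, 2, 3] = [2, 3]\<close>, but about \<open>hd []\<close>
we know nothing beyond its type.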
\end{exercise}

\begin{exercise}
Start from the definition of @{const add} given above.
Prove that @{const add} is associative and commutative.
Define a recursive function \<open>double\<close> \<open>::\<close> @{typ"nat \<Rightarrow> nat"}
and prove @{prop"double m = add m m"}.
\end{exercise}

\begin{exercise}
Define a function \<open>count ::\<close> @{typ"'a \<Rightarrow> 'a list \<Rightarrow> nat"}
that counts the number of occurrences of an element in a list. Prove
@{prop"count x xs \<le> length xs"}.
\end{exercise}

\begin{exercise}
Define a recursive function \<open>snoc ::\<close> @{typ"'a list \<Rightarrow> 'a \<Rightarrow> 'a list"}
that appends an element to the end of a list. With the help of \<open>snoc\<close>
define a recursive function \<open>reverse ::\<close> @{typ"'a list \<Rightarrow> 'a list"}
that reverses a list. Prove @{prop"reverse(reverse xs) = xs"}.
\end{exercise}

\begin{exercise}
Define a recursive function \<open>sum_upto ::\<close> @{typ"nat \<Rightarrow> nat"} such that
\mbox{\<open>sum_upto n\<close>} \<open>=\<close> \<open>0 + ... + n\<close> and prove
@{prop" sum_upto (n::nat) = n * (n+1) div 2"}.
\end{exercise}
\<close>
(*<*)
end