src/Doc/Prog_Prove/Bool_nat_list.thy
changeset 69597 ff784d5a5bfb
parent 69505 cc2d676d5395
child 72656 a17c17ab931c
--- a/src/Doc/Prog_Prove/Bool_nat_list.thy	Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Prog_Prove/Bool_nat_list.thy	Sat Jan 05 17:24:33 2019 +0100
@@ -6,17 +6,17 @@
 
 text\<open>
 \vspace{-4ex}
-\section{\texorpdfstring{Types @{typ bool}, @{typ nat} and \<open>list\<close>}{Types bool, nat and list}}
+\section{\texorpdfstring{Types \<^typ>\<open>bool\<close>, \<^typ>\<open>nat\<close> and \<open>list\<close>}{Types bool, nat and list}}
 
 These are the most important predefined types. We go through them one by one.
 Based on examples we learn how to define (possibly recursive) functions and
 prove theorems about them by induction and simplification.
 
-\subsection{Type \indexed{@{typ bool}}{bool}}
+\subsection{Type \indexed{\<^typ>\<open>bool\<close>}{bool}}
 
 The type of boolean values is a predefined datatype
 @{datatype[display] bool}
-with the two values \indexed{@{const True}}{True} and \indexed{@{const False}}{False} and
+with the two values \indexed{\<^const>\<open>True\<close>}{True} and \indexed{\<^const>\<open>False\<close>}{False} and
 with many predefined functions:  \<open>\<not>\<close>, \<open>\<and>\<close>, \<open>\<or>\<close>, \<open>\<longrightarrow>\<close>, etc. Here is how conjunction could be defined by pattern matching:
 \<close>
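
The definition announced by the preceding sentence falls outside this hunk. As a point of reference, a pattern-matching definition of conjunction in this style might read as follows (a sketch; the name conj is illustrative):

    fun conj :: "bool \<Rightarrow> bool \<Rightarrow> bool" where
    "conj True True = True" |
    "conj _ _ = False"

The wildcard pattern in the second equation covers the three remaining combinations of arguments.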
 
@@ -27,13 +27,13 @@
 text\<open>Both the datatype and function definitions roughly follow the syntax
 of functional programming languages.
 
-\subsection{Type \indexed{@{typ nat}}{nat}}
+\subsection{Type \indexed{\<^typ>\<open>nat\<close>}{nat}}
 
 Natural numbers are another predefined datatype:
-@{datatype[display] nat}\index{Suc@@{const Suc}}
-All values of type @{typ nat} are generated by the constructors
-\<open>0\<close> and @{const Suc}. Thus the values of type @{typ nat} are
-\<open>0\<close>, @{term"Suc 0"}, @{term"Suc(Suc 0)"}, etc.
+@{datatype[display] nat}\index{Suc@\<^const>\<open>Suc\<close>}
+All values of type \<^typ>\<open>nat\<close> are generated by the constructors
+\<open>0\<close> and \<^const>\<open>Suc\<close>. Thus the values of type \<^typ>\<open>nat\<close> are
+\<open>0\<close>, \<^term>\<open>Suc 0\<close>, \<^term>\<open>Suc(Suc 0)\<close>, etc.
 There are many predefined functions: \<open>+\<close>, \<open>*\<close>, \<open>\<le>\<close>, etc. Here is how you could define your own addition:
 \<close>
 
@@ -41,7 +41,7 @@
 "add 0 n = n" |
 "add (Suc m) n = Suc(add m n)"
 
-text\<open>And here is a proof of the fact that @{prop"add m 0 = m"}:\<close>
+text\<open>And here is a proof of the fact that \<^prop>\<open>add m 0 = m\<close>:\<close>
 
 lemma add_02: "add m 0 = m"
 apply(induction m)
@@ -65,10 +65,10 @@
 The command \isacom{apply}\<open>(auto)\<close> instructs Isabelle to try
 and prove all subgoals automatically, essentially by simplifying them.
 Because both subgoals are easy, Isabelle can do it.
-The base case @{prop"add 0 0 = 0"} holds by definition of @{const add},
+The base case \<^prop>\<open>add 0 0 = 0\<close> holds by definition of \<^const>\<open>add\<close>,
 and the induction step is almost as simple:
 \<open>add\<^latex>\<open>~\<close>(Suc m) 0 = Suc(add m 0) = Suc m\<close>
-using first the definition of @{const add} and then the induction hypothesis.
+using first the definition of \<^const>\<open>add\<close> and then the induction hypothesis.
 In summary, both subproofs rely on simplification with function definitions and
 the induction hypothesis.
 As a result of that final \isacom{done}, Isabelle associates the lemma
@@ -97,46 +97,46 @@
   not just for natural numbers but for other types as well.
   For example, given the goal \<open>x + 0 = x\<close>, there is nothing to indicate
   that you are talking about natural numbers. Hence Isabelle can only infer
-  that @{term x} is of some arbitrary type where \<open>0\<close> and \<open>+\<close>
+  that \<^term>\<open>x\<close> is of some arbitrary type where \<open>0\<close> and \<open>+\<close>
   exist. As a consequence, you will be unable to prove the goal.
 %  To alert you to such pitfalls, Isabelle flags numerals without a
 %  fixed type in its output: @ {prop"x+0 = x"}.
   In this particular example, you need to include
   an explicit type constraint, for example \<open>x+0 = (x::nat)\<close>. If there
-  is enough contextual information this may not be necessary: @{prop"Suc x =
-  x"} automatically implies \<open>x::nat\<close> because @{term Suc} is not
+  is enough contextual information this may not be necessary: \<^prop>\<open>Suc x =
+  x\<close> automatically implies \<open>x::nat\<close> because \<^term>\<open>Suc\<close> is not
   overloaded.
 \end{warn}
 
 \subsubsection{An Informal Proof}
 
 Above we gave some terse informal explanation of the proof of
-@{prop"add m 0 = m"}. A more detailed informal exposition of the lemma
+\<^prop>\<open>add m 0 = m\<close>. A more detailed informal exposition of the lemma
 might look like this:
 \bigskip
 
 \noindent
-\textbf{Lemma} @{prop"add m 0 = m"}
+\textbf{Lemma} \<^prop>\<open>add m 0 = m\<close>
 
 \noindent
 \textbf{Proof} by induction on \<open>m\<close>.
 \begin{itemize}
-\item Case \<open>0\<close> (the base case): @{prop"add 0 0 = 0"}
-  holds by definition of @{const add}.
-\item Case @{term"Suc m"} (the induction step):
-  We assume @{prop"add m 0 = m"}, the induction hypothesis (IH),
+\item Case \<open>0\<close> (the base case): \<^prop>\<open>add 0 0 = 0\<close>
+  holds by definition of \<^const>\<open>add\<close>.
+\item Case \<^term>\<open>Suc m\<close> (the induction step):
+  We assume \<^prop>\<open>add m 0 = m\<close>, the induction hypothesis (IH),
   and we need to show \<open>add (Suc m) 0 = Suc m\<close>.
   The proof is as follows:\smallskip
 
   \begin{tabular}{@ {}rcl@ {\quad}l@ {}}
-  @{term "add (Suc m) 0"} &\<open>=\<close>& @{term"Suc(add m 0)"}
+  \<^term>\<open>add (Suc m) 0\<close> &\<open>=\<close>& \<^term>\<open>Suc(add m 0)\<close>
   & by definition of \<open>add\<close>\\
-              &\<open>=\<close>& @{term "Suc m"} & by IH
+              &\<open>=\<close>& \<^term>\<open>Suc m\<close> & by IH
   \end{tabular}
 \end{itemize}
 Throughout this book, \concept{IH} will stand for ``induction hypothesis''.
 
-We have now seen three proofs of @{prop"add m 0 = m"}: the Isabelle one, the
+We have now seen three proofs of \<^prop>\<open>add m 0 = m\<close>: the Isabelle one, the
 terse four lines explaining the base case and the induction step, and just now a
 model of a traditional inductive proof. The three proofs differ in the level
 of detail given and the intended reader: the Isabelle proof is for the
@@ -164,14 +164,14 @@
 
 text\<open>
 \begin{itemize}
-\item Type @{typ "'a list"} is the type of lists over elements of type @{typ 'a}. Because @{typ 'a} is a type variable, lists are in fact \concept{polymorphic}: the elements of a list can be of arbitrary type (but must all be of the same type).
-\item Lists have two constructors: @{const Nil}, the empty list, and @{const Cons}, which puts an element (of type @{typ 'a}) in front of a list (of type @{typ "'a list"}).
-Hence all lists are of the form @{const Nil}, or @{term"Cons x Nil"},
-or @{term"Cons x (Cons y Nil)"}, etc.
+\item Type \<^typ>\<open>'a list\<close> is the type of lists over elements of type \<^typ>\<open>'a\<close>. Because \<^typ>\<open>'a\<close> is a type variable, lists are in fact \concept{polymorphic}: the elements of a list can be of arbitrary type (but must all be of the same type).
+\item Lists have two constructors: \<^const>\<open>Nil\<close>, the empty list, and \<^const>\<open>Cons\<close>, which puts an element (of type \<^typ>\<open>'a\<close>) in front of a list (of type \<^typ>\<open>'a list\<close>).
+Hence all lists are of the form \<^const>\<open>Nil\<close>, or \<^term>\<open>Cons x Nil\<close>,
+or \<^term>\<open>Cons x (Cons y Nil)\<close>, etc.
 \item \isacom{datatype} requires no quotation marks on the
 left-hand side, but on the right-hand side each of the argument
 types of a constructor needs to be enclosed in quotation marks, unless
-it is just an identifier (e.g., @{typ nat} or @{typ 'a}).
+it is just an identifier (e.g., \<^typ>\<open>nat\<close> or \<^typ>\<open>'a\<close>).
 \end{itemize}
 We also define two standard functions, append and reverse:\<close>
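
The two definitions announced here also fall between hunks. In the chapter's style they might read as in the following sketch, which is what the later discussion of app and rev presupposes:

    fun app :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
    "app Nil ys = ys" |
    "app (Cons x xs) ys = Cons x (app xs ys)"

    fun rev :: "'a list \<Rightarrow> 'a list" where
    "rev Nil = Nil" |
    "rev (Cons x xs) = app (rev xs) (Cons x Nil)"

Note that app recurses on its first argument; this is why the proofs discussed further down induct on xs.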
 
@@ -190,17 +190,17 @@
 
 value "rev(Cons True (Cons False Nil))"
 
-text\<open>yields the result @{value "rev(Cons True (Cons False Nil))"}. This works symbolically, too:\<close>
+text\<open>yields the result \<^value>\<open>rev(Cons True (Cons False Nil))\<close>. This works symbolically, too:\<close>
 
 value "rev(Cons a (Cons b Nil))"
 
-text\<open>yields @{value "rev(Cons a (Cons b Nil))"}.
+text\<open>yields \<^value>\<open>rev(Cons a (Cons b Nil))\<close>.
 \medskip
 
 Figure~\ref{fig:MyList} shows the theory created so far.
-Because \<open>list\<close>, @{const Nil}, @{const Cons}, etc.\ are already predefined,
+Because \<open>list\<close>, \<^const>\<open>Nil\<close>, \<^const>\<open>Cons\<close>, etc.\ are already predefined,
  Isabelle prints qualified (long) names when executing this theory, for example, \<open>MyList.Nil\<close>
- instead of @{const Nil}.
+ instead of \<^const>\<open>Nil\<close>.
  To suppress the qualified names you can insert the command
  \texttt{declare [[names\_short]]}.
  This is not recommended in general but is convenient for this unusual example.
@@ -223,11 +223,11 @@
 Just as for natural numbers, there is a proof principle of induction for
 lists. Induction over a list is essentially induction over the length of
 the list, although the length remains implicit. To prove that some property
-\<open>P\<close> holds for all lists \<open>xs\<close>, i.e., \mbox{@{prop"P(xs)"}},
+\<open>P\<close> holds for all lists \<open>xs\<close>, i.e., \mbox{\<^prop>\<open>P(xs)\<close>},
 you need to prove
 \begin{enumerate}
-\item the base case @{prop"P(Nil)"} and
-\item the inductive case @{prop"P(Cons x xs)"} under the assumption @{prop"P(xs)"}, for some arbitrary but fixed \<open>x\<close> and \<open>xs\<close>.
+\item the base case \<^prop>\<open>P(Nil)\<close> and
+\item the inductive case \<^prop>\<open>P(Cons x xs)\<close> under the assumption \<^prop>\<open>P(xs)\<close>, for some arbitrary but fixed \<open>x\<close> and \<open>xs\<close>.
 \end{enumerate}
 This is often called \concept{structural induction} for lists.
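
As a minimal illustration of this proof principle (assuming the app function sketched earlier), Nil can be shown to be a right identity of app by structural induction:

    lemma "app xs Nil = xs"
    apply(induction xs)
    apply(auto)
    done

The base case app Nil Nil = Nil holds by definition of app, and in the Cons case the induction hypothesis app xs Nil = xs reduces app (Cons x xs) Nil to Cons x xs.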
 
@@ -244,13 +244,13 @@
 interchangeable and merely indicate the importance we attach to a
 proposition. Via the bracketed attribute \<open>simp\<close> we also tell Isabelle
 to make the eventual theorem a \conceptnoidx{simplification rule}: future proofs
-involving simplification will replace occurrences of @{term"rev(rev xs)"} by
-@{term"xs"}. The proof is by induction:\<close>
+involving simplification will replace occurrences of \<^term>\<open>rev(rev xs)\<close> by
+\<^term>\<open>xs\<close>. The proof is by induction:\<close>
 
 apply(induction xs)
 
 txt\<open>
-As explained above, we obtain two subgoals, namely the base case (@{const Nil}) and the induction step (@{const Cons}):
+As explained above, we obtain two subgoals, namely the base case (\<^const>\<open>Nil\<close>) and the induction step (\<^const>\<open>Cons\<close>):
 @{subgoals[display,indent=0,margin=65]}
 Let us try to solve both goals automatically:
 \<close>
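
The @{subgoals} antiquotation is expanded only in the generated document; at this point the two subgoals have roughly the following shape (a sketch; Isabelle may choose different bound-variable names):

    1. rev (rev Nil) = Nil
    2. \<And>x1 xs. rev (rev xs) = xs \<Longrightarrow> rev (rev (Cons x1 xs)) = Cons x1 xs
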
@@ -272,7 +272,7 @@
 lemma rev_app [simp]: "rev(app xs ys) = app (rev ys) (rev xs)"
 
 txt\<open>There are two variables that we could induct on: \<open>xs\<close> and
-\<open>ys\<close>. Because @{const app} is defined by recursion on
+\<open>ys\<close>. Because \<^const>\<open>app\<close> is defined by recursion on
 the first argument, \<open>xs\<close> is the correct one:
 \<close>
 
@@ -311,10 +311,10 @@
 We find that this time \<open>auto\<close> solves the base case, but the
 induction step merely simplifies to
 @{subgoals[display,indent=0,goals_limit=1]}
-The missing lemma is associativity of @{const app},
+The missing lemma is associativity of \<^const>\<open>app\<close>,
 which we insert in front of the failed lemma \<open>rev_app\<close>.
 
-\subsubsection{Associativity of @{const app}}
+\subsubsection{Associativity of \<^const>\<open>app\<close>}
 
 The canonical proof procedure succeeds without further ado:
 \<close>
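
The proof script itself is elided from this hunk; the "canonical proof procedure" is the induction-plus-auto pattern used throughout the chapter, so a sketch would be (the lemma name is illustrative):

    lemma app_assoc [simp]: "app (app xs ys) zs = app xs (app ys zs)"
    apply(induction xs)
    apply(auto)
    done
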
@@ -340,27 +340,27 @@
 
 \subsubsection{Another Informal Proof}
 
-Here is the informal proof of associativity of @{const app}
+Here is the informal proof of associativity of \<^const>\<open>app\<close>
 corresponding to the Isabelle proof above.
 \bigskip
 
 \noindent
-\textbf{Lemma} @{prop"app (app xs ys) zs = app xs (app ys zs)"}
+\textbf{Lemma} \<^prop>\<open>app (app xs ys) zs = app xs (app ys zs)\<close>
 
 \noindent
 \textbf{Proof} by induction on \<open>xs\<close>.
 \begin{itemize}
-\item Case \<open>Nil\<close>: \ @{prop"app (app Nil ys) zs = app ys zs"} \<open>=\<close>
-  \mbox{@{term"app Nil (app ys zs)"}} \ holds by definition of \<open>app\<close>.
+\item Case \<open>Nil\<close>: \ \<^prop>\<open>app (app Nil ys) zs = app ys zs\<close> \<open>=\<close>
+  \mbox{\<^term>\<open>app Nil (app ys zs)\<close>} \ holds by definition of \<open>app\<close>.
 \item Case \<open>Cons x xs\<close>: We assume
-  \begin{center} \hfill @{term"app (app xs ys) zs"} \<open>=\<close>
-  @{term"app xs (app ys zs)"} \hfill (IH) \end{center}
+  \begin{center} \hfill \<^term>\<open>app (app xs ys) zs\<close> \<open>=\<close>
+  \<^term>\<open>app xs (app ys zs)\<close> \hfill (IH) \end{center}
   and we need to show
-  \begin{center} @{prop"app (app (Cons x xs) ys) zs = app (Cons x xs) (app ys zs)"}.\end{center}
+  \begin{center} \<^prop>\<open>app (app (Cons x xs) ys) zs = app (Cons x xs) (app ys zs)\<close>.\end{center}
   The proof is as follows:\smallskip
 
   \begin{tabular}{@ {}l@ {\quad}l@ {}}
-  @{term"app (app (Cons x xs) ys) zs"}\\
+  \<^term>\<open>app (app (Cons x xs) ys) zs\<close>\\
   \<open>= app (Cons x (app xs ys)) zs\<close> & by definition of \<open>app\<close>\\
   \<open>= Cons x (app (app xs ys) zs)\<close> & by definition of \<open>app\<close>\\
   \<open>= Cons x (app xs (app ys zs))\<close> & by IH\\
@@ -371,14 +371,14 @@
 
 \noindent Didn't we say earlier that all proofs are by simplification? But
 in both cases, going from left to right, the last equality step is not a
-simplification at all! In the base case it is @{prop"app ys zs = app Nil (app
-ys zs)"}. It appears almost mysterious because we suddenly complicate the
+simplification at all! In the base case it is \<^prop>\<open>app ys zs = app Nil (app
+ys zs)\<close>. It appears almost mysterious because we suddenly complicate the
 term by appending \<open>Nil\<close> on the left. What is really going on is this:
-when proving some equality \mbox{@{prop"s = t"}}, both \<open>s\<close> and \<open>t\<close> are
+when proving some equality \mbox{\<^prop>\<open>s = t\<close>}, both \<open>s\<close> and \<open>t\<close> are
 simplified until they ``meet in the middle''. This heuristic for equality proofs
 works well for a functional programming context like ours. In the base case
-both @{term"app (app Nil ys) zs"} and @{term"app Nil (app
-ys zs)"} are simplified to @{term"app ys zs"}, the term in the middle.
+both \<^term>\<open>app (app Nil ys) zs\<close> and \<^term>\<open>app Nil (app
+ys zs)\<close> are simplified to \<^term>\<open>app ys zs\<close>, the term in the middle.
 
 \subsection{Predefined Lists}
 \label{sec:predeflists}
@@ -386,17 +386,17 @@
 Isabelle's predefined lists are the same as the ones above, but with
 more syntactic sugar:
 \begin{itemize}
-\item \<open>[]\<close> is \indexed{@{const Nil}}{Nil},
-\item @{term"x # xs"} is @{term"Cons x xs"}\index{Cons@@{const Cons}},
+\item \<open>[]\<close> is \indexed{\<^const>\<open>Nil\<close>}{Nil},
+\item \<^term>\<open>x # xs\<close> is \<^term>\<open>Cons x xs\<close>\index{Cons@\<^const>\<open>Cons\<close>},
 \item \<open>[x\<^sub>1, \<dots>, x\<^sub>n]\<close> is \<open>x\<^sub>1 # \<dots> # x\<^sub>n # []\<close>, and
-\item @{term "xs @ ys"} is @{term"app xs ys"}.
+\item \<^term>\<open>xs @ ys\<close> is \<^term>\<open>app xs ys\<close>.
 \end{itemize}
 There is also a large library of predefined functions.
 The most important ones are the length function
-\<open>length :: 'a list \<Rightarrow> nat\<close>\index{length@@{const length}} (with the obvious definition),
-and the \indexed{@{const map}}{map} function that applies a function to all elements of a list:
+\<open>length :: 'a list \<Rightarrow> nat\<close>\index{length@\<^const>\<open>length\<close>} (with the obvious definition),
+and the \indexed{\<^const>\<open>map\<close>}{map} function that applies a function to all elements of a list:
 \begin{isabelle}
-\isacom{fun} @{const map} \<open>::\<close> @{typ[source] "('a \<Rightarrow> 'b) \<Rightarrow> 'a list \<Rightarrow> 'b list"} \isacom{where}\\
+\isacom{fun} \<^const>\<open>map\<close> \<open>::\<close> @{typ[source] "('a \<Rightarrow> 'b) \<Rightarrow> 'a list \<Rightarrow> 'b list"} \isacom{where}\\
 \<open>"\<close>@{thm list.map(1) [of f]}\<open>" |\<close>\\
 \<open>"\<close>@{thm list.map(2) [of f x xs]}\<open>"\<close>
 \end{isabelle}
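
As a small usage sketch of these predefined functions (the statement is chosen for illustration only), the induction scheme from above carries over unchanged:

    lemma "length (map f xs) = length xs"
    apply(induction xs)
    apply(auto)
    done

The library already contains this fact as a default simplification rule, so auto alone would close the goal as well.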
@@ -404,17 +404,17 @@
 \ifsem
 Also useful are the \concept{head} of a list, its first element,
 and the \concept{tail}, the rest of the list:
-\begin{isabelle}\index{hd@@{const hd}}
+\begin{isabelle}\index{hd@\<^const>\<open>hd\<close>}
 \isacom{fun} \<open>hd :: 'a list \<Rightarrow> 'a\<close>\\
-@{prop"hd(x#xs) = x"}
+\<^prop>\<open>hd(x#xs) = x\<close>
 \end{isabelle}
-\begin{isabelle}\index{tl@@{const tl}}
+\begin{isabelle}\index{tl@\<^const>\<open>tl\<close>}
 \isacom{fun} \<open>tl :: 'a list \<Rightarrow> 'a list\<close>\\
-@{prop"tl [] = []"} \<open>|\<close>\\
-@{prop"tl(x#xs) = xs"}
+\<^prop>\<open>tl [] = []\<close> \<open>|\<close>\\
+\<^prop>\<open>tl(x#xs) = xs\<close>
 \end{isabelle}
-Note that since HOL is a logic of total functions, @{term"hd []"} is defined,
-but we do not know what the result is. That is, @{term"hd []"} is not undefined
+Note that since HOL is a logic of total functions, \<^term>\<open>hd []\<close> is defined,
+but we do not know what the result is. That is, \<^term>\<open>hd []\<close> is not undefined
 but underdefined.
 \fi
 %
@@ -422,44 +422,44 @@
 From now on lists are always the predefined lists.
 
 \ifsem\else
-\subsection{Types @{typ int} and @{typ real}}
+\subsection{Types \<^typ>\<open>int\<close> and \<^typ>\<open>real\<close>}
 
-In addition to @{typ nat} there are also the types @{typ int} and @{typ real}, the mathematical integers
+In addition to \<^typ>\<open>nat\<close> there are also the types \<^typ>\<open>int\<close> and \<^typ>\<open>real\<close>, the mathematical integers
 and real numbers. As mentioned above, numerals and most of the standard arithmetic operations are overloaded.
-In particular they are defined on @{typ int} and @{typ real}.
+In particular they are defined on \<^typ>\<open>int\<close> and \<^typ>\<open>real\<close>.
 
 \begin{warn}
 There are two infix exponentiation operators:
-@{term "(^)"} for @{typ nat} and @{typ int} (with exponent of type @{typ nat} in both cases)
-and @{term "(powr)"} for @{typ real}.
+\<^term>\<open>(^)\<close> for \<^typ>\<open>nat\<close> and \<^typ>\<open>int\<close> (with exponent of type \<^typ>\<open>nat\<close> in both cases)
+and \<^term>\<open>(powr)\<close> for \<^typ>\<open>real\<close>.
 \end{warn}
 \begin{warn}
-Type  @{typ int} is already part of theory @{theory Main}, but in order to use @{typ real} as well, you have to import
-theory @{theory Complex_Main} instead of @{theory Main}.
+Type  \<^typ>\<open>int\<close> is already part of theory \<^theory>\<open>Main\<close>, but in order to use \<^typ>\<open>real\<close> as well, you have to import
+theory \<^theory>\<open>Complex_Main\<close> instead of \<^theory>\<open>Main\<close>.
 \end{warn}
 
 There are three conversion functions, meaning inclusions:
 \begin{quote}
 \begin{tabular}{rcl}
-@{const int} &\<open>::\<close>& @{typ "nat \<Rightarrow> int"}\\
-@{const real} &\<open>::\<close>& @{typ "nat \<Rightarrow> real"}\\
-@{const real_of_int} &\<open>::\<close>& @{typ "int \<Rightarrow> real"}\\
+\<^const>\<open>int\<close> &\<open>::\<close>& \<^typ>\<open>nat \<Rightarrow> int\<close>\\
+\<^const>\<open>real\<close> &\<open>::\<close>& \<^typ>\<open>nat \<Rightarrow> real\<close>\\
+\<^const>\<open>real_of_int\<close> &\<open>::\<close>& \<^typ>\<open>int \<Rightarrow> real\<close>\\
 \end{tabular}
 \end{quote}
 
 Isabelle inserts these conversion functions automatically once you import \<open>Complex_Main\<close>.
 If there are multiple type-correct completions, Isabelle chooses an arbitrary one.
 For example, the input \noquotes{@{term[source] "(i::int) + (n::nat)"}} has the unique
-type-correct completion @{term"(i::int) + int(n::nat)"}. In contrast,
+type-correct completion \<^term>\<open>(i::int) + int(n::nat)\<close>. In contrast,
 \noquotes{@{term[source] "((n::nat) + n) :: real"}} has two type-correct completions,
 \noquotes{@{term[source]"real(n+n)"}} and \noquotes{@{term[source]"real n + real n"}}.
 
 There are also the coercion functions in the other direction:
 \begin{quote}
 \begin{tabular}{rcl}
-@{const nat} &\<open>::\<close>& @{typ "int \<Rightarrow> nat"}\\
-@{const floor} &\<open>::\<close>& @{typ "real \<Rightarrow> int"}\\
-@{const ceiling} &\<open>::\<close>& @{typ "real \<Rightarrow> int"}\\
+\<^const>\<open>nat\<close> &\<open>::\<close>& \<^typ>\<open>int \<Rightarrow> nat\<close>\\
+\<^const>\<open>floor\<close> &\<open>::\<close>& \<^typ>\<open>real \<Rightarrow> int\<close>\\
+\<^const>\<open>ceiling\<close> &\<open>::\<close>& \<^typ>\<open>real \<Rightarrow> int\<close>\\
 \end{tabular}
 \end{quote}
 \fi
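
A minimal sketch of the automatic coercion insertion just described (assuming Complex_Main is imported):

    lemma "0 \<le> real (n::nat)"
    apply(auto)
    done

    lemma "(i::int) + (n::nat) = i + int n"
    apply(auto)
    done

In the second lemma the left-hand side is completed to i + int n during parsing, so both sides of the equation are literally the same term.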
@@ -473,29 +473,29 @@
 \end{exercise}
 
 \begin{exercise}
-Start from the definition of @{const add} given above.
-Prove that @{const add} is associative and commutative.
-Define a recursive function \<open>double\<close> \<open>::\<close> @{typ"nat \<Rightarrow> nat"}
-and prove @{prop"double m = add m m"}.
+Start from the definition of \<^const>\<open>add\<close> given above.
+Prove that \<^const>\<open>add\<close> is associative and commutative.
+Define a recursive function \<open>double\<close> \<open>::\<close> \<^typ>\<open>nat \<Rightarrow> nat\<close>
+and prove \<^prop>\<open>double m = add m m\<close>.
 \end{exercise}
 
 \begin{exercise}
-Define a function \<open>count ::\<close> @{typ"'a \<Rightarrow> 'a list \<Rightarrow> nat"}
+Define a function \<open>count ::\<close> \<^typ>\<open>'a \<Rightarrow> 'a list \<Rightarrow> nat\<close>
 that counts the number of occurrences of an element in a list. Prove
-@{prop"count x xs \<le> length xs"}.
+\<^prop>\<open>count x xs \<le> length xs\<close>.
 \end{exercise}
 
 \begin{exercise}
-Define a recursive function \<open>snoc ::\<close> @{typ"'a list \<Rightarrow> 'a \<Rightarrow> 'a list"}
+Define a recursive function \<open>snoc ::\<close> \<^typ>\<open>'a list \<Rightarrow> 'a \<Rightarrow> 'a list\<close>
 that appends an element to the end of a list. With the help of \<open>snoc\<close>
-define a recursive function \<open>reverse ::\<close> @{typ"'a list \<Rightarrow> 'a list"}
-that reverses a list. Prove @{prop"reverse(reverse xs) = xs"}.
+define a recursive function \<open>reverse ::\<close> \<^typ>\<open>'a list \<Rightarrow> 'a list\<close>
+that reverses a list. Prove \<^prop>\<open>reverse(reverse xs) = xs\<close>.
 \end{exercise}
 
 \begin{exercise}
-Define a recursive function \<open>sum_upto ::\<close> @{typ"nat \<Rightarrow> nat"} such that
+Define a recursive function \<open>sum_upto ::\<close> \<^typ>\<open>nat \<Rightarrow> nat\<close> such that
 \mbox{\<open>sum_upto n\<close>} \<open>=\<close> \<open>0 + ... + n\<close> and prove
-@{prop" sum_upto (n::nat) = n * (n+1) div 2"}.
+\<^prop>\<open> sum_upto (n::nat) = n * (n+1) div 2\<close>.
 \end{exercise}
 \<close>
 (*<*)