Merge.
--- a/doc-src/IsarImplementation/Thy/ML.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarImplementation/Thy/ML.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,6 +1,6 @@
-(* $Id$ *)
-
-theory "ML" imports base begin
+theory "ML"
+imports Base
+begin
chapter {* Advanced ML programming *}
--- a/doc-src/IsarImplementation/Thy/ROOT.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarImplementation/Thy/ROOT.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,11 +1,11 @@
-
-(* $Id$ *)
-
-use_thy "prelim";
-use_thy "logic";
-use_thy "tactic";
-use_thy "proof";
-use_thy "isar";
-use_thy "locale";
-use_thy "integration";
-use_thy "ML";
+use_thys [
+ "Integration",
+ "Isar",
+ "Local_Theory",
+ "Logic",
+ "ML",
+ "Prelim",
+ "Proof",
+ "Syntax",
+ "Tactic"
+];
--- a/doc-src/IsarImplementation/Thy/document/ML.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarImplementation/Thy/document/ML.tex Wed Mar 04 10:45:52 2009 +0100
@@ -3,14 +3,14 @@
\def\isabellecontext{ML}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
\isatagtheory
\isacommand{theory}\isamarkupfalse%
-\ {\isachardoublequoteopen}ML{\isachardoublequoteclose}\ \isakeyword{imports}\ base\ \isakeyword{begin}%
+\ {\isachardoublequoteopen}ML{\isachardoublequoteclose}\isanewline
+\isakeyword{imports}\ Base\isanewline
+\isakeyword{begin}%
\endisatagtheory
{\isafoldtheory}%
%
@@ -275,9 +275,9 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{NAMED\_CRITICAL}\verb|NAMED_CRITICAL: string -> (unit -> 'a) -> 'a| \\
- \indexml{CRITICAL}\verb|CRITICAL: (unit -> 'a) -> 'a| \\
- \indexml{setmp}\verb|setmp: 'a ref -> 'a -> ('b -> 'c) -> 'b -> 'c| \\
+ \indexdef{}{ML}{NAMED\_CRITICAL}\verb|NAMED_CRITICAL: string -> (unit -> 'a) -> 'a| \\
+ \indexdef{}{ML}{CRITICAL}\verb|CRITICAL: (unit -> 'a) -> 'a| \\
+ \indexdef{}{ML}{setmp}\verb|setmp: 'a ref -> 'a -> ('b -> 'c) -> 'b -> 'c| \\
\end{mldecls}
\begin{description}
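
For orientation, the declarations indexed in the hunk above are used roughly as in the following Isabelle/ML sketch; the reference `flag` and the section name "example" are made up for illustration:

    val flag = ref false;

    (* evaluate a thunk inside the named global critical section *)
    val two = NAMED_CRITICAL "example" (fn () => 1 + 1);

    (* temporarily set a reference while a function application runs *)
    val seen = setmp flag true (fn () => ! flag) ();   (* true *)
    val after = ! flag;                                (* restored to false *)
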
@@ -331,7 +331,7 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{op |$>$ }\verb|op |\verb,|,\verb|> : 'a * ('a -> 'b) -> 'b| \\
+ \indexdef{}{ML}{op $\mid$$>$ }\verb|op |\verb,|,\verb|> : 'a * ('a -> 'b) -> 'b| \\
\end{mldecls}%
\end{isamarkuptext}%
\isamarkuptrue%
@@ -410,10 +410,10 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{op |-$>$ }\verb|op |\verb,|,\verb|-> : ('c * 'a) * ('c -> 'a -> 'b) -> 'b| \\
- \indexml{op |$>$$>$ }\verb|op |\verb,|,\verb|>> : ('a * 'c) * ('a -> 'b) -> 'b * 'c| \\
- \indexml{op ||$>$ }\verb|op |\verb,|,\verb||\verb,|,\verb|> : ('c * 'a) * ('a -> 'b) -> 'c * 'b| \\
- \indexml{op ||$>$$>$ }\verb|op |\verb,|,\verb||\verb,|,\verb|>> : ('c * 'a) * ('a -> 'd * 'b) -> ('c * 'd) * 'b| \\
+ \indexdef{}{ML}{op $\mid$-$>$ }\verb|op |\verb,|,\verb|-> : ('c * 'a) * ('c -> 'a -> 'b) -> 'b| \\
+ \indexdef{}{ML}{op $\mid$$>$$>$ }\verb|op |\verb,|,\verb|>> : ('a * 'c) * ('a -> 'b) -> 'b * 'c| \\
+ \indexdef{}{ML}{op $\mid$$\mid$$>$ }\verb|op |\verb,|,\verb||\verb,|,\verb|> : ('c * 'a) * ('a -> 'b) -> 'c * 'b| \\
+ \indexdef{}{ML}{op $\mid$$\mid$$>$$>$ }\verb|op |\verb,|,\verb||\verb,|,\verb|>> : ('c * 'a) * ('a -> 'd * 'b) -> ('c * 'd) * 'b| \\
\end{mldecls}%
\end{isamarkuptext}%
\isamarkuptrue%
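
The pipeline combinators indexed above compose as in this small sketch (toy values only, matching the signatures listed in the hunks):

    val three = 1 |> (fn n => n + 2);                   (* apply: 3 *)
    val sum   = (1, 2) |-> (fn a => fn b => a + b);     (* uncurried apply: 3 *)
    val fst'  = (1, "x") |>> (fn n => n + 1);           (* map first component: (2, "x") *)
    val snd'  = (1, "abc") ||> size;                    (* map second component: (1, 3) *)
    val both  = (1, "abc") ||>> (fn s => (size s, s));  (* ((1, 3), "abc") *)
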
@@ -483,8 +483,8 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{fold}\verb|fold: ('a -> 'b -> 'b) -> 'a list -> 'b -> 'b| \\
- \indexml{fold\_map}\verb|fold_map: ('a -> 'b -> 'c * 'b) -> 'a list -> 'b -> 'c list * 'b| \\
+ \indexdef{}{ML}{fold}\verb|fold: ('a -> 'b -> 'b) -> 'a list -> 'b -> 'b| \\
+ \indexdef{}{ML}{fold\_map}\verb|fold_map: ('a -> 'b -> 'c * 'b) -> 'a list -> 'b -> 'c list * 'b| \\
\end{mldecls}%
\end{isamarkuptext}%
\isamarkuptrue%
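
A quick sketch of the iteration combinators listed above (toy values):

    val six = fold (fn x => fn acc => acc + x) [1, 2, 3] 0;          (* 6 *)
    val (doubled, count) =
      fold_map (fn x => fn n => (2 * x, n + 1)) [1, 2, 3] 0;         (* ([2, 4, 6], 3) *)
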
@@ -545,11 +545,11 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{op \#$>$ }\verb|op #> : ('a -> 'b) * ('b -> 'c) -> 'a -> 'c| \\
- \indexml{op \#-$>$ }\verb|op #-> : ('a -> 'c * 'b) * ('c -> 'b -> 'd) -> 'a -> 'd| \\
- \indexml{op \#$>$$>$ }\verb|op #>> : ('a -> 'c * 'b) * ('c -> 'd) -> 'a -> 'd * 'b| \\
- \indexml{op \#\#$>$ }\verb|op ##> : ('a -> 'c * 'b) * ('b -> 'd) -> 'a -> 'c * 'd| \\
- \indexml{op \#\#$>$$>$ }\verb|op ##>> : ('a -> 'c * 'b) * ('b -> 'e * 'd) -> 'a -> ('c * 'e) * 'd| \\
+ \indexdef{}{ML}{op \#$>$ }\verb|op #> : ('a -> 'b) * ('b -> 'c) -> 'a -> 'c| \\
+ \indexdef{}{ML}{op \#-$>$ }\verb|op #-> : ('a -> 'c * 'b) * ('c -> 'b -> 'd) -> 'a -> 'd| \\
+ \indexdef{}{ML}{op \#$>$$>$ }\verb|op #>> : ('a -> 'c * 'b) * ('c -> 'd) -> 'a -> 'd * 'b| \\
+ \indexdef{}{ML}{op \#\#$>$ }\verb|op ##> : ('a -> 'c * 'b) * ('b -> 'd) -> 'a -> 'c * 'd| \\
+ \indexdef{}{ML}{op \#\#$>$$>$ }\verb|op ##>> : ('a -> 'c * 'b) * ('b -> 'e * 'd) -> 'a -> ('c * 'e) * 'd| \\
\end{mldecls}%
\end{isamarkuptext}%
\isamarkuptrue%
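
The composition combinators indexed above chain functions left to right, for example:

    val f = (fn n => n + 1) #> (fn n => n * 2);
    val eight = f 3;                                    (* (3 + 1) * 2 = 8 *)

    val g = (fn n => (n, n + 1)) #-> (fn a => fn b => a * b);
    val twelve = g 3;                                   (* 3 * 4 = 12 *)
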
@@ -576,8 +576,8 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{op ` }\verb|op ` : ('b -> 'a) -> 'b -> 'a * 'b| \\
- \indexml{tap}\verb|tap: ('b -> 'a) -> 'b -> 'b| \\
+ \indexdef{}{ML}{op ` }\verb|op ` : ('b -> 'a) -> 'b -> 'a * 'b| \\
+ \indexdef{}{ML}{tap}\verb|tap: ('b -> 'a) -> 'b -> 'b| \\
\end{mldecls}%
\end{isamarkuptext}%
\isamarkuptrue%
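
The two "keep the value" combinators above behave as in this sketch (toy values):

    val pair = `size "abc";                             (* (3, "abc") *)
    val same = tap writeln "hello";                     (* prints "hello", returns "hello" *)
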
@@ -619,14 +619,14 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{is\_some}\verb|is_some: 'a option -> bool| \\
- \indexml{is\_none}\verb|is_none: 'a option -> bool| \\
- \indexml{the}\verb|the: 'a option -> 'a| \\
- \indexml{these}\verb|these: 'a list option -> 'a list| \\
- \indexml{the\_list}\verb|the_list: 'a option -> 'a list| \\
- \indexml{the\_default}\verb|the_default: 'a -> 'a option -> 'a| \\
- \indexml{try}\verb|try: ('a -> 'b) -> 'a -> 'b option| \\
- \indexml{can}\verb|can: ('a -> 'b) -> 'a -> bool| \\
+ \indexdef{}{ML}{is\_some}\verb|is_some: 'a option -> bool| \\
+ \indexdef{}{ML}{is\_none}\verb|is_none: 'a option -> bool| \\
+ \indexdef{}{ML}{the}\verb|the: 'a option -> 'a| \\
+ \indexdef{}{ML}{these}\verb|these: 'a list option -> 'a list| \\
+ \indexdef{}{ML}{the\_list}\verb|the_list: 'a option -> 'a list| \\
+ \indexdef{}{ML}{the\_default}\verb|the_default: 'a -> 'a option -> 'a| \\
+ \indexdef{}{ML}{try}\verb|try: ('a -> 'b) -> 'a -> 'b option| \\
+ \indexdef{}{ML}{can}\verb|can: ('a -> 'b) -> 'a -> bool| \\
\end{mldecls}%
\end{isamarkuptext}%
\isamarkuptrue%
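
The option combinators indexed above can be exercised as follows (toy values):

    val a = the_default 0 NONE;                         (* 0 *)
    val b = the (SOME 42);                              (* 42 *)
    val c = try hd ([]: int list);                      (* NONE -- the exception is caught *)
    val d = can hd [1, 2];                              (* true *)
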
@@ -659,10 +659,10 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{member}\verb|member: ('b * 'a -> bool) -> 'a list -> 'b -> bool| \\
- \indexml{insert}\verb|insert: ('a * 'a -> bool) -> 'a -> 'a list -> 'a list| \\
- \indexml{remove}\verb|remove: ('b * 'a -> bool) -> 'b -> 'a list -> 'a list| \\
- \indexml{merge}\verb|merge: ('a * 'a -> bool) -> 'a list * 'a list -> 'a list| \\
+ \indexdef{}{ML}{member}\verb|member: ('b * 'a -> bool) -> 'a list -> 'b -> bool| \\
+ \indexdef{}{ML}{insert}\verb|insert: ('a * 'a -> bool) -> 'a -> 'a list -> 'a list| \\
+ \indexdef{}{ML}{remove}\verb|remove: ('b * 'a -> bool) -> 'b -> 'a list -> 'a list| \\
+ \indexdef{}{ML}{merge}\verb|merge: ('a * 'a -> bool) -> 'a list * 'a list -> 'a list| \\
\end{mldecls}%
\end{isamarkuptext}%
\isamarkuptrue%
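
The list operations indexed above take an explicit equality predicate; a minimal sketch (toy values):

    val eq = (op =);
    val found = member eq [1, 2, 3] 2;                  (* true *)
    val xs = insert eq 4 [1, 2, 3];                     (* 4 added, since it is not yet a member *)
    val ys = remove eq 2 [1, 2, 3];                     (* [1, 3] *)
    val zs = merge eq ([1, 2], [2, 3]);                 (* union of both lists, 2 kept only once *)
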
@@ -690,19 +690,19 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexmlexception{AList.DUP}\verb|exception AList.DUP| \\
- \indexml{AList.lookup}\verb|AList.lookup: ('a * 'b -> bool) -> ('b * 'c) list -> 'a -> 'c option| \\
- \indexml{AList.defined}\verb|AList.defined: ('a * 'b -> bool) -> ('b * 'c) list -> 'a -> bool| \\
- \indexml{AList.update}\verb|AList.update: ('a * 'a -> bool) -> ('a * 'b) -> ('a * 'b) list -> ('a * 'b) list| \\
- \indexml{AList.default}\verb|AList.default: ('a * 'a -> bool) -> ('a * 'b) -> ('a * 'b) list -> ('a * 'b) list| \\
- \indexml{AList.delete}\verb|AList.delete: ('a * 'b -> bool) -> 'a -> ('b * 'c) list -> ('b * 'c) list| \\
- \indexml{AList.map\_entry}\verb|AList.map_entry: ('a * 'b -> bool) -> 'a|\isasep\isanewline%
+ \indexdef{}{ML exception}{AList.DUP}\verb|exception AList.DUP| \\
+ \indexdef{}{ML}{AList.lookup}\verb|AList.lookup: ('a * 'b -> bool) -> ('b * 'c) list -> 'a -> 'c option| \\
+ \indexdef{}{ML}{AList.defined}\verb|AList.defined: ('a * 'b -> bool) -> ('b * 'c) list -> 'a -> bool| \\
+ \indexdef{}{ML}{AList.update}\verb|AList.update: ('a * 'a -> bool) -> ('a * 'b) -> ('a * 'b) list -> ('a * 'b) list| \\
+ \indexdef{}{ML}{AList.default}\verb|AList.default: ('a * 'a -> bool) -> ('a * 'b) -> ('a * 'b) list -> ('a * 'b) list| \\
+ \indexdef{}{ML}{AList.delete}\verb|AList.delete: ('a * 'b -> bool) -> 'a -> ('b * 'c) list -> ('b * 'c) list| \\
+ \indexdef{}{ML}{AList.map\_entry}\verb|AList.map_entry: ('a * 'b -> bool) -> 'a|\isasep\isanewline%
\verb| -> ('c -> 'c) -> ('b * 'c) list -> ('b * 'c) list| \\
- \indexml{AList.map\_default}\verb|AList.map_default: ('a * 'a -> bool) -> 'a * 'b -> ('b -> 'b)|\isasep\isanewline%
+ \indexdef{}{ML}{AList.map\_default}\verb|AList.map_default: ('a * 'a -> bool) -> 'a * 'b -> ('b -> 'b)|\isasep\isanewline%
\verb| -> ('a * 'b) list -> ('a * 'b) list| \\
- \indexml{AList.join}\verb|AList.join: ('a * 'a -> bool) -> ('a -> 'b * 'b -> 'b) (*exception DUP*)|\isasep\isanewline%
+ \indexdef{}{ML}{AList.join}\verb|AList.join: ('a * 'a -> bool) -> ('a -> 'b * 'b -> 'b) (*exception DUP*)|\isasep\isanewline%
\verb| -> ('a * 'b) list * ('a * 'b) list -> ('a * 'b) list (*exception AList.DUP*)| \\
- \indexml{AList.merge}\verb|AList.merge: ('a * 'a -> bool) -> ('b * 'b -> bool)|\isasep\isanewline%
+ \indexdef{}{ML}{AList.merge}\verb|AList.merge: ('a * 'a -> bool) -> ('b * 'b -> bool)|\isasep\isanewline%
\verb| -> ('a * 'b) list * ('a * 'b) list -> ('a * 'b) list (*exception AList.DUP*)|
\end{mldecls}%
\end{isamarkuptext}%
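
The AList operations indexed above work on ordinary lists of pairs; a small sketch with hypothetical data:

    val al = [("a", 1), ("b", 2)];
    val v   = AList.lookup (op =) al "a";               (* SOME 1 *)
    val ok  = AList.defined (op =) al "b";              (* true *)
    val al' = AList.update (op =) ("c", 3) al;          (* new entry added; an existing key would be overwritten *)
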
@@ -732,25 +732,25 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexmltype{'a Symtab.table}\verb|type 'a Symtab.table| \\
- \indexmlexception{Symtab.DUP}\verb|exception Symtab.DUP of string| \\
- \indexmlexception{Symtab.SAME}\verb|exception Symtab.SAME| \\
- \indexmlexception{Symtab.UNDEF}\verb|exception Symtab.UNDEF of string| \\
- \indexml{Symtab.empty}\verb|Symtab.empty: 'a Symtab.table| \\
- \indexml{Symtab.lookup}\verb|Symtab.lookup: 'a Symtab.table -> string -> 'a option| \\
- \indexml{Symtab.defined}\verb|Symtab.defined: 'a Symtab.table -> string -> bool| \\
- \indexml{Symtab.update}\verb|Symtab.update: (string * 'a) -> 'a Symtab.table -> 'a Symtab.table| \\
- \indexml{Symtab.default}\verb|Symtab.default: string * 'a -> 'a Symtab.table -> 'a Symtab.table| \\
- \indexml{Symtab.delete}\verb|Symtab.delete: string|\isasep\isanewline%
+ \indexdef{}{ML type}{'a Symtab.table}\verb|type 'a Symtab.table| \\
+ \indexdef{}{ML exception}{Symtab.DUP}\verb|exception Symtab.DUP of string| \\
+ \indexdef{}{ML exception}{Symtab.SAME}\verb|exception Symtab.SAME| \\
+ \indexdef{}{ML exception}{Symtab.UNDEF}\verb|exception Symtab.UNDEF of string| \\
+ \indexdef{}{ML}{Symtab.empty}\verb|Symtab.empty: 'a Symtab.table| \\
+ \indexdef{}{ML}{Symtab.lookup}\verb|Symtab.lookup: 'a Symtab.table -> string -> 'a option| \\
+ \indexdef{}{ML}{Symtab.defined}\verb|Symtab.defined: 'a Symtab.table -> string -> bool| \\
+ \indexdef{}{ML}{Symtab.update}\verb|Symtab.update: (string * 'a) -> 'a Symtab.table -> 'a Symtab.table| \\
+ \indexdef{}{ML}{Symtab.default}\verb|Symtab.default: string * 'a -> 'a Symtab.table -> 'a Symtab.table| \\
+ \indexdef{}{ML}{Symtab.delete}\verb|Symtab.delete: string|\isasep\isanewline%
\verb| -> 'a Symtab.table -> 'a Symtab.table (*exception Symtab.UNDEF*)| \\
- \indexml{Symtab.map\_entry}\verb|Symtab.map_entry: string -> ('a -> 'a)|\isasep\isanewline%
+ \indexdef{}{ML}{Symtab.map\_entry}\verb|Symtab.map_entry: string -> ('a -> 'a)|\isasep\isanewline%
\verb| -> 'a Symtab.table -> 'a Symtab.table| \\
- \indexml{Symtab.map\_default}\verb|Symtab.map_default: (string * 'a) -> ('a -> 'a)|\isasep\isanewline%
+ \indexdef{}{ML}{Symtab.map\_default}\verb|Symtab.map_default: (string * 'a) -> ('a -> 'a)|\isasep\isanewline%
\verb| -> 'a Symtab.table -> 'a Symtab.table| \\
- \indexml{Symtab.join}\verb|Symtab.join: (string -> 'a * 'a -> 'a) (*exception Symtab.DUP/Symtab.SAME*)|\isasep\isanewline%
+ \indexdef{}{ML}{Symtab.join}\verb|Symtab.join: (string -> 'a * 'a -> 'a) (*exception Symtab.DUP/Symtab.SAME*)|\isasep\isanewline%
\verb| -> 'a Symtab.table * 'a Symtab.table|\isasep\isanewline%
\verb| -> 'a Symtab.table (*exception Symtab.DUP*)| \\
- \indexml{Symtab.merge}\verb|Symtab.merge: ('a * 'a -> bool)|\isasep\isanewline%
+ \indexdef{}{ML}{Symtab.merge}\verb|Symtab.merge: ('a * 'a -> bool)|\isasep\isanewline%
\verb| -> 'a Symtab.table * 'a Symtab.table|\isasep\isanewline%
\verb| -> 'a Symtab.table (*exception Symtab.DUP*)|
\end{mldecls}%
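
Symtab values are built and queried as in the following sketch (hypothetical entries), using the |> combinator indexed earlier:

    val tab =
      Symtab.empty
      |> Symtab.update ("a", 1)
      |> Symtab.update ("b", 2);

    val v1 = Symtab.lookup tab "a";                     (* SOME 1 *)
    val v2 = Symtab.lookup tab "c";                     (* NONE *)
    val ok = Symtab.defined tab "b";                    (* true *)
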
--- a/doc-src/IsarImplementation/Thy/document/session.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarImplementation/Thy/document/session.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,21 +1,23 @@
-\input{base.tex}
-
-\input{prelim.tex}
+\input{Base.tex}
-\input{logic.tex}
-
-\input{tactic.tex}
+\input{Integration.tex}
-\input{proof.tex}
-
-\input{isar.tex}
+\input{Isar.tex}
-\input{locale.tex}
+\input{Local_Theory.tex}
-\input{integration.tex}
+\input{Logic.tex}
\input{ML.tex}
+\input{Prelim.tex}
+
+\input{Proof.tex}
+
+\input{Syntax.tex}
+
+\input{Tactic.tex}
+
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "root"
--- a/doc-src/IsarImplementation/implementation.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarImplementation/implementation.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,6 +1,3 @@
-
-%% $Id$
-
\documentclass[12pt,a4paper,fleqn]{report}
\usepackage{latexsym,graphicx}
\usepackage[refpage]{nomencl}
@@ -23,9 +20,6 @@
and Larry Paulson
}
-%FIXME
-%\makeglossary
-
\makeindex
@@ -71,28 +65,24 @@
\listoffigures
\clearfirst
-%\input{intro.tex}
-\input{Thy/document/prelim.tex}
-\input{Thy/document/logic.tex}
-\input{Thy/document/tactic.tex}
-\input{Thy/document/proof.tex}
-\input{Thy/document/isar.tex}
-\input{Thy/document/locale.tex}
-\input{Thy/document/integration.tex}
+\input{Thy/document/Prelim.tex}
+\input{Thy/document/Logic.tex}
+\input{Thy/document/Tactic.tex}
+\input{Thy/document/Proof.tex}
+\input{Thy/document/Syntax.tex}
+\input{Thy/document/Isar.tex}
+\input{Thy/document/Local_Theory.tex}
+\input{Thy/document/Integration.tex}
\appendix
\input{Thy/document/ML.tex}
\begingroup
\tocentry{\bibname}
-\bibliographystyle{plain} \small\raggedright\frenchspacing
+\bibliographystyle{abbrv} \small\raggedright\frenchspacing
\bibliography{../manual}
\endgroup
-%FIXME
-%\tocentry{\glossaryname}
-%\printglossary
-
\tocentry{\indexname}
\printindex
--- a/doc-src/IsarImplementation/style.sty Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarImplementation/style.sty Wed Mar 04 10:45:52 2009 +0100
@@ -1,6 +1,3 @@
-
-%% $Id$
-
%% toc
\newcommand{\tocentry}[1]{\cleardoublepage\phantomsection\addcontentsline{toc}{chapter}{#1}
\@mkboth{\MakeUppercase{#1}}{\MakeUppercase{#1}}}
@@ -10,24 +7,12 @@
\newcommand{\chref}[1]{chapter~\ref{#1}}
\newcommand{\figref}[1]{figure~\ref{#1}}
-%% glossary
-\renewcommand{\glossary}[2]{\nomenclature{\bf #1}{#2}}
-\newcommand{\seeglossary}[1]{\emph{#1}}
-\newcommand{\glossaryname}{Glossary}
-\renewcommand{\nomname}{\glossaryname}
-\renewcommand{\pagedeclaration}[1]{\nobreak\quad\dotfill~page~\bold{#1}}
-
-%% index
-\newcommand{\indexml}[1]{\index{\emph{#1}|bold}}
-\newcommand{\indexmlexception}[1]{\index{\emph{#1} (exception)|bold}}
-\newcommand{\indexmltype}[1]{\index{\emph{#1} (type)|bold}}
-\newcommand{\indexmlstructure}[1]{\index{\emph{#1} (structure)|bold}}
-\newcommand{\indexmlfunctor}[1]{\index{\emph{#1} (functor)|bold}}
-
%% math
\newcommand{\text}[1]{\mbox{#1}}
\newcommand{\isasymvartheta}{\isamath{\theta}}
-\newcommand{\isactrlvec}[1]{\emph{$\overline{#1}$}}
+\newcommand{\isactrlvec}[1]{\emph{$\vec{#1}$}}
+\newcommand{\isactrlBG}{\isacharbackquoteopen}
+\newcommand{\isactrlEN}{\isacharbackquoteclose}
\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}
@@ -49,6 +34,10 @@
\newcommand{\isasymtype}{\minorcmd{type}}
\newcommand{\isasymval}{\minorcmd{val}}
+\newcommand{\isasymFIX}{\isakeyword{fix}}
+\newcommand{\isasymASSUME}{\isakeyword{assume}}
+\newcommand{\isasymDEFINE}{\isakeyword{define}}
+\newcommand{\isasymNOTE}{\isakeyword{note}}
\newcommand{\isasymGUESS}{\isakeyword{guess}}
\newcommand{\isasymOBTAIN}{\isakeyword{obtain}}
\newcommand{\isasymTHEORY}{\isakeyword{theory}}
@@ -61,6 +50,7 @@
\isabellestyle{it}
+
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "implementation"
--- a/doc-src/IsarRef/IsaMakefile Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/IsaMakefile Wed Mar 04 10:45:52 2009 +0100
@@ -22,10 +22,11 @@
HOL-IsarRef: $(LOG)/HOL-IsarRef.gz
$(LOG)/HOL-IsarRef.gz: Thy/ROOT.ML ../antiquote_setup.ML \
- Thy/Inner_Syntax.thy Thy/Introduction.thy Thy/Outer_Syntax.thy \
- Thy/Spec.thy Thy/Proof.thy Thy/Misc.thy Thy/Document_Preparation.thy \
- Thy/Generic.thy Thy/HOL_Specific.thy Thy/Quick_Reference.thy \
- Thy/Symbols.thy Thy/ML_Tactic.thy
+ Thy/First_Order_Logic.thy Thy/Framework.thy Thy/Inner_Syntax.thy \
+ Thy/Introduction.thy Thy/Outer_Syntax.thy Thy/Spec.thy Thy/Proof.thy \
+ Thy/Misc.thy Thy/Document_Preparation.thy Thy/Generic.thy \
+ Thy/HOL_Specific.thy Thy/Quick_Reference.thy Thy/Symbols.thy \
+ Thy/ML_Tactic.thy
@$(USEDIR) -s IsarRef HOL Thy
--- a/doc-src/IsarRef/Makefile Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Makefile Wed Mar 04 10:45:52 2009 +0100
@@ -1,7 +1,3 @@
-#
-# $Id$
-#
-
## targets
default: dvi
--- a/doc-src/IsarRef/Thy/Document_Preparation.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/Document_Preparation.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
theory Document_Preparation
imports Main
begin
--- a/doc-src/IsarRef/Thy/Generic.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/Generic.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
theory Generic
imports Main
begin
--- a/doc-src/IsarRef/Thy/HOLCF_Specific.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/HOLCF_Specific.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
theory HOLCF_Specific
imports HOLCF
begin
--- a/doc-src/IsarRef/Thy/HOL_Specific.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/HOL_Specific.thy Wed Mar 04 10:45:52 2009 +0100
@@ -771,6 +771,55 @@
*}
+section {* Intuitionistic proof search *}
+
+text {*
+ \begin{matharray}{rcl}
+ @{method_def (HOL) iprover} & : & @{text method} \\
+ \end{matharray}
+
+ \begin{rail}
+ 'iprover' ('!' ?) (rulemod *)
+ ;
+ \end{rail}
+
+ The @{method (HOL) iprover} method performs intuitionistic proof
+ search, depending on specifically declared rules from the context,
+ or given as explicit arguments. Chained facts are inserted into the
+ goal before commencing proof search; ``@{method (HOL) iprover}@{text
+ "!"}'' means to include the current @{fact prems} as well.
+
+ Rules need to be classified as @{attribute (Pure) intro},
+ @{attribute (Pure) elim}, or @{attribute (Pure) dest}; here the
+ ``@{text "!"}'' indicator refers to ``safe'' rules, which may be
+ applied aggressively (without considering back-tracking later).
+ Rules declared with ``@{text "?"}'' are ignored in proof search (the
+ single-step @{method rule} method still observes these). An
+ explicit weight annotation may be given as well; otherwise the
+ number of rule premises will be taken into account here.
+*}
+
+
+section {* Coherent Logic *}
+
+text {*
+ \begin{matharray}{rcl}
+ @{method_def (HOL) "coherent"} & : & @{text method} \\
+ \end{matharray}
+
+ \begin{rail}
+ 'coherent' thmrefs?
+ ;
+ \end{rail}
+
+ The @{method (HOL) coherent} method solves problems of
+ \emph{Coherent Logic} \cite{Bezem-Coquand:2005}, which covers
+ applications in confluence theory, lattice theory and projective
+ geometry. See @{"file" "~~/src/HOL/ex/Coherent.thy"} for some
+ examples.
+*}
+
+
section {* Invoking automated reasoning tools -- The Sledgehammer *}
text {*
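
The new sections above document the @{method (HOL) iprover} and @{method (HOL) coherent} methods in prose only; as a rough illustration (a hypothetical theory, not taken from the distribution, with the rules supplied explicitly), an iprover invocation might look like this — for coherent, see the Coherent.thy examples referenced above:

    theory Iprover_Example
    imports Main
    begin

    (* hypothetical example: intuitionistic proof search with explicit rules *)
    lemma "A & B --> B & A"
      by (iprover intro: impI conjI elim: conjE)

    end
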
--- a/doc-src/IsarRef/Thy/Inner_Syntax.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/Inner_Syntax.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
theory Inner_Syntax
imports Main
begin
@@ -370,7 +368,7 @@
\end{matharray}
\begin{rail}
- ('notation' | 'no\_notation') target? mode? (nameref structmixfix + 'and')
+ ('notation' | 'no\_notation') target? mode? \\ (nameref structmixfix + 'and')
;
\end{rail}
@@ -525,13 +523,15 @@
& @{text "|"} & @{text "tid | tvar | "}@{verbatim "_"} \\
& @{text "|"} & @{text "tid"} @{verbatim "::"} @{text "sort | tvar "}@{verbatim "::"} @{text "sort | "}@{verbatim "_"} @{verbatim "::"} @{text "sort"} \\
& @{text "|"} & @{text "id | type\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) id | "}@{verbatim "("} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim ")"} @{text id} \\
- & @{text "|"} & @{text "longid | type\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) longid | "}@{verbatim "("} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim ")"} @{text longid} \\
+ & @{text "|"} & @{text "longid | type\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) longid"} \\
+ & @{text "|"} & @{verbatim "("} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim ")"} @{text longid} \\
& @{text "|"} & @{text "type\<^sup>(\<^sup>1\<^sup>)"} @{verbatim "=>"} @{text type} & @{text "(0)"} \\
& @{text "|"} & @{text "type\<^sup>(\<^sup>1\<^sup>)"} @{text "\<Rightarrow>"} @{text type} & @{text "(0)"} \\
& @{text "|"} & @{verbatim "["} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim "]"} @{verbatim "=>"} @{text type} & @{text "(0)"} \\
& @{text "|"} & @{verbatim "["} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim "]"} @{text "\<Rightarrow>"} @{text type} & @{text "(0)"} \\\\
- @{syntax_def (inner) sort} & = & @{text "id | longid | "}@{verbatim "{}"}@{text " | "}@{verbatim "{"} @{text "(id | longid)"} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text "(id | longid)"} @{verbatim "}"} \\
+ @{syntax_def (inner) sort} & = & @{text "id | longid | "}@{verbatim "{}"} \\
+ & @{text "|"} & @{verbatim "{"} @{text "(id | longid)"} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text "(id | longid)"} @{verbatim "}"} \\
\end{supertabular}
\end{center}
--- a/doc-src/IsarRef/Thy/Introduction.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/Introduction.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
theory Introduction
imports Main
begin
@@ -12,27 +10,27 @@
The \emph{Isabelle} system essentially provides a generic
infrastructure for building deductive systems (programmed in
Standard ML), with a special focus on interactive theorem proving in
- higher-order logics. In the olden days even end-users would refer
- to certain ML functions (goal commands, tactics, tacticals etc.) to
- pursue their everyday theorem proving tasks
- \cite{isabelle-intro,isabelle-ref}.
+ higher-order logics. Many years ago, even end-users would refer to
+ certain ML functions (goal commands, tactics, tacticals etc.) to
+ pursue their everyday theorem proving tasks.
In contrast \emph{Isar} provides an interpreted language environment
of its own, which has been specifically tailored for the needs of
theory and proof development. Compared to raw ML, the Isabelle/Isar
top-level provides a more robust and comfortable development
- platform, with proper support for theory development graphs,
- single-step transactions with unlimited undo, etc. The
- Isabelle/Isar version of the \emph{Proof~General} user interface
- \cite{proofgeneral,Aspinall:TACAS:2000} provides an adequate
- front-end for interactive theory and proof development in this
- advanced theorem proving environment.
+ platform, with proper support for theory development graphs, managed
+ transactions with unlimited undo etc. The Isabelle/Isar version of
+ the \emph{Proof~General} user interface
+ \cite{proofgeneral,Aspinall:TACAS:2000} provides a decent front-end
+ for interactive theory and proof development in this advanced
+ theorem proving environment, even though it is somewhat biased
+ towards old-style proof scripts.
\medskip Apart from the technical advances over bare-bones ML
programming, the main purpose of the Isar language is to provide a
conceptually different view on machine-checked proofs
- \cite{Wenzel:1999:TPHOL,Wenzel-PhD}. ``Isar'' stands for
- ``Intelligible semi-automated reasoning''. Drawing from both the
+ \cite{Wenzel:1999:TPHOL,Wenzel-PhD}. \emph{Isar} stands for
+ \emph{Intelligible semi-automated reasoning}. Drawing from both the
traditions of informal mathematical proof texts and high-level
programming languages, Isar offers a versatile environment for
structured formal proof documents. Thus properly written Isar
@@ -47,12 +45,12 @@
Despite its grand design of structured proof texts, Isar is able to
assimilate the old tactical style as an ``improper'' sub-language.
This provides an easy upgrade path for existing tactic scripts, as
- well as additional means for interactive experimentation and
- debugging of structured proofs. Isabelle/Isar supports a broad
- range of proof styles, both readable and unreadable ones.
+ well as some means for interactive experimentation and debugging of
+ structured proofs. Isabelle/Isar supports a broad range of proof
+ styles, both readable and unreadable ones.
- \medskip The Isabelle/Isar framework \cite{Wenzel:2006:Festschrift}
- is generic and should work reasonably well for any Isabelle
+ \medskip The generic Isabelle/Isar framework (see
+ \chref{ch:isar-framework}) works reasonably well for any Isabelle
object-logic that conforms to the natural deduction view of the
Isabelle/Pure framework. Specific language elements introduced by
the major object-logics are described in \chref{ch:hol}
@@ -72,194 +70,4 @@
context; other commands emulate old-style tactical theorem proving.
*}
-
-section {* User interfaces *}
-
-subsection {* Terminal sessions *}
-
-text {*
- The Isabelle \texttt{tty} tool provides a very interface for running
- the Isar interaction loop, with some support for command line
- editing. For example:
-\begin{ttbox}
-isabelle tty\medskip
-{\out Welcome to Isabelle/HOL (Isabelle2008)}\medskip
-theory Foo imports Main begin;
-definition foo :: nat where "foo == 1";
-lemma "0 < foo" by (simp add: foo_def);
-end;
-\end{ttbox}
-
- Any Isabelle/Isar command may be retracted by @{command undo}.
- See the Isabelle/Isar Quick Reference (\appref{ap:refcard}) for a
- comprehensive overview of available commands and other language
- elements.
-*}
-
-
-subsection {* Emacs Proof General *}
-
-text {*
- Plain TTY-based interaction as above used to be quite feasible with
- traditional tactic based theorem proving, but developing Isar
- documents really demands some better user-interface support. The
- Proof~General environment by David Aspinall
- \cite{proofgeneral,Aspinall:TACAS:2000} offers a generic Emacs
- interface for interactive theorem provers that organizes all the
- cut-and-paste and forward-backward walk through the text in a very
- neat way. In Isabelle/Isar, the current position within a partial
- proof document is equally important than the actual proof state.
- Thus Proof~General provides the canonical working environment for
- Isabelle/Isar, both for getting acquainted (e.g.\ by replaying
- existing Isar documents) and for production work.
-*}
-
-
-subsubsection{* Proof~General as default Isabelle interface *}
-
-text {*
- The Isabelle interface wrapper script provides an easy way to invoke
- Proof~General (including XEmacs or GNU Emacs). The default
- configuration of Isabelle is smart enough to detect the
- Proof~General distribution in several canonical places (e.g.\
- @{verbatim "$ISABELLE_HOME/contrib/ProofGeneral"}). Thus the
- capital @{verbatim Isabelle} executable would already refer to the
- @{verbatim "ProofGeneral/isar"} interface without further ado. The
- Isabelle interface script provides several options; pass @{verbatim
- "-?"} to see its usage.
-
- With the proper Isabelle interface setup, Isar documents may now be edited by
- visiting appropriate theory files, e.g.\
-\begin{ttbox}
-Isabelle \({\langle}isabellehome{\rangle}\)/src/HOL/Isar_examples/Summation.thy
-\end{ttbox}
- Beginners may note the tool bar for navigating forward and backward
- through the text (this depends on the local Emacs installation).
- Consult the Proof~General documentation \cite{proofgeneral} for
- further basic command sequences, in particular ``@{verbatim "C-c C-return"}''
- and ``@{verbatim "C-c u"}''.
-
- \medskip Proof~General may be also configured manually by giving
- Isabelle settings like this (see also \cite{isabelle-sys}):
-
-\begin{ttbox}
-ISABELLE_INTERFACE=\$ISABELLE_HOME/contrib/ProofGeneral/isar/interface
-PROOFGENERAL_OPTIONS=""
-\end{ttbox}
- You may have to change @{verbatim
- "$ISABELLE_HOME/contrib/ProofGeneral"} to the actual installation
- directory of Proof~General.
-
- \medskip Apart from the Isabelle command line, defaults for
- interface options may be given by the @{verbatim PROOFGENERAL_OPTIONS}
- setting. For example, the Emacs executable to be used may be
- configured in Isabelle's settings like this:
-\begin{ttbox}
-PROOFGENERAL_OPTIONS="-p xemacs-mule"
-\end{ttbox}
-
- Occasionally, a user's @{verbatim "~/.emacs"} file contains code
- that is incompatible with the (X)Emacs version used by
- Proof~General, causing the interface startup to fail prematurely.
- Here the @{verbatim "-u false"} option helps to get the interface
- process up and running. Note that additional Lisp customization
- code may reside in @{verbatim "proofgeneral-settings.el"} of
- @{verbatim "$ISABELLE_HOME/etc"} or @{verbatim
- "$ISABELLE_HOME_USER/etc"}.
-*}
-
-
-subsubsection {* The X-Symbol package *}
-
-text {*
- Proof~General incorporates a version of the Emacs X-Symbol package
- \cite{x-symbol}, which handles proper mathematical symbols displayed
- on screen. Pass option @{verbatim "-x true"} to the Isabelle
- interface script, or check the appropriate Proof~General menu
- setting by hand. The main challenge of getting X-Symbol to work
- properly is the underlying (semi-automated) X11 font setup.
-
- \medskip Using proper mathematical symbols in Isabelle theories can
- be very convenient for readability of large formulas. On the other
- hand, the plain ASCII sources easily become somewhat unintelligible.
- For example, @{text "\<Longrightarrow>"} would appear as @{verbatim "\<Longrightarrow>"} according
- the default set of Isabelle symbols. Nevertheless, the Isabelle
- document preparation system (see \chref{ch:document-prep}) will be
- happy to print non-ASCII symbols properly. It is even possible to
- invent additional notation beyond the display capabilities of Emacs
- and X-Symbol.
-*}
-
-
-section {* Isabelle/Isar theories *}
-
-text {*
- Isabelle/Isar offers the following main improvements over classic
- Isabelle.
-
- \begin{enumerate}
-
- \item A \emph{theory format} that integrates specifications and
- proofs, supporting interactive development and unlimited undo
- operation.
-
- \item A \emph{formal proof document language} designed to support
- intelligible semi-automated reasoning. Instead of putting together
- unreadable tactic scripts, the author is enabled to express the
- reasoning in way that is close to usual mathematical practice. The
- old tactical style has been assimilated as ``improper'' language
- elements.
-
- \item A simple document preparation system, for typesetting formal
- developments together with informal text. The resulting
- hyper-linked PDF documents are equally well suited for WWW
- presentation and as printed copies.
-
- \end{enumerate}
-
- The Isar proof language is embedded into the new theory format as a
- proper sub-language. Proof mode is entered by stating some
- @{command theorem} or @{command lemma} at the theory level, and
- left again with the final conclusion (e.g.\ via @{command qed}).
- A few theory specification mechanisms also require some proof, such
- as HOL's @{command typedef} which demands non-emptiness of the
- representing sets.
-*}
-
-
-section {* How to write Isar proofs anyway? \label{sec:isar-howto} *}
-
-text {*
- This is one of the key questions, of course. First of all, the
- tactic script emulation of Isabelle/Isar essentially provides a
- clarified version of the very same unstructured proof style of
- classic Isabelle. Old-time users should quickly become acquainted
- with that (slightly degenerative) view of Isar.
-
- Writing \emph{proper} Isar proof texts targeted at human readers is
- quite different, though. Experienced users of the unstructured
- style may even have to unlearn some of their habits to master proof
- composition in Isar. In contrast, new users with less experience in
- old-style tactical proving, but a good understanding of mathematical
- proof in general, often get started easier.
-
- \medskip The present text really is only a reference manual on
- Isabelle/Isar, not a tutorial. Nevertheless, we will attempt to
- give some clues of how the concepts introduced here may be put into
- practice. Especially note that \appref{ap:refcard} provides a quick
- reference card of the most common Isabelle/Isar language elements.
-
- Further issues concerning the Isar concepts are covered in the
- literature
- \cite{Wenzel:1999:TPHOL,Wiedijk:2000:MV,Bauer-Wenzel:2000:HB,Bauer-Wenzel:2001}.
- The author's PhD thesis \cite{Wenzel-PhD} presently provides the
- most complete exposition of Isar foundations, techniques, and
- applications. A number of example applications are distributed with
- Isabelle, and available via the Isabelle WWW library (e.g.\
- \url{http://isabelle.in.tum.de/library/}). The ``Archive of Formal
- Proofs'' \url{http://afp.sourceforge.net/} also provides plenty of
- examples, both in proper Isar proof style and unstructured tactic
- scripts.
-*}
-
end
--- a/doc-src/IsarRef/Thy/ML_Tactic.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/ML_Tactic.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
theory ML_Tactic
imports Main
begin
--- a/doc-src/IsarRef/Thy/Misc.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/Misc.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
theory Misc
imports Main
begin
--- a/doc-src/IsarRef/Thy/Outer_Syntax.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/Outer_Syntax.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
theory Outer_Syntax
imports Main
begin
@@ -170,10 +168,10 @@
Isabelle as @{verbatim \<forall>}. There are infinitely many Isabelle
symbols like this, although proper presentation is left to front-end
tools such as {\LaTeX} or Proof~General with the X-Symbol package.
- A list of standard Isabelle symbols that work well with these tools
- is given in \appref{app:symbols}. Note that @{verbatim "\<lambda>"} does
- not belong to the @{text letter} category, since it is already used
- differently in the Pure term language.
+ A list of predefined Isabelle symbols that work well with these
+ tools is given in \appref{app:symbols}. Note that @{verbatim "\<lambda>"}
+ does not belong to the @{text letter} category, since it is already
+ used differently in the Pure term language.
*}
--- a/doc-src/IsarRef/Thy/Proof.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/Proof.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,17 +1,15 @@
-(* $Id$ *)
-
theory Proof
imports Main
begin
-chapter {* Proofs *}
+chapter {* Proofs \label{ch:proofs} *}
text {*
Proof commands perform transitions of Isar/VM machine
configurations, which are block-structured, consisting of a stack of
nodes with three main components: logical proof context, current
- facts, and open goals. Isar/VM transitions are \emph{typed}
- according to the following three different modes of operation:
+ facts, and open goals. Isar/VM transitions are typed according to
+ the following three different modes of operation:
\begin{description}
@@ -32,13 +30,17 @@
\end{description}
- The proof mode indicator may be read as a verb telling the writer
- what kind of operation may be performed next. The corresponding
- typings of proof commands restricts the shape of well-formed proof
- texts to particular command sequences. So dynamic arrangements of
- commands eventually turn out as static texts of a certain structure.
- \Appref{ap:refcard} gives a simplified grammar of the overall
- (extensible) language emerging that way.
+ The proof mode indicator may be understood as an instruction to the
+ writer, telling what kind of operation may be performed next. The
+  corresponding typings of proof commands restrict the shape of
+ well-formed proof texts to particular command sequences. So dynamic
+ arrangements of commands eventually turn out as static texts of a
+ certain structure.
+
+ \Appref{ap:refcard} gives a simplified grammar of the (extensible)
+ language emerging that way from the different types of proof
+ commands. The main ideas of the overall Isar framework are
+ explained in \chref{ch:isar-framework}.
*}
@@ -681,7 +683,6 @@
@{method_def "assumption"} & : & @{text method} \\
@{method_def "this"} & : & @{text method} \\
@{method_def "rule"} & : & @{text method} \\
- @{method_def "iprover"} & : & @{text method} \\[0.5ex]
@{attribute_def (Pure) "intro"} & : & @{text attribute} \\
@{attribute_def (Pure) "elim"} & : & @{text attribute} \\
@{attribute_def (Pure) "dest"} & : & @{text attribute} \\
@@ -696,8 +697,6 @@
;
'rule' thmrefs?
;
- 'iprover' ('!' ?) (rulemod *)
- ;
rulemod: ('intro' | 'elim' | 'dest') ((('!' | () | '?') nat?) | 'del') ':' thmrefs
;
('intro' | 'elim' | 'dest') ('!' | () | '?') nat?
@@ -756,27 +755,11 @@
default behavior of @{command "proof"} and ``@{command ".."}''
(double-dot) steps (see \secref{sec:proof-steps}).
- \item @{method iprover} performs intuitionistic proof search,
- depending on specifically declared rules from the context, or given
- as explicit arguments. Chained facts are inserted into the goal
- before commencing proof search; ``@{method iprover}@{text "!"}''
- means to include the current @{fact prems} as well.
-
- Rules need to be classified as @{attribute (Pure) intro},
- @{attribute (Pure) elim}, or @{attribute (Pure) dest}; here the
- ``@{text "!"}'' indicator refers to ``safe'' rules, which may be
- applied aggressively (without considering back-tracking later).
- Rules declared with ``@{text "?"}'' are ignored in proof search (the
- single-step @{method rule} method still observes these). An
- explicit weight annotation may be given as well; otherwise the
- number of rule premises will be taken into account here.
-
\item @{attribute (Pure) intro}, @{attribute (Pure) elim}, and
@{attribute (Pure) dest} declare introduction, elimination, and
- destruct rules, to be used with the @{method rule} and @{method
- iprover} methods. Note that the latter will ignore rules declared
- with ``@{text "?"}'', while ``@{text "!"}'' are used most
- aggressively.
+ destruct rules, to be used with method @{method rule}, and similar
+ tools. Note that the latter will ignore rules declared with
+ ``@{text "?"}'', while ``@{text "!"}'' are used most aggressively.
The classical reasoner (see \secref{sec:classical}) introduces its
own variants of these attributes; use qualified names to access the
@@ -963,7 +946,7 @@
\begin{matharray}{l}
@{text "\<langle>using b\<^sub>1 \<dots> b\<^sub>k\<rangle>"}~~@{command "obtain"}~@{text "x\<^sub>1 \<dots> x\<^sub>m \<WHERE> a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n \<langle>proof\<rangle> \<equiv>"} \\[1ex]
\quad @{command "have"}~@{text "\<And>thesis. (\<And>x\<^sub>1 \<dots> x\<^sub>m. \<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\
- \quad @{command "proof"}~@{text succeed} \\
+ \quad @{command "proof"}~@{method succeed} \\
\qquad @{command "fix"}~@{text thesis} \\
\qquad @{command "assume"}~@{text "that [Pure.intro?]: \<And>x\<^sub>1 \<dots> x\<^sub>m. \<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> thesis"} \\
\qquad @{command "then"}~@{command "show"}~@{text thesis} \\
--- a/doc-src/IsarRef/Thy/Quick_Reference.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/Quick_Reference.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
theory Quick_Reference
imports Main
begin
@@ -30,7 +28,7 @@
\begin{tabular}{rcl}
@{text "theory\<dash>stmt"} & = & @{command "theorem"}~@{text "name: props proof |"}~~@{command "definition"}~@{text "\<dots> | \<dots>"} \\[1ex]
- @{text "proof"} & = & @{text "prfx\<^sup>*"}~@{command "proof"}~@{text "method stmt\<^sup>*"}~@{command "qed"}~@{text method} \\
+ @{text "proof"} & = & @{text "prfx\<^sup>*"}~@{command "proof"}~@{text "method\<^sup>? stmt\<^sup>*"}~@{command "qed"}~@{text "method\<^sup>?"} \\
& @{text "|"} & @{text "prfx\<^sup>*"}~@{command "done"} \\[1ex]
@{text prfx} & = & @{command "apply"}~@{text method} \\
& @{text "|"} & @{command "using"}~@{text "facts"} \\
--- a/doc-src/IsarRef/Thy/ROOT-HOLCF.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/ROOT-HOLCF.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,6 +1,3 @@
-
-(* $Id$ *)
-
set ThyOutput.source;
use "../../antiquote_setup.ML";
--- a/doc-src/IsarRef/Thy/ROOT-ZF.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/ROOT-ZF.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,6 +1,3 @@
-
-(* $Id$ *)
-
set ThyOutput.source;
use "../../antiquote_setup.ML";
--- a/doc-src/IsarRef/Thy/ROOT.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/ROOT.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,18 +1,20 @@
-
-(* $Id$ *)
-
+set quick_and_dirty;
set ThyOutput.source;
use "../../antiquote_setup.ML";
-use_thy "Introduction";
-use_thy "Outer_Syntax";
-use_thy "Document_Preparation";
-use_thy "Spec";
-use_thy "Proof";
-use_thy "Inner_Syntax";
-use_thy "Misc";
-use_thy "Generic";
-use_thy "HOL_Specific";
-use_thy "Quick_Reference";
-use_thy "Symbols";
-use_thy "ML_Tactic";
+use_thys [
+ "Introduction",
+ "Framework",
+ "First_Order_Logic",
+ "Outer_Syntax",
+ "Document_Preparation",
+ "Spec",
+ "Proof",
+ "Inner_Syntax",
+ "Misc",
+ "Generic",
+ "HOL_Specific",
+ "Quick_Reference",
+ "Symbols",
+ "ML_Tactic"
+];
--- a/doc-src/IsarRef/Thy/Spec.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/Spec.thy Wed Mar 04 10:45:52 2009 +0100
@@ -4,6 +4,24 @@
chapter {* Theory specifications *}
+text {*
+ The Isabelle/Isar theory format integrates specifications and
+ proofs, supporting interactive development with unlimited undo
+ operation. There is an integrated document preparation system (see
+ \chref{ch:document-prep}), for typesetting formal developments
+ together with informal text. The resulting hyper-linked PDF
+ documents can be used both for WWW presentation and printed copies.
+
+ The Isar proof language (see \chref{ch:proofs}) is embedded into the
+ theory language as a proper sub-language. Proof mode is entered by
+ stating some @{command theorem} or @{command lemma} at the theory
+ level, and left again with the final conclusion (e.g.\ via @{command
+ qed}). Some theory specification mechanisms also require a proof,
+ such as @{command typedef} in HOL, which demands non-emptiness of
+ the representing sets.
+*}
+
+
section {* Defining theories \label{sec:begin-thy} *}
text {*
@@ -106,9 +124,9 @@
@{command (global) "end"} has a different meaning: it concludes the
theory itself (\secref{sec:begin-thy}).
- \item @{text "(\<IN> c)"} given after any local theory command
- specifies an immediate target, e.g.\ ``@{command
- "definition"}~@{text "(\<IN> c) \<dots>"}'' or ``@{command
+ \item @{text "("}@{keyword_def "in"}~@{text "c)"} given after any
+ local theory command specifies an immediate target, e.g.\
+ ``@{command "definition"}~@{text "(\<IN> c) \<dots>"}'' or ``@{command
"theorem"}~@{text "(\<IN> c) \<dots>"}''. This works both in a local or
global theory context; the current target context will be suspended
for this command only. Note that ``@{text "(\<IN> -)"}'' will
@@ -1164,7 +1182,7 @@
\end{description}
- See @{"file" "~~/src/FOL/ex/IffOracle.thy"} for a worked example of
+ See @{"file" "~~/src/FOL/ex/Iff_Oracle.thy"} for a worked example of
defining a new primitive rule as oracle, and turning it into a proof
method.
*}
--- a/doc-src/IsarRef/Thy/Symbols.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/Symbols.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,10 +1,8 @@
-(* $Id$ *)
-
theory Symbols
imports Pure
begin
-chapter {* Standard Isabelle symbols \label{app:symbols} *}
+chapter {* Predefined Isabelle symbols \label{app:symbols} *}
text {*
Isabelle supports an infinite number of non-ASCII symbols, which are
--- a/doc-src/IsarRef/Thy/ZF_Specific.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/ZF_Specific.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
theory ZF_Specific
imports Main
begin
--- a/doc-src/IsarRef/Thy/document/Document_Preparation.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Document_Preparation.tex Wed Mar 04 10:45:52 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{Document{\isacharunderscore}Preparation}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
--- a/doc-src/IsarRef/Thy/document/Generic.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Generic.tex Wed Mar 04 10:45:52 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{Generic}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
--- a/doc-src/IsarRef/Thy/document/HOLCF_Specific.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/HOLCF_Specific.tex Wed Mar 04 10:45:52 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{HOLCF{\isacharunderscore}Specific}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
--- a/doc-src/IsarRef/Thy/document/HOL_Specific.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/HOL_Specific.tex Wed Mar 04 10:45:52 2009 +0100
@@ -779,6 +779,58 @@
\end{isamarkuptext}%
\isamarkuptrue%
%
+\isamarkupsection{Intuitionistic proof search%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+\begin{matharray}{rcl}
+ \indexdef{HOL}{method}{iprover}\hypertarget{method.HOL.iprover}{\hyperlink{method.HOL.iprover}{\mbox{\isa{iprover}}}} & : & \isa{method} \\
+ \end{matharray}
+
+ \begin{rail}
+ 'iprover' ('!' ?) (rulemod *)
+ ;
+ \end{rail}
+
+ The \hyperlink{method.HOL.iprover}{\mbox{\isa{iprover}}} method performs intuitionistic proof
+ search, depending on specifically declared rules from the context,
+ or given as explicit arguments. Chained facts are inserted into the
+ goal before commencing proof search; ``\hyperlink{method.HOL.iprover}{\mbox{\isa{iprover}}}\isa{{\isachardoublequote}{\isacharbang}{\isachardoublequote}}'' means to include the current \hyperlink{fact.prems}{\mbox{\isa{prems}}} as well.
+
+ Rules need to be classified as \hyperlink{attribute.Pure.intro}{\mbox{\isa{intro}}},
+ \hyperlink{attribute.Pure.elim}{\mbox{\isa{elim}}}, or \hyperlink{attribute.Pure.dest}{\mbox{\isa{dest}}}; here the
+ ``\isa{{\isachardoublequote}{\isacharbang}{\isachardoublequote}}'' indicator refers to ``safe'' rules, which may be
+ applied aggressively (without considering back-tracking later).
+ Rules declared with ``\isa{{\isachardoublequote}{\isacharquery}{\isachardoublequote}}'' are ignored in proof search (the
+ single-step \hyperlink{method.rule}{\mbox{\isa{rule}}} method still observes these). An
+ explicit weight annotation may be given as well; otherwise the
+ number of rule premises will be taken into account here.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsection{Coherent Logic%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+\begin{matharray}{rcl}
+ \indexdef{HOL}{method}{coherent}\hypertarget{method.HOL.coherent}{\hyperlink{method.HOL.coherent}{\mbox{\isa{coherent}}}} & : & \isa{method} \\
+ \end{matharray}
+
+ \begin{rail}
+ 'coherent' thmrefs?
+ ;
+ \end{rail}
+
+ The \hyperlink{method.HOL.coherent}{\mbox{\isa{coherent}}} method solves problems of
+ \emph{Coherent Logic} \cite{Bezem-Coquand:2005}, which covers
+ applications in confluence theory, lattice theory and projective
+ geometry. See \hyperlink{file.~~/src/HOL/ex/Coherent.thy}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}HOL{\isacharslash}ex{\isacharslash}Coherent{\isachardot}thy}}}} for some
+ examples.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
\isamarkupsection{Invoking automated reasoning tools -- The Sledgehammer%
}
\isamarkuptrue%
--- a/doc-src/IsarRef/Thy/document/Inner_Syntax.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Inner_Syntax.tex Wed Mar 04 10:45:52 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{Inner{\isacharunderscore}Syntax}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
@@ -120,19 +118,19 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{show\_types}\verb|show_types: bool ref| & default \verb|false| \\
- \indexml{show\_sorts}\verb|show_sorts: bool ref| & default \verb|false| \\
- \indexml{show\_consts}\verb|show_consts: bool ref| & default \verb|false| \\
- \indexml{long\_names}\verb|long_names: bool ref| & default \verb|false| \\
- \indexml{short\_names}\verb|short_names: bool ref| & default \verb|false| \\
- \indexml{unique\_names}\verb|unique_names: bool ref| & default \verb|true| \\
- \indexml{show\_brackets}\verb|show_brackets: bool ref| & default \verb|false| \\
- \indexml{eta\_contract}\verb|eta_contract: bool ref| & default \verb|true| \\
- \indexml{goals\_limit}\verb|goals_limit: int ref| & default \verb|10| \\
- \indexml{Proof.show\_main\_goal}\verb|Proof.show_main_goal: bool ref| & default \verb|false| \\
- \indexml{show\_hyps}\verb|show_hyps: bool ref| & default \verb|false| \\
- \indexml{show\_tags}\verb|show_tags: bool ref| & default \verb|false| \\
- \indexml{show\_question\_marks}\verb|show_question_marks: bool ref| & default \verb|true| \\
+ \indexdef{}{ML}{show\_types}\verb|show_types: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{show\_sorts}\verb|show_sorts: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{show\_consts}\verb|show_consts: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{long\_names}\verb|long_names: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{short\_names}\verb|short_names: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{unique\_names}\verb|unique_names: bool ref| & default \verb|true| \\
+ \indexdef{}{ML}{show\_brackets}\verb|show_brackets: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{eta\_contract}\verb|eta_contract: bool ref| & default \verb|true| \\
+ \indexdef{}{ML}{goals\_limit}\verb|goals_limit: int ref| & default \verb|10| \\
+ \indexdef{}{ML}{Proof.show\_main\_goal}\verb|Proof.show_main_goal: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{show\_hyps}\verb|show_hyps: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{show\_tags}\verb|show_tags: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{show\_question\_marks}\verb|show_question_marks: bool ref| & default \verb|true| \\
\end{mldecls}
These global ML variables control the detail of information that is
@@ -233,9 +231,9 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{Pretty.setdepth}\verb|Pretty.setdepth: int -> unit| \\
- \indexml{Pretty.setmargin}\verb|Pretty.setmargin: int -> unit| \\
- \indexml{print\_depth}\verb|print_depth: int -> unit| \\
+ \indexdef{}{ML}{Pretty.setdepth}\verb|Pretty.setdepth: int -> unit| \\
+ \indexdef{}{ML}{Pretty.setmargin}\verb|Pretty.setmargin: int -> unit| \\
+ \indexdef{}{ML}{print\_depth}\verb|print_depth: int -> unit| \\
\end{mldecls}
These ML functions set limits for pretty printed text.
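
The printing flags and pretty-printing limits indexed in the two hunks above are plain ML references and functions; typical settings (the values here are arbitrary) would be:

    show_types := true;                                 (* print type annotations *)
    Pretty.setmargin 76;                                (* line width for pretty printing *)
    print_depth 100;                                    (* depth limit for printed ML values *)
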
@@ -392,7 +390,7 @@
\end{matharray}
\begin{rail}
- ('notation' | 'no\_notation') target? mode? (nameref structmixfix + 'and')
+ ('notation' | 'no\_notation') target? mode? \\ (nameref structmixfix + 'and')
;
\end{rail}
@@ -551,13 +549,15 @@
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}tid\ \ {\isacharbar}\ \ tvar\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|_| \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}tid{\isachardoublequote}} \verb|::| \isa{{\isachardoublequote}sort\ \ {\isacharbar}\ \ tvar\ \ {\isachardoublequote}}\verb|::| \isa{{\isachardoublequote}sort\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|_| \verb|::| \isa{{\isachardoublequote}sort{\isachardoublequote}} \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}id\ \ {\isacharbar}\ \ type\isactrlsup {\isacharparenleft}\isactrlsup {\isadigit{1}}\isactrlsup {\isadigit{0}}\isactrlsup {\isadigit{0}}\isactrlsup {\isadigit{0}}\isactrlsup {\isacharparenright}\ id\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|(| \isa{type} \verb|,| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|,| \isa{type} \verb|)| \isa{id} \\
- & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}longid\ \ {\isacharbar}\ \ type\isactrlsup {\isacharparenleft}\isactrlsup {\isadigit{1}}\isactrlsup {\isadigit{0}}\isactrlsup {\isadigit{0}}\isactrlsup {\isadigit{0}}\isactrlsup {\isacharparenright}\ longid\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|(| \isa{type} \verb|,| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|,| \isa{type} \verb|)| \isa{longid} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}longid\ \ {\isacharbar}\ \ type\isactrlsup {\isacharparenleft}\isactrlsup {\isadigit{1}}\isactrlsup {\isadigit{0}}\isactrlsup {\isadigit{0}}\isactrlsup {\isadigit{0}}\isactrlsup {\isacharparenright}\ longid{\isachardoublequote}} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \verb|(| \isa{type} \verb|,| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|,| \isa{type} \verb|)| \isa{longid} \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}type\isactrlsup {\isacharparenleft}\isactrlsup {\isadigit{1}}\isactrlsup {\isacharparenright}{\isachardoublequote}} \verb|=>| \isa{type} & \isa{{\isachardoublequote}{\isacharparenleft}{\isadigit{0}}{\isacharparenright}{\isachardoublequote}} \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}type\isactrlsup {\isacharparenleft}\isactrlsup {\isadigit{1}}\isactrlsup {\isacharparenright}{\isachardoublequote}} \isa{{\isachardoublequote}{\isasymRightarrow}{\isachardoublequote}} \isa{type} & \isa{{\isachardoublequote}{\isacharparenleft}{\isadigit{0}}{\isacharparenright}{\isachardoublequote}} \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \verb|[| \isa{type} \verb|,| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|,| \isa{type} \verb|]| \verb|=>| \isa{type} & \isa{{\isachardoublequote}{\isacharparenleft}{\isadigit{0}}{\isacharparenright}{\isachardoublequote}} \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \verb|[| \isa{type} \verb|,| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|,| \isa{type} \verb|]| \isa{{\isachardoublequote}{\isasymRightarrow}{\isachardoublequote}} \isa{type} & \isa{{\isachardoublequote}{\isacharparenleft}{\isadigit{0}}{\isacharparenright}{\isachardoublequote}} \\\\
- \indexdef{inner}{syntax}{sort}\hypertarget{syntax.inner.sort}{\hyperlink{syntax.inner.sort}{\mbox{\isa{sort}}}} & = & \isa{{\isachardoublequote}id\ \ {\isacharbar}\ \ longid\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|{}|\isa{{\isachardoublequote}\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|{| \isa{{\isachardoublequote}{\isacharparenleft}id\ {\isacharbar}\ longid{\isacharparenright}{\isachardoublequote}} \verb|,| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|,| \isa{{\isachardoublequote}{\isacharparenleft}id\ {\isacharbar}\ longid{\isacharparenright}{\isachardoublequote}} \verb|}| \\
+ \indexdef{inner}{syntax}{sort}\hypertarget{syntax.inner.sort}{\hyperlink{syntax.inner.sort}{\mbox{\isa{sort}}}} & = & \isa{{\isachardoublequote}id\ \ {\isacharbar}\ \ longid\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|{}| \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \verb|{| \isa{{\isachardoublequote}{\isacharparenleft}id\ {\isacharbar}\ longid{\isacharparenright}{\isachardoublequote}} \verb|,| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|,| \isa{{\isachardoublequote}{\isacharparenleft}id\ {\isacharbar}\ longid{\isacharparenright}{\isachardoublequote}} \verb|}| \\
\end{supertabular}
\end{center}
--- a/doc-src/IsarRef/Thy/document/Introduction.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Introduction.tex Wed Mar 04 10:45:52 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{Introduction}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
@@ -32,27 +30,27 @@
The \emph{Isabelle} system essentially provides a generic
infrastructure for building deductive systems (programmed in
Standard ML), with a special focus on interactive theorem proving in
- higher-order logics. In the olden days even end-users would refer
- to certain ML functions (goal commands, tactics, tacticals etc.) to
- pursue their everyday theorem proving tasks
- \cite{isabelle-intro,isabelle-ref}.
+ higher-order logics. Many years ago, even end-users would refer to
+ certain ML functions (goal commands, tactics, tacticals etc.) to
+ pursue their everyday theorem proving tasks.
In contrast \emph{Isar} provides an interpreted language environment
of its own, which has been specifically tailored for the needs of
theory and proof development. Compared to raw ML, the Isabelle/Isar
top-level provides a more robust and comfortable development
- platform, with proper support for theory development graphs,
- single-step transactions with unlimited undo, etc. The
- Isabelle/Isar version of the \emph{Proof~General} user interface
- \cite{proofgeneral,Aspinall:TACAS:2000} provides an adequate
- front-end for interactive theory and proof development in this
- advanced theorem proving environment.
+ platform, with proper support for theory development graphs, managed
+ transactions with unlimited undo, etc. The Isabelle/Isar version of
+ the \emph{Proof~General} user interface
+ \cite{proofgeneral,Aspinall:TACAS:2000} provides a decent front-end
+ for interactive theory and proof development in this advanced
+ theorem proving environment, even though it is somewhat biased
+ towards old-style proof scripts.
\medskip Apart from the technical advances over bare-bones ML
programming, the main purpose of the Isar language is to provide a
conceptually different view on machine-checked proofs
- \cite{Wenzel:1999:TPHOL,Wenzel-PhD}. ``Isar'' stands for
- ``Intelligible semi-automated reasoning''. Drawing from both the
+ \cite{Wenzel:1999:TPHOL,Wenzel-PhD}. \emph{Isar} stands for
+ \emph{Intelligible semi-automated reasoning}. Drawing from both the
traditions of informal mathematical proof texts and high-level
programming languages, Isar offers a versatile environment for
structured formal proof documents. Thus properly written Isar
@@ -67,12 +65,12 @@
Despite its grand design of structured proof texts, Isar is able to
assimilate the old tactical style as an ``improper'' sub-language.
This provides an easy upgrade path for existing tactic scripts, as
- well as additional means for interactive experimentation and
- debugging of structured proofs. Isabelle/Isar supports a broad
- range of proof styles, both readable and unreadable ones.
+ well as some means for interactive experimentation and debugging of
+ structured proofs. Isabelle/Isar supports a broad range of proof
+ styles, both readable and unreadable ones.
- \medskip The Isabelle/Isar framework \cite{Wenzel:2006:Festschrift}
- is generic and should work reasonably well for any Isabelle
+ \medskip The generic Isabelle/Isar framework (see
+ \chref{ch:isar-framework}) works reasonably well for any Isabelle
object-logic that conforms to the natural deduction view of the
Isabelle/Pure framework. Specific language elements introduced by
the major object-logics are described in \chref{ch:hol}
@@ -92,207 +90,6 @@
\end{isamarkuptext}%
\isamarkuptrue%
%
-\isamarkupsection{User interfaces%
-}
-\isamarkuptrue%
-%
-\isamarkupsubsection{Terminal sessions%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-The Isabelle \texttt{tty} tool provides a very interface for running
- the Isar interaction loop, with some support for command line
- editing. For example:
-\begin{ttbox}
-isabelle tty\medskip
-{\out Welcome to Isabelle/HOL (Isabelle2008)}\medskip
-theory Foo imports Main begin;
-definition foo :: nat where "foo == 1";
-lemma "0 < foo" by (simp add: foo_def);
-end;
-\end{ttbox}
-
- Any Isabelle/Isar command may be retracted by \hyperlink{command.undo}{\mbox{\isa{\isacommand{undo}}}}.
- See the Isabelle/Isar Quick Reference (\appref{ap:refcard}) for a
- comprehensive overview of available commands and other language
- elements.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsubsection{Emacs Proof General%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-Plain TTY-based interaction as above used to be quite feasible with
- traditional tactic based theorem proving, but developing Isar
- documents really demands some better user-interface support. The
- Proof~General environment by David Aspinall
- \cite{proofgeneral,Aspinall:TACAS:2000} offers a generic Emacs
- interface for interactive theorem provers that organizes all the
- cut-and-paste and forward-backward walk through the text in a very
- neat way. In Isabelle/Isar, the current position within a partial
- proof document is equally important than the actual proof state.
- Thus Proof~General provides the canonical working environment for
- Isabelle/Isar, both for getting acquainted (e.g.\ by replaying
- existing Isar documents) and for production work.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsubsubsection{Proof~General as default Isabelle interface%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-The Isabelle interface wrapper script provides an easy way to invoke
- Proof~General (including XEmacs or GNU Emacs). The default
- configuration of Isabelle is smart enough to detect the
- Proof~General distribution in several canonical places (e.g.\
- \verb|$ISABELLE_HOME/contrib/ProofGeneral|). Thus the
- capital \verb|Isabelle| executable would already refer to the
- \verb|ProofGeneral/isar| interface without further ado. The
- Isabelle interface script provides several options; pass \verb|-?| to see its usage.
-
- With the proper Isabelle interface setup, Isar documents may now be edited by
- visiting appropriate theory files, e.g.\
-\begin{ttbox}
-Isabelle \({\langle}isabellehome{\rangle}\)/src/HOL/Isar_examples/Summation.thy
-\end{ttbox}
- Beginners may note the tool bar for navigating forward and backward
- through the text (this depends on the local Emacs installation).
- Consult the Proof~General documentation \cite{proofgeneral} for
- further basic command sequences, in particular ``\verb|C-c C-return|''
- and ``\verb|C-c u|''.
-
- \medskip Proof~General may be also configured manually by giving
- Isabelle settings like this (see also \cite{isabelle-sys}):
-
-\begin{ttbox}
-ISABELLE_INTERFACE=\$ISABELLE_HOME/contrib/ProofGeneral/isar/interface
-PROOFGENERAL_OPTIONS=""
-\end{ttbox}
- You may have to change \verb|$ISABELLE_HOME/contrib/ProofGeneral| to the actual installation
- directory of Proof~General.
-
- \medskip Apart from the Isabelle command line, defaults for
- interface options may be given by the \verb|PROOFGENERAL_OPTIONS|
- setting. For example, the Emacs executable to be used may be
- configured in Isabelle's settings like this:
-\begin{ttbox}
-PROOFGENERAL_OPTIONS="-p xemacs-mule"
-\end{ttbox}
-
- Occasionally, a user's \verb|~/.emacs| file contains code
- that is incompatible with the (X)Emacs version used by
- Proof~General, causing the interface startup to fail prematurely.
- Here the \verb|-u false| option helps to get the interface
- process up and running. Note that additional Lisp customization
- code may reside in \verb|proofgeneral-settings.el| of
- \verb|$ISABELLE_HOME/etc| or \verb|$ISABELLE_HOME_USER/etc|.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsubsubsection{The X-Symbol package%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-Proof~General incorporates a version of the Emacs X-Symbol package
- \cite{x-symbol}, which handles proper mathematical symbols displayed
- on screen. Pass option \verb|-x true| to the Isabelle
- interface script, or check the appropriate Proof~General menu
- setting by hand. The main challenge of getting X-Symbol to work
- properly is the underlying (semi-automated) X11 font setup.
-
- \medskip Using proper mathematical symbols in Isabelle theories can
- be very convenient for readability of large formulas. On the other
- hand, the plain ASCII sources easily become somewhat unintelligible.
- For example, \isa{{\isachardoublequote}{\isasymLongrightarrow}{\isachardoublequote}} would appear as \verb|\<Longrightarrow>| according
- the default set of Isabelle symbols. Nevertheless, the Isabelle
- document preparation system (see \chref{ch:document-prep}) will be
- happy to print non-ASCII symbols properly. It is even possible to
- invent additional notation beyond the display capabilities of Emacs
- and X-Symbol.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsection{Isabelle/Isar theories%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-Isabelle/Isar offers the following main improvements over classic
- Isabelle.
-
- \begin{enumerate}
-
- \item A \emph{theory format} that integrates specifications and
- proofs, supporting interactive development and unlimited undo
- operation.
-
- \item A \emph{formal proof document language} designed to support
- intelligible semi-automated reasoning. Instead of putting together
- unreadable tactic scripts, the author is enabled to express the
- reasoning in way that is close to usual mathematical practice. The
- old tactical style has been assimilated as ``improper'' language
- elements.
-
- \item A simple document preparation system, for typesetting formal
- developments together with informal text. The resulting
- hyper-linked PDF documents are equally well suited for WWW
- presentation and as printed copies.
-
- \end{enumerate}
-
- The Isar proof language is embedded into the new theory format as a
- proper sub-language. Proof mode is entered by stating some
- \hyperlink{command.theorem}{\mbox{\isa{\isacommand{theorem}}}} or \hyperlink{command.lemma}{\mbox{\isa{\isacommand{lemma}}}} at the theory level, and
- left again with the final conclusion (e.g.\ via \hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}}).
- A few theory specification mechanisms also require some proof, such
- as HOL's \hyperlink{command.typedef}{\mbox{\isa{\isacommand{typedef}}}} which demands non-emptiness of the
- representing sets.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsection{How to write Isar proofs anyway? \label{sec:isar-howto}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-This is one of the key questions, of course. First of all, the
- tactic script emulation of Isabelle/Isar essentially provides a
- clarified version of the very same unstructured proof style of
- classic Isabelle. Old-time users should quickly become acquainted
- with that (slightly degenerative) view of Isar.
-
- Writing \emph{proper} Isar proof texts targeted at human readers is
- quite different, though. Experienced users of the unstructured
- style may even have to unlearn some of their habits to master proof
- composition in Isar. In contrast, new users with less experience in
- old-style tactical proving, but a good understanding of mathematical
- proof in general, often get started easier.
-
- \medskip The present text really is only a reference manual on
- Isabelle/Isar, not a tutorial. Nevertheless, we will attempt to
- give some clues of how the concepts introduced here may be put into
- practice. Especially note that \appref{ap:refcard} provides a quick
- reference card of the most common Isabelle/Isar language elements.
-
- Further issues concerning the Isar concepts are covered in the
- literature
- \cite{Wenzel:1999:TPHOL,Wiedijk:2000:MV,Bauer-Wenzel:2000:HB,Bauer-Wenzel:2001}.
- The author's PhD thesis \cite{Wenzel-PhD} presently provides the
- most complete exposition of Isar foundations, techniques, and
- applications. A number of example applications are distributed with
- Isabelle, and available via the Isabelle WWW library (e.g.\
- \url{http://isabelle.in.tum.de/library/}). The ``Archive of Formal
- Proofs'' \url{http://afp.sourceforge.net/} also provides plenty of
- examples, both in proper Isar proof style and unstructured tactic
- scripts.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
\isadelimtheory
%
\endisadelimtheory
--- a/doc-src/IsarRef/Thy/document/ML_Tactic.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/ML_Tactic.tex Wed Mar 04 10:45:52 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{ML{\isacharunderscore}Tactic}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
--- a/doc-src/IsarRef/Thy/document/Misc.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Misc.tex Wed Mar 04 10:45:52 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{Misc}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
--- a/doc-src/IsarRef/Thy/document/Outer_Syntax.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Outer_Syntax.tex Wed Mar 04 10:45:52 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{Outer{\isacharunderscore}Syntax}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
@@ -185,10 +183,10 @@
Isabelle as \verb|\<forall>|. There are infinitely many Isabelle
symbols like this, although proper presentation is left to front-end
tools such as {\LaTeX} or Proof~General with the X-Symbol package.
- A list of standard Isabelle symbols that work well with these tools
- is given in \appref{app:symbols}. Note that \verb|\<lambda>| does
- not belong to the \isa{letter} category, since it is already used
- differently in the Pure term language.%
+ A list of predefined Isabelle symbols that work well with these
+ tools is given in \appref{app:symbols}. Note that \verb|\<lambda>|
+ does not belong to the \isa{letter} category, since it is already
+ used differently in the Pure term language.%
\end{isamarkuptext}%
\isamarkuptrue%
%
--- a/doc-src/IsarRef/Thy/document/Proof.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Proof.tex Wed Mar 04 10:45:52 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{Proof}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
@@ -20,7 +18,7 @@
%
\endisadelimtheory
%
-\isamarkupchapter{Proofs%
+\isamarkupchapter{Proofs \label{ch:proofs}%
}
\isamarkuptrue%
%
@@ -28,8 +26,8 @@
Proof commands perform transitions of Isar/VM machine
configurations, which are block-structured, consisting of a stack of
nodes with three main components: logical proof context, current
- facts, and open goals. Isar/VM transitions are \emph{typed}
- according to the following three different modes of operation:
+ facts, and open goals. Isar/VM transitions are typed according to
+ the following three different modes of operation:
\begin{description}
@@ -49,13 +47,17 @@
\end{description}
- The proof mode indicator may be read as a verb telling the writer
- what kind of operation may be performed next. The corresponding
- typings of proof commands restricts the shape of well-formed proof
- texts to particular command sequences. So dynamic arrangements of
- commands eventually turn out as static texts of a certain structure.
- \Appref{ap:refcard} gives a simplified grammar of the overall
- (extensible) language emerging that way.%
+ The proof mode indicator may be understood as an instruction to the
+ writer, telling what kind of operation may be performed next. The
+ corresponding typings of proof commands restrict the shape of
+ well-formed proof texts to particular command sequences. So dynamic
+ arrangements of commands eventually turn out as static texts of a
+ certain structure.
+
+ \Appref{ap:refcard} gives a simplified grammar of the (extensible)
+ language emerging that way from the different types of proof
+ commands. The main ideas of the overall Isar framework are
+ explained in \chref{ch:isar-framework}.%
\end{isamarkuptext}%
\isamarkuptrue%
%
@@ -691,7 +693,6 @@
\indexdef{}{method}{assumption}\hypertarget{method.assumption}{\hyperlink{method.assumption}{\mbox{\isa{assumption}}}} & : & \isa{method} \\
\indexdef{}{method}{this}\hypertarget{method.this}{\hyperlink{method.this}{\mbox{\isa{this}}}} & : & \isa{method} \\
\indexdef{}{method}{rule}\hypertarget{method.rule}{\hyperlink{method.rule}{\mbox{\isa{rule}}}} & : & \isa{method} \\
- \indexdef{}{method}{iprover}\hypertarget{method.iprover}{\hyperlink{method.iprover}{\mbox{\isa{iprover}}}} & : & \isa{method} \\[0.5ex]
\indexdef{Pure}{attribute}{intro}\hypertarget{attribute.Pure.intro}{\hyperlink{attribute.Pure.intro}{\mbox{\isa{intro}}}} & : & \isa{attribute} \\
\indexdef{Pure}{attribute}{elim}\hypertarget{attribute.Pure.elim}{\hyperlink{attribute.Pure.elim}{\mbox{\isa{elim}}}} & : & \isa{attribute} \\
\indexdef{Pure}{attribute}{dest}\hypertarget{attribute.Pure.dest}{\hyperlink{attribute.Pure.dest}{\mbox{\isa{dest}}}} & : & \isa{attribute} \\
@@ -706,8 +707,6 @@
;
'rule' thmrefs?
;
- 'iprover' ('!' ?) (rulemod *)
- ;
rulemod: ('intro' | 'elim' | 'dest') ((('!' | () | '?') nat?) | 'del') ':' thmrefs
;
('intro' | 'elim' | 'dest') ('!' | () | '?') nat?
@@ -762,26 +761,11 @@
default behavior of \hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}} and ``\hyperlink{command.ddot}{\mbox{\isa{\isacommand{{\isachardot}{\isachardot}}}}}''
(double-dot) steps (see \secref{sec:proof-steps}).
- \item \hyperlink{method.iprover}{\mbox{\isa{iprover}}} performs intuitionistic proof search,
- depending on specifically declared rules from the context, or given
- as explicit arguments. Chained facts are inserted into the goal
- before commencing proof search; ``\hyperlink{method.iprover}{\mbox{\isa{iprover}}}\isa{{\isachardoublequote}{\isacharbang}{\isachardoublequote}}''
- means to include the current \hyperlink{fact.prems}{\mbox{\isa{prems}}} as well.
-
- Rules need to be classified as \hyperlink{attribute.Pure.intro}{\mbox{\isa{intro}}},
- \hyperlink{attribute.Pure.elim}{\mbox{\isa{elim}}}, or \hyperlink{attribute.Pure.dest}{\mbox{\isa{dest}}}; here the
- ``\isa{{\isachardoublequote}{\isacharbang}{\isachardoublequote}}'' indicator refers to ``safe'' rules, which may be
- applied aggressively (without considering back-tracking later).
- Rules declared with ``\isa{{\isachardoublequote}{\isacharquery}{\isachardoublequote}}'' are ignored in proof search (the
- single-step \hyperlink{method.rule}{\mbox{\isa{rule}}} method still observes these). An
- explicit weight annotation may be given as well; otherwise the
- number of rule premises will be taken into account here.
-
\item \hyperlink{attribute.Pure.intro}{\mbox{\isa{intro}}}, \hyperlink{attribute.Pure.elim}{\mbox{\isa{elim}}}, and
\hyperlink{attribute.Pure.dest}{\mbox{\isa{dest}}} declare introduction, elimination, and
- destruct rules, to be used with the \hyperlink{method.rule}{\mbox{\isa{rule}}} and \hyperlink{method.iprover}{\mbox{\isa{iprover}}} methods. Note that the latter will ignore rules declared
- with ``\isa{{\isachardoublequote}{\isacharquery}{\isachardoublequote}}'', while ``\isa{{\isachardoublequote}{\isacharbang}{\isachardoublequote}}'' are used most
- aggressively.
+ destruct rules, to be used with method \hyperlink{method.rule}{\mbox{\isa{rule}}}, and similar
+ tools. Note that the latter will ignore rules declared with
+ ``\isa{{\isachardoublequote}{\isacharquery}{\isachardoublequote}}'', while ``\isa{{\isachardoublequote}{\isacharbang}{\isachardoublequote}}'' are used most aggressively.
The classical reasoner (see \secref{sec:classical}) introduces its
own variants of these attributes; use qualified names to access the
@@ -966,7 +950,7 @@
\begin{matharray}{l}
\isa{{\isachardoublequote}{\isasymlangle}using\ b\isactrlsub {\isadigit{1}}\ {\isasymdots}\ b\isactrlsub k{\isasymrangle}{\isachardoublequote}}~~\hyperlink{command.obtain}{\mbox{\isa{\isacommand{obtain}}}}~\isa{{\isachardoublequote}x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ x\isactrlsub m\ {\isasymWHERE}\ a{\isacharcolon}\ {\isasymphi}\isactrlsub {\isadigit{1}}\ {\isasymdots}\ {\isasymphi}\isactrlsub n\ \ {\isasymlangle}proof{\isasymrangle}\ {\isasymequiv}{\isachardoublequote}} \\[1ex]
\quad \hyperlink{command.have}{\mbox{\isa{\isacommand{have}}}}~\isa{{\isachardoublequote}{\isasymAnd}thesis{\isachardot}\ {\isacharparenleft}{\isasymAnd}x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ x\isactrlsub m{\isachardot}\ {\isasymphi}\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymphi}\isactrlsub n\ {\isasymLongrightarrow}\ thesis{\isacharparenright}\ {\isasymLongrightarrow}\ thesis{\isachardoublequote}} \\
- \quad \hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}~\isa{succeed} \\
+ \quad \hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}~\hyperlink{method.succeed}{\mbox{\isa{succeed}}} \\
\qquad \hyperlink{command.fix}{\mbox{\isa{\isacommand{fix}}}}~\isa{thesis} \\
\qquad \hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}~\isa{{\isachardoublequote}that\ {\isacharbrackleft}Pure{\isachardot}intro{\isacharquery}{\isacharbrackright}{\isacharcolon}\ {\isasymAnd}x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ x\isactrlsub m{\isachardot}\ {\isasymphi}\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymphi}\isactrlsub n\ {\isasymLongrightarrow}\ thesis{\isachardoublequote}} \\
\qquad \hyperlink{command.then}{\mbox{\isa{\isacommand{then}}}}~\hyperlink{command.show}{\mbox{\isa{\isacommand{show}}}}~\isa{thesis} \\
--- a/doc-src/IsarRef/Thy/document/Quick_Reference.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Quick_Reference.tex Wed Mar 04 10:45:52 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{Quick{\isacharunderscore}Reference}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
@@ -52,7 +50,7 @@
\begin{tabular}{rcl}
\isa{{\isachardoublequote}theory{\isasymdash}stmt{\isachardoublequote}} & = & \hyperlink{command.theorem}{\mbox{\isa{\isacommand{theorem}}}}~\isa{{\isachardoublequote}name{\isacharcolon}\ props\ proof\ \ {\isacharbar}{\isachardoublequote}}~~\hyperlink{command.definition}{\mbox{\isa{\isacommand{definition}}}}~\isa{{\isachardoublequote}{\isasymdots}\ \ {\isacharbar}\ \ {\isasymdots}{\isachardoublequote}} \\[1ex]
- \isa{{\isachardoublequote}proof{\isachardoublequote}} & = & \isa{{\isachardoublequote}prfx\isactrlsup {\isacharasterisk}{\isachardoublequote}}~\hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}~\isa{{\isachardoublequote}method\ stmt\isactrlsup {\isacharasterisk}{\isachardoublequote}}~\hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}}~\isa{method} \\
+ \isa{{\isachardoublequote}proof{\isachardoublequote}} & = & \isa{{\isachardoublequote}prfx\isactrlsup {\isacharasterisk}{\isachardoublequote}}~\hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}~\isa{{\isachardoublequote}method\isactrlsup {\isacharquery}\ stmt\isactrlsup {\isacharasterisk}{\isachardoublequote}}~\hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}}~\isa{{\isachardoublequote}method\isactrlsup {\isacharquery}{\isachardoublequote}} \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}prfx\isactrlsup {\isacharasterisk}{\isachardoublequote}}~\hyperlink{command.done}{\mbox{\isa{\isacommand{done}}}} \\[1ex]
\isa{prfx} & = & \hyperlink{command.apply}{\mbox{\isa{\isacommand{apply}}}}~\isa{method} \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \hyperlink{command.using}{\mbox{\isa{\isacommand{using}}}}~\isa{{\isachardoublequote}facts{\isachardoublequote}} \\
--- a/doc-src/IsarRef/Thy/document/Spec.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Spec.tex Wed Mar 04 10:45:52 2009 +0100
@@ -22,6 +22,23 @@
}
\isamarkuptrue%
%
+\begin{isamarkuptext}%
+The Isabelle/Isar theory format integrates specifications and
+ proofs, supporting interactive development with unlimited undo
+ operation. There is an integrated document preparation system (see
+ \chref{ch:document-prep}), for typesetting formal developments
+ together with informal text. The resulting hyper-linked PDF
+ documents can be used both for WWW presentation and as printed copies.
+
+ The Isar proof language (see \chref{ch:proofs}) is embedded into the
+ theory language as a proper sub-language. Proof mode is entered by
+ stating some \hyperlink{command.theorem}{\mbox{\isa{\isacommand{theorem}}}} or \hyperlink{command.lemma}{\mbox{\isa{\isacommand{lemma}}}} at the theory
+ level, and left again with the final conclusion (e.g.\ via \hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}}). Some theory specification mechanisms also require a proof,
+ such as \hyperlink{command.typedef}{\mbox{\isa{\isacommand{typedef}}}} in HOL, which demands non-emptiness of
+ the representing sets.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
\isamarkupsection{Defining theories \label{sec:begin-thy}%
}
\isamarkuptrue%
@@ -127,8 +144,9 @@
\hyperlink{command.global.end}{\mbox{\isa{\isacommand{end}}}} has a different meaning: it concludes the
theory itself (\secref{sec:begin-thy}).
- \item \isa{{\isachardoublequote}{\isacharparenleft}{\isasymIN}\ c{\isacharparenright}{\isachardoublequote}} given after any local theory command
- specifies an immediate target, e.g.\ ``\hyperlink{command.definition}{\mbox{\isa{\isacommand{definition}}}}~\isa{{\isachardoublequote}{\isacharparenleft}{\isasymIN}\ c{\isacharparenright}\ {\isasymdots}{\isachardoublequote}}'' or ``\hyperlink{command.theorem}{\mbox{\isa{\isacommand{theorem}}}}~\isa{{\isachardoublequote}{\isacharparenleft}{\isasymIN}\ c{\isacharparenright}\ {\isasymdots}{\isachardoublequote}}''. This works both in a local or
+ \item \isa{{\isachardoublequote}{\isacharparenleft}{\isachardoublequote}}\indexdef{}{keyword}{in}\hypertarget{keyword.in}{\hyperlink{keyword.in}{\mbox{\isa{\isakeyword{in}}}}}~\isa{{\isachardoublequote}c{\isacharparenright}{\isachardoublequote}} given after any
+ local theory command specifies an immediate target, e.g.\
+ ``\hyperlink{command.definition}{\mbox{\isa{\isacommand{definition}}}}~\isa{{\isachardoublequote}{\isacharparenleft}{\isasymIN}\ c{\isacharparenright}\ {\isasymdots}{\isachardoublequote}}'' or ``\hyperlink{command.theorem}{\mbox{\isa{\isacommand{theorem}}}}~\isa{{\isachardoublequote}{\isacharparenleft}{\isasymIN}\ c{\isacharparenright}\ {\isasymdots}{\isachardoublequote}}''. This works in both a local and a
global theory context; the current target context will be suspended
for this command only. Note that ``\isa{{\isachardoublequote}{\isacharparenleft}{\isasymIN}\ {\isacharminus}{\isacharparenright}{\isachardoublequote}}'' will
always produce a global result independently of the current target
@@ -792,8 +810,8 @@
\end{matharray}
\begin{mldecls}
- \indexml{bind\_thms}\verb|bind_thms: string * thm list -> unit| \\
- \indexml{bind\_thm}\verb|bind_thm: string * thm -> unit| \\
+ \indexdef{}{ML}{bind\_thms}\verb|bind_thms: string * thm list -> unit| \\
+ \indexdef{}{ML}{bind\_thm}\verb|bind_thm: string * thm -> unit| \\
\end{mldecls}
\begin{rail}
@@ -1178,7 +1196,7 @@
\end{description}
- See \hyperlink{file.~~/src/FOL/ex/IffOracle.thy}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}FOL{\isacharslash}ex{\isacharslash}IffOracle{\isachardot}thy}}}} for a worked example of
+ See \hyperlink{file.~~/src/FOL/ex/Iff-Oracle.thy}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}FOL{\isacharslash}ex{\isacharslash}Iff{\isacharunderscore}Oracle{\isachardot}thy}}}} for a worked example of
defining a new primitive rule as oracle, and turning it into a proof
method.%
\end{isamarkuptext}%
--- a/doc-src/IsarRef/Thy/document/Symbols.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Symbols.tex Wed Mar 04 10:45:52 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{Symbols}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
@@ -20,7 +18,7 @@
%
\endisadelimtheory
%
-\isamarkupchapter{Standard Isabelle symbols \label{app:symbols}%
+\isamarkupchapter{Predefined Isabelle symbols \label{app:symbols}%
}
\isamarkuptrue%
%
--- a/doc-src/IsarRef/Thy/document/ZF_Specific.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/ZF_Specific.tex Wed Mar 04 10:45:52 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{ZF{\isacharunderscore}Specific}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
--- a/doc-src/IsarRef/isar-ref.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/isar-ref.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,6 +1,3 @@
-
-%% $Id$
-
\documentclass[12pt,a4paper,fleqn]{report}
\usepackage{amssymb}
\usepackage[greek,english]{babel}
@@ -27,12 +24,13 @@
With Contributions by
Clemens Ballarin,
Stefan Berghofer, \\
+ Timothy Bourke,
Lucas Dixon,
- Florian Haftmann,
- Gerwin Klein, \\
+ Florian Haftmann, \\
+ Gerwin Klein,
Alexander Krauss,
- Tobias Nipkow,
- David von Oheimb, \\
+ Tobias Nipkow, \\
+ David von Oheimb,
Larry Paulson,
and Sebastian Skalberg
}
@@ -82,7 +80,11 @@
\pagenumbering{roman} \tableofcontents \clearfirst
+\part{Basic Concepts}
\input{Thy/document/Introduction.tex}
+\input{Thy/document/Framework.tex}
+\input{Thy/document/First_Order_Logic.tex}
+\part{General Language Elements}
\input{Thy/document/Outer_Syntax.tex}
\input{Thy/document/Document_Preparation.tex}
\input{Thy/document/Spec.tex}
@@ -90,10 +92,12 @@
\input{Thy/document/Inner_Syntax.tex}
\input{Thy/document/Misc.tex}
\input{Thy/document/Generic.tex}
+\part{Object-Logics}
\input{Thy/document/HOL_Specific.tex}
\input{Thy/document/HOLCF_Specific.tex}
\input{Thy/document/ZF_Specific.tex}
+\part{Appendix}
\appendix
\input{Thy/document/Quick_Reference.tex}
\let\int\intorig
@@ -101,7 +105,7 @@
\input{Thy/document/ML_Tactic.tex}
\begingroup
- \bibliographystyle{plain} \small\raggedright\frenchspacing
+ \bibliographystyle{abbrv} \small\raggedright\frenchspacing
\bibliography{../manual}
\endgroup
--- a/doc-src/IsarRef/style.sty Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/IsarRef/style.sty Wed Mar 04 10:45:52 2009 +0100
@@ -1,6 +1,3 @@
-
-%% $Id$
-
%% toc
\newcommand{\tocentry}[1]{\cleardoublepage\phantomsection\addcontentsline{toc}{chapter}{#1}
\@mkboth{\MakeUppercase{#1}}{\MakeUppercase{#1}}}
@@ -18,12 +15,17 @@
%% ML
\newenvironment{mldecls}{\par\noindent\begingroup\def\isanewline{\\}\begin{tabular}{ll}}{\end{tabular}\medskip\endgroup}
-\newcommand{\indexml}[1]{\index{#1 (ML value)|bold}}
+
+%% Isar
+\newcommand{\isasymBBAR}{{\,\newdimen{\tmpheight}\settoheight\tmpheight{\isacharbar}\rule{1pt}{\tmpheight}\,}}
+\isafoldtag{noproof}\def\isafoldnoproof{~\isafold{proof}}
%% math
+\newcommand{\isasymstrut}{\isamath{\mathstrut}}
+\newcommand{\isasymvartheta}{\isamath{\,\theta}}
\newcommand{\isactrlvec}[1]{\emph{$\overline{#1}$}}
\renewcommand{\isadigit}[1]{\isamath{#1}}
-
+\newcommand{\text}[1]{\mbox{#1}}
%% global style options
\pagestyle{headings}
--- a/doc-src/Ref/Makefile Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/Ref/Makefile Wed Mar 04 10:45:52 2009 +0100
@@ -1,6 +1,3 @@
-#
-# $Id$
-#
## targets
@@ -12,16 +9,15 @@
include ../Makefile.in
NAME = ref
-FILES = ref.tex introduction.tex goals.tex tactic.tex tctical.tex \
- thm.tex theories.tex defining.tex syntax.tex substitution.tex \
- simplifier.tex classical.tex theory-syntax.tex \
- ../rail.sty ../proof.sty ../iman.sty ../extra.sty ../ttbox.sty ../manual.bib
+FILES = ref.tex introduction.tex tactic.tex tctical.tex thm.tex \
+ theories.tex defining.tex syntax.tex substitution.tex \
+ simplifier.tex classical.tex ../proof.sty ../iman.sty \
+ ../extra.sty ../ttbox.sty ../manual.bib
dvi: $(NAME).dvi
$(NAME).dvi: $(FILES) isabelle.eps
$(LATEX) $(NAME)
- $(RAIL) $(NAME)
$(BIBTEX) $(NAME)
$(LATEX) $(NAME)
$(LATEX) $(NAME)
@@ -32,7 +28,6 @@
$(NAME).pdf: $(FILES) isabelle.pdf
$(PDFLATEX) $(NAME)
- $(RAIL) $(NAME)
$(BIBTEX) $(NAME)
$(PDFLATEX) $(NAME)
$(PDFLATEX) $(NAME)
--- a/doc-src/Ref/classical.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/Ref/classical.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,4 +1,4 @@
-%% $Id$
+
\chapter{The Classical Reasoner}\label{chap:classical}
\index{classical reasoner|(}
\newcommand\ainfer[2]{\begin{array}{r@{\,}l}#2\\ \hline#1\end{array}}
@@ -28,29 +28,6 @@
be traced, and their components can be called directly; in this manner,
any proof can be viewed interactively.
-The simplest way to apply the classical reasoner (to subgoal~$i$) is to type
-\begin{ttbox}
-by (Blast_tac \(i\));
-\end{ttbox}
-This command quickly proves most simple formulas of the predicate calculus or
-set theory. To attempt to prove subgoals using a combination of
-rewriting and classical reasoning, try
-\begin{ttbox}
-auto(); \emph{\textrm{applies to all subgoals}}
-force i; \emph{\textrm{applies to one subgoal}}
-\end{ttbox}
-To do all obvious logical steps, even if they do not prove the
-subgoal, type one of the following:
-\begin{ttbox}
-by Safe_tac; \emph{\textrm{applies to all subgoals}}
-by (Clarify_tac \(i\)); \emph{\textrm{applies to one subgoal}}
-\end{ttbox}
-
-
-You need to know how the classical reasoner works in order to use it
-effectively. There are many tactics to choose from, including
-{\tt Fast_tac} and \texttt{Best_tac}.
-
We shall first discuss the underlying principles, then present the classical
reasoner. Finally, we shall see how to instantiate it for new logics. The
logics FOL, ZF, HOL and HOLCF have it already installed.
--- a/doc-src/Ref/defining.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/Ref/defining.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,376 +1,5 @@
-%% $Id$
+
\chapter{Defining Logics} \label{Defining-Logics}
-This chapter explains how to define new formal systems --- in particular,
-their concrete syntax. While Isabelle can be regarded as a theorem prover
-for set theory, higher-order logic or the sequent calculus, its
-distinguishing feature is support for the definition of new logics.
-
-Isabelle logics are hierarchies of theories, which are described and
-illustrated in
-\iflabelundefined{sec:defining-theories}{{\em Introduction to Isabelle}}%
-{\S\ref{sec:defining-theories}}. That material, together with the theory
-files provided in the examples directories, should suffice for all simple
-applications. The easiest way to define a new theory is by modifying a
-copy of an existing theory.
-
-This chapter documents the meta-logic syntax, mixfix declarations and
-pretty printing. The extended examples in \S\ref{sec:min_logics}
-demonstrate the logical aspects of the definition of theories.
-
-
-\section{Priority grammars} \label{sec:priority_grammars}
-\index{priority grammars|(}
-
-A context-free grammar contains a set of {\bf nonterminal symbols}, a set of
-{\bf terminal symbols} and a set of {\bf productions}\index{productions}.
-Productions have the form ${A=\gamma}$, where $A$ is a nonterminal and
-$\gamma$ is a string of terminals and nonterminals. One designated
-nonterminal is called the {\bf start symbol}. The language defined by the
-grammar consists of all strings of terminals that can be derived from the
-start symbol by applying productions as rewrite rules.
-
-The syntax of an Isabelle logic is specified by a {\bf priority
- grammar}.\index{priorities} Each nonterminal is decorated by an integer
-priority, as in~$A^{(p)}$. A nonterminal $A^{(p)}$ in a derivation may be
-rewritten using a production $A^{(q)} = \gamma$ only if~$p \leq q$. Any
-priority grammar can be translated into a normal context free grammar by
-introducing new nonterminals and productions.
-
-Formally, a set of context free productions $G$ induces a derivation
-relation $\longrightarrow@G$. Let $\alpha$ and $\beta$ denote strings of
-terminal or nonterminal symbols. Then
-\[ \alpha\, A^{(p)}\, \beta ~\longrightarrow@G~ \alpha\,\gamma\,\beta \]
-if and only if $G$ contains some production $A^{(q)}=\gamma$ for~$p \leq q$.
-
-The following simple grammar for arithmetic expressions demonstrates how
-binding power and associativity of operators can be enforced by priorities.
-\begin{center}
-\begin{tabular}{rclr}
- $A^{(9)}$ & = & {\tt0} \\
- $A^{(9)}$ & = & {\tt(} $A^{(0)}$ {\tt)} \\
- $A^{(0)}$ & = & $A^{(0)}$ {\tt+} $A^{(1)}$ \\
- $A^{(2)}$ & = & $A^{(3)}$ {\tt*} $A^{(2)}$ \\
- $A^{(3)}$ & = & {\tt-} $A^{(3)}$
-\end{tabular}
-\end{center}
-The choice of priorities determines that {\tt -} binds tighter than {\tt *},
-which binds tighter than {\tt +}. Furthermore {\tt +} associates to the
-left and {\tt *} to the right.
-
-For clarity, grammars obey these conventions:
-\begin{itemize}
-\item All priorities must lie between~0 and \ttindex{max_pri}, which is a
- some fixed integer. Sometimes {\tt max_pri} is written as $\infty$.
-\item Priority 0 on the right-hand side and priority \ttindex{max_pri} on
- the left-hand side may be omitted.
-\item The production $A^{(p)} = \alpha$ is written as $A = \alpha~(p)$; the
- priority of the left-hand side actually appears in a column on the far
- right.
-\item Alternatives are separated by~$|$.
-\item Repetition is indicated by dots~(\dots) in an informal but obvious
- way.
-\end{itemize}
-
-Using these conventions and assuming $\infty=9$, the grammar
-takes the form
-\begin{center}
-\begin{tabular}{rclc}
-$A$ & = & {\tt0} & \hspace*{4em} \\
- & $|$ & {\tt(} $A$ {\tt)} \\
- & $|$ & $A$ {\tt+} $A^{(1)}$ & (0) \\
- & $|$ & $A^{(3)}$ {\tt*} $A^{(2)}$ & (2) \\
- & $|$ & {\tt-} $A^{(3)}$ & (3)
-\end{tabular}
-\end{center}
-\index{priority grammars|)}
-
-
-\begin{figure}\small
-\begin{center}
-\begin{tabular}{rclc}
-$any$ &=& $prop$ ~~$|$~~ $logic$ \\\\
-$prop$ &=& {\tt(} $prop$ {\tt)} \\
- &$|$& $prop^{(4)}$ {\tt::} $type$ & (3) \\
- &$|$& {\tt PROP} $aprop$ \\
- &$|$& $any^{(3)}$ {\tt ==} $any^{(2)}$ & (2) \\
- &$|$& $any^{(3)}$ {\tt =?=} $any^{(2)}$ & (2) \\
- &$|$& $prop^{(2)}$ {\tt ==>} $prop^{(1)}$ & (1) \\
- &$|$& {\tt[|} $prop$ {\tt;} \dots {\tt;} $prop$ {\tt|]} {\tt==>} $prop^{(1)}$ & (1) \\
- &$|$& {\tt!!} $idts$ {\tt.} $prop$ & (0) \\
- &$|$& {\tt OFCLASS} {\tt(} $type$ {\tt,} $logic$ {\tt)} \\\\
-$aprop$ &=& $id$ ~~$|$~~ $longid$ ~~$|$~~ $var$
- ~~$|$~~ $logic^{(\infty)}$ {\tt(} $any$ {\tt,} \dots {\tt,} $any$ {\tt)} \\\\
-$logic$ &=& {\tt(} $logic$ {\tt)} \\
- &$|$& $logic^{(4)}$ {\tt::} $type$ & (3) \\
- &$|$& $id$ ~~$|$~~ $longid$ ~~$|$~~ $var$
- ~~$|$~~ $logic^{(\infty)}$ {\tt(} $any$ {\tt,} \dots {\tt,} $any$ {\tt)} \\
- &$|$& {\tt \%} $pttrns$ {\tt.} $any^{(3)}$ & (3) \\
- &$|$& {\tt TYPE} {\tt(} $type$ {\tt)} \\\\
-$idts$ &=& $idt$ ~~$|$~~ $idt^{(1)}$ $idts$ \\\\
-$idt$ &=& $id$ ~~$|$~~ {\tt(} $idt$ {\tt)} \\
- &$|$& $id$ {\tt ::} $type$ & (0) \\\\
-$pttrns$ &=& $pttrn$ ~~$|$~~ $pttrn^{(1)}$ $pttrns$ \\\\
-$pttrn$ &=& $idt$ \\\\
-$type$ &=& {\tt(} $type$ {\tt)} \\
- &$|$& $tid$ ~~$|$~~ $tvar$ ~~$|$~~ $tid$ {\tt::} $sort$
- ~~$|$~~ $tvar$ {\tt::} $sort$ \\
- &$|$& $id$ ~~$|$~~ $type^{(\infty)}$ $id$
- ~~$|$~~ {\tt(} $type$ {\tt,} \dots {\tt,} $type$ {\tt)} $id$ \\
- &$|$& $longid$ ~~$|$~~ $type^{(\infty)}$ $longid$
- ~~$|$~~ {\tt(} $type$ {\tt,} \dots {\tt,} $type$ {\tt)} $longid$ \\
- &$|$& $type^{(1)}$ {\tt =>} $type$ & (0) \\
- &$|$& {\tt[} $type$ {\tt,} \dots {\tt,} $type$ {\tt]} {\tt=>} $type$&(0) \\\\
-$sort$ &=& $id$ ~~$|$~~ $longid$ ~~$|$~~ {\tt\ttlbrace\ttrbrace} ~~$|$~~
- {\tt\ttlbrace} $id$ ~$|$~ $longid${\tt,}\dots{\tt,} $id$ ~$|$~$longid$ {\tt\ttrbrace}
-\end{tabular}
-\index{*PROP symbol}
-\index{*== symbol}\index{*=?= symbol}\index{*==> symbol}
-\index{*:: symbol}\index{*=> symbol}
-\index{sort constraints}
-%the index command: a percent is permitted, but braces must match!
-\index{%@{\tt\%} symbol}
-\index{{}@{\tt\ttlbrace} symbol}\index{{}@{\tt\ttrbrace} symbol}
-\index{*[ symbol}\index{*] symbol}
-\index{*"!"! symbol}
-\index{*"["| symbol}
-\index{*"|"] symbol}
-\end{center}
-\caption{Meta-logic syntax}\label{fig:pure_gram}
-\end{figure}
-
-
-\section{The Pure syntax} \label{sec:basic_syntax}
-\index{syntax!Pure|(}
-
-At the root of all object-logics lies the theory \thydx{Pure}. It
-contains, among many other things, the Pure syntax. An informal account of
-this basic syntax (types, terms and formulae) appears in
-\iflabelundefined{sec:forward}{{\em Introduction to Isabelle}}%
-{\S\ref{sec:forward}}. A more precise description using a priority grammar
-appears in Fig.\ts\ref{fig:pure_gram}. It defines the following
-nonterminals:
-\begin{ttdescription}
- \item[\ndxbold{any}] denotes any term.
-
- \item[\ndxbold{prop}] denotes terms of type {\tt prop}. These are formulae
- of the meta-logic. Note that user constants of result type {\tt prop}
- (i.e.\ $c :: \ldots \To prop$) should always provide concrete syntax.
- Otherwise atomic propositions with head $c$ may be printed incorrectly.
-
- \item[\ndxbold{aprop}] denotes atomic propositions.
-
-%% FIXME huh!?
-% These typically
-% include the judgement forms of the object-logic; its definition
-% introduces a meta-level predicate for each judgement form.
-
- \item[\ndxbold{logic}] denotes terms whose type belongs to class
- \cldx{logic}, excluding type \tydx{prop}.
-
- \item[\ndxbold{idts}] denotes a list of identifiers, possibly constrained
- by types.
-
- \item[\ndxbold{pttrn}, \ndxbold{pttrns}] denote patterns for
- abstraction, cases etc. Initially the same as $idt$ and $idts$,
- these are intended to be augmented by user extensions.
-
- \item[\ndxbold{type}] denotes types of the meta-logic.
-
- \item[\ndxbold{sort}] denotes meta-level sorts.
-\end{ttdescription}
-
-\begin{warn}
- In {\tt idts}, note that \verb|x::nat y| is parsed as \verb|x::(nat y)|,
- treating {\tt y} like a type constructor applied to {\tt nat}. The
- likely result is an error message. To avoid this interpretation, use
- parentheses and write \verb|(x::nat) y|.
- \index{type constraints}\index{*:: symbol}
-
- Similarly, \verb|x::nat y::nat| is parsed as \verb|x::(nat y::nat)| and
- yields an error. The correct form is \verb|(x::nat) (y::nat)|.
-\end{warn}
-
-\begin{warn}
- Type constraints bind very weakly. For example, \verb!x<y::nat! is normally
- parsed as \verb!(x<y)::nat!, unless \verb$<$ has priority of 3 or less, in
- which case the string is likely to be ambiguous. The correct form is
- \verb!x<(y::nat)!.
-\end{warn}
-
-\subsection{Logical types and default syntax}\label{logical-types}
-\index{lambda calc@$\lambda$-calculus}
-
-Isabelle's representation of mathematical languages is based on the
-simply typed $\lambda$-calculus. All logical types, namely those of
-class \cldx{logic}, are automatically equipped with a basic syntax of
-types, identifiers, variables, parentheses, $\lambda$-abstraction and
-application.
-\begin{warn}
- Isabelle combines the syntaxes for all types of class \cldx{logic} by
- mapping all those types to the single nonterminal $logic$. Thus all
- productions of $logic$, in particular $id$, $var$ etc, become available.
-\end{warn}
-
-
-\subsection{Lexical matters}
-The parser does not process input strings directly. It operates on token
-lists provided by Isabelle's \bfindex{lexer}. There are two kinds of
-tokens: \bfindex{delimiters} and \bfindex{name tokens}.
-
-\index{reserved words}
-Delimiters can be regarded as reserved words of the syntax. You can
-add new ones when extending theories. In Fig.\ts\ref{fig:pure_gram} they
-appear in typewriter font, for example {\tt ==}, {\tt =?=} and
-{\tt PROP}\@.
-
-Name tokens have a predefined syntax. The lexer distinguishes six disjoint
-classes of names: \rmindex{identifiers}, \rmindex{unknowns}, type
-identifiers\index{type identifiers}, type unknowns\index{type unknowns},
-\rmindex{numerals}, \rmindex{strings}. They are denoted by \ndxbold{id},
-\ndxbold{var}, \ndxbold{tid}, \ndxbold{tvar}, \ndxbold{num}, \ndxbold{xnum},
-\ndxbold{xstr}, respectively. Typical examples are {\tt x}, {\tt ?x7}, {\tt
- 'a}, {\tt ?'a3}, {\tt \#42}, {\tt ''foo bar''}. Here is the precise syntax:
-\begin{eqnarray*}
-id & = & letter\,quasiletter^* \\
-longid & = & id (\mbox{\tt .}id)^+ \\
-var & = & \mbox{\tt ?}id ~~|~~ \mbox{\tt ?}id\mbox{\tt .}nat \\
-tid & = & \mbox{\tt '}id \\
-tvar & = & \mbox{\tt ?}tid ~~|~~
- \mbox{\tt ?}tid\mbox{\tt .}nat \\
-num & = & nat ~~|~~ \mbox{\tt-}nat ~~|~~ \verb,0x,\,hex^+ ~~|~~ \verb,0b,\,bin^+ \\
-xnum & = & \mbox{\tt \#}num \\
-xstr & = & \mbox{\tt ''~\dots~\tt ''} \\[1ex]
-letter & = & latin ~|~ \verb,\<,latin\verb,>, ~|~ \verb,\<,latin\,latin\verb,>, ~|~ greek ~| \\
- & & \verb,\<^isub>, ~|~ \verb,\<^isup>, \\
-quasiletter & = & letter ~|~ digit ~|~ \verb,_, ~|~ \verb,', \\
-latin & = & \verb,a, ~|~ \dots ~|~ \verb,z, ~|~ \verb,A, ~|~ \dots ~|~ \verb,Z, \\
-digit & = & \verb,0, ~|~ \dots ~|~ \verb,9, \\
-nat & = & digit^+ \\
-bin & = & \verb,0, ~|~ \verb,1, \\
-hex & = & digit ~|~ \verb,a, ~|~ \dots ~|~ \verb,f, ~|~ \verb,A, ~|~ \dots ~|~ \verb,F, \\
-greek & = & \verb,\<alpha>, ~|~ \verb,\<beta>, ~|~ \verb,\<gamma>, ~|~ \verb,\<delta>, ~| \\
- & & \verb,\<epsilon>, ~|~ \verb,\<zeta>, ~|~ \verb,\<eta>, ~|~ \verb,\<theta>, ~| \\
- & & \verb,\<iota>, ~|~ \verb,\<kappa>, ~|~ \verb,\<mu>, ~|~ \verb,\<nu>, ~| \\
- & & \verb,\<xi>, ~|~ \verb,\<pi>, ~|~ \verb,\<rho>, ~|~ \verb,\<sigma>, ~| \\
- & & \verb,\<tau>, ~|~ \verb,\<upsilon>, ~|~ \verb,\<phi>, ~|~ \verb,\<psi>, ~| \\
- & & \verb,\<omega>, ~|~ \verb,\<Gamma>, ~|~ \verb,\<Delta>, ~|~ \verb,\<Theta>, ~| \\
- & & \verb,\<Lambda>, ~|~ \verb,\<Xi>, ~|~ \verb,\<Pi>, ~|~ \verb,\<Sigma>, ~| \\
- & & \verb,\<Upsilon>, ~|~ \verb,\<Phi>, ~|~ \verb,\<Psi>, ~|~ \verb,\<Omega>, \\
-\end{eqnarray*}
-The lexer repeatedly takes the longest prefix of the input string that
-forms a valid token. A maximal prefix that is both a delimiter and a
-name is treated as a delimiter. Spaces, tabs, newlines and formfeeds
-are separators; they never occur within tokens, except those of class
-$xstr$.
-
-\medskip
-Delimiters need not be separated by white space. For example, if {\tt -}
-is a delimiter but {\tt --} is not, then the string {\tt --} is treated as
-two consecutive occurrences of the token~{\tt -}. In contrast, \ML\
-treats {\tt --} as a single symbolic name. The consequence of Isabelle's
-more liberal scheme is that the same string may be parsed in different ways
-after extending the syntax: after adding {\tt --} as a delimiter, the input
-{\tt --} is treated as a single token.
-
-A \ndxbold{var} or \ndxbold{tvar} describes an unknown, which is internally
-a pair of base name and index (\ML\ type \mltydx{indexname}). These
-components are either separated by a dot as in {\tt ?x.1} or {\tt ?x7.3} or
-run together as in {\tt ?x1}. The latter form is possible if the base name
-does not end with digits. If the index is 0, it may be dropped altogether:
-{\tt ?x} abbreviates both {\tt ?x0} and {\tt ?x.0}.
-
-Tokens of class $num$, $xnum$ or $xstr$ are not used by the meta-logic.
-Object-logics may provide numerals and string constants by adding appropriate
-productions and translation functions.
-
-\medskip
-Although name tokens are returned from the lexer rather than the parser, it
-is more logical to regard them as nonterminals. Delimiters, however, are
-terminals; they are just syntactic sugar and contribute nothing to the
-abstract syntax tree.
-
-
-\subsection{*Inspecting the syntax} \label{pg:print_syn}
-\begin{ttbox}
-syn_of : theory -> Syntax.syntax
-print_syntax : theory -> unit
-Syntax.print_syntax : Syntax.syntax -> unit
-Syntax.print_gram : Syntax.syntax -> unit
-Syntax.print_trans : Syntax.syntax -> unit
-\end{ttbox}
-The abstract type \mltydx{Syntax.syntax} allows manipulation of syntaxes
-in \ML. You can display values of this type by calling the following
-functions:
-\begin{ttdescription}
-\item[\ttindexbold{syn_of} {\it thy}] returns the syntax of the Isabelle
- theory~{\it thy} as an \ML\ value.
-
-\item[\ttindexbold{print_syntax} $thy$] uses {\tt Syntax.print_syntax}
- to display the syntax part of theory $thy$.
-
-\item[\ttindexbold{Syntax.print_syntax} {\it syn}] shows virtually all
- information contained in the syntax {\it syn}. The displayed output can
- be large. The following two functions are more selective.
-
-\item[\ttindexbold{Syntax.print_gram} {\it syn}] shows the grammar part
- of~{\it syn}, namely the lexicon, logical types and productions. These are
- discussed below.
-
-\item[\ttindexbold{Syntax.print_trans} {\it syn}] shows the translation
- part of~{\it syn}, namely the constants, parse/print macros and
- parse/print translations.
-\end{ttdescription}
-
-The output of the above print functions is divided into labelled sections.
-The grammar is represented by {\tt lexicon}, {\tt logtypes} and {\tt prods}.
-The rest refers to syntactic translations and macro expansion. Here is an
-explanation of the various sections.
-\begin{description}
- \item[{\tt lexicon}] lists the delimiters used for lexical
- analysis.\index{delimiters}
-
- \item[{\tt logtypes}] lists the types that are regarded the same as {\tt
- logic} syntactically. Thus types of object-logics (e.g.\ {\tt nat}, say)
- will be automatically equipped with the standard syntax of
- $\lambda$-calculus.
-
- \item[{\tt prods}] lists the \rmindex{productions} of the priority grammar.
- The nonterminal $A^{(n)}$ is rendered in {\sc ascii} as {\tt $A$[$n$]}.
- Each delimiter is quoted. Some productions are shown with {\tt =>} and
- an attached string. These strings later become the heads of parse
- trees; they also play a vital role when terms are printed (see
- \S\ref{sec:asts}).
-
- Productions with no strings attached are called {\bf copy
- productions}\indexbold{productions!copy}. Their right-hand side must
- have exactly one nonterminal symbol (or name token). The parser does
- not create a new parse tree node for copy productions, but simply
- returns the parse tree of the right-hand symbol.
-
- If the right-hand side consists of a single nonterminal with no
- delimiters, then the copy production is called a {\bf chain
- production}. Chain productions act as abbreviations:
- conceptually, they are removed from the grammar by adding new
- productions. Priority information attached to chain productions is
- ignored; only the dummy value $-1$ is displayed.
-
- \item[\ttindex{print_modes}] lists the alternative print modes
- provided by this syntax (see \S\ref{sec:prmodes}).
-
- \item[{\tt consts}, {\tt parse_rules}, {\tt print_rules}]
- relate to macros (see \S\ref{sec:macros}).
-
- \item[{\tt parse_ast_translation}, {\tt print_ast_translation}]
- list sets of constants that invoke translation functions for abstract
- syntax trees. Section \S\ref{sec:asts} below discusses this obscure
- matter.\index{constants!for translations}
-
- \item[{\tt parse_translation}, {\tt print_translation}] list the sets
- of constants that invoke translation functions for terms (see
- \S\ref{sec:tr_funs}).
-\end{description}
-\index{syntax!Pure|)}
-
\section{Mixfix declarations} \label{sec:mixfix}
\index{mixfix declarations|(}
@@ -515,49 +144,6 @@
syntax}. Try this as an exercise and study the changes in the
grammar.
-\subsection{The mixfix template}
-Let us now take a closer look at the string $template$ appearing in mixfix
-annotations. This string specifies a list of parsing and printing
-directives: delimiters\index{delimiters}, arguments, spaces, blocks of
-indentation and line breaks. These are encoded by the following character
-sequences:
-\index{pretty printing|(}
-\begin{description}
-\item[~$d$~] is a delimiter, namely a non-empty sequence of characters
- other than the special characters {\tt _}, {\tt(}, {\tt)} and~{\tt/}.
- Even these characters may appear if escaped; this means preceding it with
- a~{\tt '} (single quote). Thus you have to write {\tt ''} if you really
- want a single quote. Furthermore, a~{\tt '} followed by a space separates
- delimiters without extra white space being added for printing.
-
-\item[~{\tt_}~] is an argument position, which stands for a nonterminal symbol
- or name token.
-
-\item[~$s$~] is a non-empty sequence of spaces for printing. This and the
- following specifications do not affect parsing at all.
-
-\item[~{\tt(}$n$~] opens a pretty printing block. The optional number $n$
- specifies how much indentation to add when a line break occurs within the
- block. If {\tt(} is not followed by digits, the indentation defaults
- to~0.
-
-\item[~{\tt)}~] closes a pretty printing block.
-
-\item[~{\tt//}~] forces a line break.
-
-\item[~{\tt/}$s$~] allows a line break. Here $s$ stands for the string of
- spaces (zero or more) right after the {\tt /} character. These spaces
- are printed if the break is not taken.
-\end{description}
-For example, the template {\tt"(_ +/ _)"} specifies an infix operator.
-There are two argument positions; the delimiter~{\tt+} is preceded by a
-space and followed by a space or line break; the entire phrase is a pretty
-printing block. Other examples appear in Fig.\ts\ref{fig:set_trans} below.
-Isabelle's pretty printer resembles the one described in
-Paulson~\cite{paulson-ml2}.
-
-\index{pretty printing|)}
-
\subsection{Infixes}
\indexbold{infixes}
@@ -723,141 +309,6 @@
ambiguity should be eliminated by changing the grammar or the rule.
-\section{Example: some minimal logics} \label{sec:min_logics}
-\index{examples!of logic definitions}
-
-This section presents some examples that have a simple syntax. They
-demonstrate how to define new object-logics from scratch.
-
-First we must define how an object-logic syntax is embedded into the
-meta-logic. Since all theorems must conform to the syntax for~\ndx{prop}
-(see Fig.\ts\ref{fig:pure_gram}), that syntax has to be extended with the
-object-level syntax. Assume that the syntax of your object-logic defines a
-meta-type~\tydx{o} of formulae which refers to the nonterminal {\tt logic}.
-These formulae can now appear in axioms and theorems wherever \ndx{prop} does
-if you add the production
-\[ prop ~=~ logic. \]
-This is not supposed to be a copy production but an implicit coercion from
-formulae to propositions:
-\begin{ttbox}
-Base = Pure +
-types
- o
-arities
- o :: logic
-consts
- Trueprop :: o => prop ("_" 5)
-end
-\end{ttbox}
-The constant \cdx{Trueprop} (the name is arbitrary) acts as an invisible
-coercion function. Assuming this definition resides in a file {\tt Base.thy},
-you have to load it with the command {\tt use_thy "Base"}.
-
-One of the simplest nontrivial logics is {\bf minimal logic} of
-implication. Its definition in Isabelle needs no advanced features but
-illustrates the overall mechanism nicely:
-\begin{ttbox}
-Hilbert = Base +
-consts
- "-->" :: [o, o] => o (infixr 10)
-rules
- K "P --> Q --> P"
- S "(P --> Q --> R) --> (P --> Q) --> P --> R"
- MP "[| P --> Q; P |] ==> Q"
-end
-\end{ttbox}
-After loading this definition from the file {\tt Hilbert.thy}, you can
-start to prove theorems in the logic:
-\begin{ttbox}
-Goal "P --> P";
-{\out Level 0}
-{\out P --> P}
-{\out 1. P --> P}
-\ttbreak
-by (resolve_tac [Hilbert.MP] 1);
-{\out Level 1}
-{\out P --> P}
-{\out 1. ?P --> P --> P}
-{\out 2. ?P}
-\ttbreak
-by (resolve_tac [Hilbert.MP] 1);
-{\out Level 2}
-{\out P --> P}
-{\out 1. ?P1 --> ?P --> P --> P}
-{\out 2. ?P1}
-{\out 3. ?P}
-\ttbreak
-by (resolve_tac [Hilbert.S] 1);
-{\out Level 3}
-{\out P --> P}
-{\out 1. P --> ?Q2 --> P}
-{\out 2. P --> ?Q2}
-\ttbreak
-by (resolve_tac [Hilbert.K] 1);
-{\out Level 4}
-{\out P --> P}
-{\out 1. P --> ?Q2}
-\ttbreak
-by (resolve_tac [Hilbert.K] 1);
-{\out Level 5}
-{\out P --> P}
-{\out No subgoals!}
-\end{ttbox}
-As we can see, this Hilbert-style formulation of minimal logic is easy to
-define but difficult to use. The following natural deduction formulation is
-better:
-\begin{ttbox}
-MinI = Base +
-consts
- "-->" :: [o, o] => o (infixr 10)
-rules
- impI "(P ==> Q) ==> P --> Q"
- impE "[| P --> Q; P |] ==> Q"
-end
-\end{ttbox}
-Note, however, that although the two systems are equivalent, this fact
-cannot be proved within Isabelle. Axioms {\tt S} and {\tt K} can be
-derived in {\tt MinI} (exercise!), but {\tt impI} cannot be derived in {\tt
- Hilbert}. The reason is that {\tt impI} is only an {\bf admissible} rule
-in {\tt Hilbert}, something that can only be shown by induction over all
-possible proofs in {\tt Hilbert}.
-
-We may easily extend minimal logic with falsity:
-\begin{ttbox}
-MinIF = MinI +
-consts
- False :: o
-rules
- FalseE "False ==> P"
-end
-\end{ttbox}
-On the other hand, we may wish to introduce conjunction only:
-\begin{ttbox}
-MinC = Base +
-consts
- "&" :: [o, o] => o (infixr 30)
-\ttbreak
-rules
- conjI "[| P; Q |] ==> P & Q"
- conjE1 "P & Q ==> P"
- conjE2 "P & Q ==> Q"
-end
-\end{ttbox}
-And if we want to have all three connectives together, we create and load a
-theory file consisting of a single line:
-\begin{ttbox}
-MinIFC = MinIF + MinC
-\end{ttbox}
-Now we can prove mixed theorems like
-\begin{ttbox}
-Goal "P & False --> Q";
-by (resolve_tac [MinI.impI] 1);
-by (dresolve_tac [MinC.conjE2] 1);
-by (eresolve_tac [MinIF.FalseE] 1);
-\end{ttbox}
-Try this as an exercise!
-
-
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "ref"
--- a/doc-src/Ref/introduction.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/Ref/introduction.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,23 +1,5 @@
-
-%% $Id$
\chapter{Basic Use of Isabelle}\index{sessions|(}
-The Reference Manual is a comprehensive description of Isabelle
-proper, including all \ML{} commands, functions and packages. It
-really is intended for reference, perhaps for browsing, but not for
-reading through. It is not a tutorial, but assumes familiarity with
-the basic logical concepts of Isabelle.
-
-When you are looking for a way of performing some task, scan the Table of
-Contents for a relevant heading. Functions are organized by their purpose,
-by their operands (subgoals, tactics, theorems), and by their usefulness.
-In each section, basic functions appear first, then advanced functions, and
-finally esoteric functions. Use the Index when you are looking for the
-definition of a particular Isabelle function.
-
-A few examples are presented. Many example files are distributed with
-Isabelle, however; please experiment interactively.
-
\section{Basic interaction with Isabelle}
\index{starting up|bold}\nobreak
@@ -217,109 +199,6 @@
value is returned.
-\section{Printing of terms and theorems}\label{sec:printing-control}
-\index{printing control|(}
-Isabelle's pretty printer is controlled by a number of parameters.
-
-\subsection{Printing limits}
-\begin{ttbox}
-Pretty.setdepth : int -> unit
-Pretty.setmargin : int -> unit
-print_depth : int -> unit
-\end{ttbox}
-These set limits for terminal output. See also {\tt goals_limit},
-which limits the number of subgoals printed
-(\S\ref{sec:goals-printing}).
-
-\begin{ttdescription}
-\item[\ttindexbold{Pretty.setdepth} \(d\);] tells Isabelle's pretty printer to
- limit the printing depth to~$d$. This affects the display of theorems and
- terms. The default value is~0, which permits printing to an arbitrary
- depth. Useful values for $d$ are~10 and~20.
-
-\item[\ttindexbold{Pretty.setmargin} \(m\);]
- tells Isabelle's pretty printer to assume a right margin (page width)
- of~$m$. The initial margin is~76.
-
-\item[\ttindexbold{print_depth} \(n\);]
- limits the printing depth of complex \ML{} values, such as theorems and
- terms. This command affects the \ML{} top level and its effect is
- compiler-dependent. Typically $n$ should be less than~10.
-\end{ttdescription}
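-
-For example, the following top-level calls (values chosen purely for
-illustration) limit printing to depth~10 within an 80-column margin and
-curb the printing depth of the \ML{} top level itself:
-\begin{ttbox}
-Pretty.setdepth 10;
-Pretty.setmargin 80;
-print_depth 8;
-\end{ttbox}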
-
-
-\subsection{Printing of hypotheses, brackets, types etc.}
-\index{meta-assumptions!printing of}
-\index{types!printing of}\index{sorts!printing of}
-\begin{ttbox}
-show_hyps : bool ref \hfill{\bf initially false}
-show_tags : bool ref \hfill{\bf initially false}
-show_brackets : bool ref \hfill{\bf initially false}
-show_types : bool ref \hfill{\bf initially false}
-show_sorts : bool ref \hfill{\bf initially false}
-show_consts : bool ref \hfill{\bf initially false}
-long_names : bool ref \hfill{\bf initially false}
-\end{ttbox}
-These flags allow you to control how much information is displayed for
-types, terms and theorems. The hypotheses of theorems \emph{are}
-normally shown. Superfluous parentheses of types and terms are not.
-Types and sorts of variables are normally hidden.
-
-Note that displaying types and sorts may explain why a polymorphic
-inference rule fails to resolve with some goal, or why a rewrite rule
-does not apply as expected.
-
-\begin{ttdescription}
-
-\item[reset \ttindexbold{show_hyps};] makes Isabelle show each
- meta-level hypothesis as a dot.
-
-\item[set \ttindexbold{show_tags};] makes Isabelle show tags of theorems
- (which are basically just comments that may be attached by some tools).
-
-\item[set \ttindexbold{show_brackets};] makes Isabelle show full
- bracketing. In particular, this reveals the grouping of infix
- operators.
-
-\item[set \ttindexbold{show_types};] makes Isabelle show types when
- printing a term or theorem.
-
-\item[set \ttindexbold{show_sorts};] makes Isabelle show both types
- and the sorts of type variables, independently of the value of
- \texttt{show_types}.
-
-\item[set \ttindexbold{show_consts};] makes Isabelle show types of constants
- when printing proof states. Note that the output can be enormous as
- polymorphic constants often occur at several different type instances.
-
-\item[set \ttindexbold{long_names};] forces names of all objects
- (types, constants, theorems, etc.) to be printed in their fully
- qualified internal form.
-
-\end{ttdescription}
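-
-As a brief sketch, these flags are switched with \texttt{set} and
-\texttt{reset} at the \ML{} top level; the particular combination below is
-chosen merely for illustration:
-\begin{ttbox}
-set show_types;
-set show_brackets;
-reset show_hyps;
-\end{ttbox}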
-
-
-\subsection{Eta-contraction before printing}
-\begin{ttbox}
-eta_contract: bool ref
-\end{ttbox}
-The {\bf $\eta$-contraction law} asserts $(\lambda x.f(x))\equiv f$,
-provided $x$ is not free in~$f$. It asserts {\bf extensionality} of
-functions: $f\equiv g$ if $f(x)\equiv g(x)$ for all~$x$. Higher-order
-unification frequently puts terms into a fully $\eta$-expanded form. For
-example, if $F$ has type $(\tau\To\tau)\To\tau$ then its expanded form is
-$\lambda h.F(\lambda x.h(x))$. By default, the user sees this expanded
-form.
-
-\begin{ttdescription}
-\item[set \ttindexbold{eta_contract};]
-makes Isabelle perform $\eta$-contractions before printing, so that
-$\lambda h.F(\lambda x.h(x))$ appears simply as~$F$. The
-distinction between a term and its $\eta$-expanded form occasionally
-matters.
-\end{ttdescription}
-\index{printing control|)}
-
\section{Diagnostic messages}
\index{error messages}
\index{warnings}
@@ -351,40 +230,16 @@
\ttindex{warning} resume normal program execution.
-\section{Displaying exceptions as error messages}
-\index{exceptions!printing of}
+\section{Timing}
+\index{timing statistics}\index{proofs!timing}
\begin{ttbox}
-print_exn: exn -> 'a
+timing: bool ref \hfill{\bf initially false}
\end{ttbox}
-Certain Isabelle primitives, such as the forward proof functions {\tt RS}
-and {\tt RSN}, are called both interactively and from programs. They
-indicate errors not by printing messages, but by raising exceptions. For
-interactive use, \ML's reporting of an uncaught exception may be
-uninformative. The Poly/ML function {\tt exception_trace} can generate a
-backtrace.\index{Poly/{\ML} compiler}
\begin{ttdescription}
-\item[\ttindexbold{print_exn} $e$]
-displays the exception~$e$ in a readable manner, and then re-raises~$e$.
-Typical usage is~\hbox{\tt $EXP$ handle e => print_exn e;}, where
-$EXP$ is an expression that may raise an exception.
-
-{\tt print_exn} can display the following common exceptions, which concern
-types, terms, theorems and theories, respectively. Each carries a message
-and related information.
-\begin{ttbox}
-exception TYPE of string * typ list * term list
-exception TERM of string * term list
-exception THM of string * int * thm list
-exception THEORY of string * theory list
-\end{ttbox}
+\item[set \ttindexbold{timing};] enables global timing in Isabelle.
+ This information is compiler-dependent.
\end{ttdescription}
-\begin{warn}
- {\tt print_exn} prints terms by calling \ttindex{prin}, which obtains
- pretty printing information from the proof state last stored in the
- subgoal module. The appearance of the output thus depends upon the
- theory used in the last interactive proof.
-\end{warn}
\index{sessions|)}
--- a/doc-src/Ref/ref.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/Ref/ref.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,13 +1,12 @@
\documentclass[12pt,a4paper]{report}
-\usepackage{graphicx,../iman,../extra,../ttbox,../proof,../rail,../pdfsetup}
+\usepackage{graphicx,../iman,../extra,../ttbox,../proof,../pdfsetup}
-%% $Id$
%%\includeonly{}
%%% to index ids: \[\\tt \([a-zA-Z0-9][a-zA-Z0-9_'.]*\) [\\ttindexbold{\1}
%%% to delete old ones: \\indexbold{\*[^}]*}
%% run sedindex ref to prepare index file
%%% needs chapter on Provers/typedsimp.ML?
-\title{\includegraphics[scale=0.5]{isabelle} \\[4ex] The Isabelle Reference Manual}
+\title{\includegraphics[scale=0.5]{isabelle} \\[4ex] Old Isabelle Reference Manual}
\author{{\em Lawrence C. Paulson}\\
Computer Laboratory \\ University of Cambridge \\
@@ -22,10 +21,6 @@
\sloppy
\binperiod %%%treat . like a binary operator
-\railalias{lbrace}{\ttlbrace}
-\railalias{rbrace}{\ttrbrace}
-\railterm{lbrace,rbrace}
-
\begin{document}
\underscoreoff
@@ -34,17 +29,10 @@
\index{meta-rules|see{meta-rules}}
\maketitle
-\emph{Note}: this document is part of the earlier Isabelle documentation,
-which is somewhat superseded by the Isabelle/HOL
-\emph{Tutorial}~\cite{isa-tutorial}. Much of it is concerned with
-the old-style theory syntax and the primitives for conducting proofs
-using the ML top level. This style of interaction is largely obsolete:
-most Isabelle proofs are now written using the Isar
-language and the Proof General interface. However, this is the only
-comprehensive Isabelle reference manual.
-
-See also the \emph{Introduction to Isabelle}, which has tutorial examples
-on conducting proofs using the ML top-level.
+\emph{Note}: this document is part of the earlier Isabelle
+documentation and is mostly outdated. Fully obsolete parts of the
+original text have already been removed. The remaining material
+covers some aspects that did not make it into the newer manuals yet.
\subsubsection*{Acknowledgements}
Tobias Nipkow, of T. U. Munich, wrote most of
@@ -62,7 +50,6 @@
\pagenumbering{roman} \tableofcontents \clearfirst
\include{introduction}
-\include{goals}
\include{tactic}
\include{tctical}
\include{thm}
--- a/doc-src/Ref/simplifier.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/Ref/simplifier.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,4 +1,4 @@
-%% $Id$
+
\chapter{Simplification}
\label{chap:simplification}
\index{simplification|(}
@@ -810,173 +810,6 @@
\end{warn}
-\section{Examples of using the Simplifier}
-\index{examples!of simplification} Assume we are working within {\tt
- FOL} (see the file \texttt{FOL/ex/Nat}) and that
-\begin{ttdescription}
-\item[Nat.thy]
- is a theory including the constants $0$, $Suc$ and $+$,
-\item[add_0]
- is the rewrite rule $0+\Var{n} = \Var{n}$,
-\item[add_Suc]
- is the rewrite rule $Suc(\Var{m})+\Var{n} = Suc(\Var{m}+\Var{n})$,
-\item[induct]
- is the induction rule $\List{\Var{P}(0);\; \Forall x. \Var{P}(x)\Imp
- \Var{P}(Suc(x))} \Imp \Var{P}(\Var{n})$.
-\end{ttdescription}
-We augment the implicit simpset inherited from \texttt{Nat} with the
-basic rewrite rules for addition of natural numbers:
-\begin{ttbox}
-Addsimps [add_0, add_Suc];
-\end{ttbox}
-
-\subsection{A trivial example}
-Proofs by induction typically involve simplification. Here is a proof
-that~0 is a right identity:
-\begin{ttbox}
-Goal "m+0 = m";
-{\out Level 0}
-{\out m + 0 = m}
-{\out 1. m + 0 = m}
-\end{ttbox}
-The first step is to perform induction on the variable~$m$. This returns a
-base case and inductive step as two subgoals:
-\begin{ttbox}
-by (res_inst_tac [("n","m")] induct 1);
-{\out Level 1}
-{\out m + 0 = m}
-{\out 1. 0 + 0 = 0}
-{\out 2. !!x. x + 0 = x ==> Suc(x) + 0 = Suc(x)}
-\end{ttbox}
-Simplification solves the first subgoal trivially:
-\begin{ttbox}
-by (Simp_tac 1);
-{\out Level 2}
-{\out m + 0 = m}
-{\out 1. !!x. x + 0 = x ==> Suc(x) + 0 = Suc(x)}
-\end{ttbox}
-The remaining subgoal requires \ttindex{Asm_simp_tac} in order to use the
-induction hypothesis as a rewrite rule:
-\begin{ttbox}
-by (Asm_simp_tac 1);
-{\out Level 3}
-{\out m + 0 = m}
-{\out No subgoals!}
-\end{ttbox}
-
-\subsection{An example of tracing}
-\index{tracing!of simplification|(}\index{*trace_simp}
-
-Let us prove a similar result involving more complex terms. We prove
-that addition is commutative.
-\begin{ttbox}
-Goal "m+Suc(n) = Suc(m+n)";
-{\out Level 0}
-{\out m + Suc(n) = Suc(m + n)}
-{\out 1. m + Suc(n) = Suc(m + n)}
-\end{ttbox}
-Performing induction on~$m$ yields two subgoals:
-\begin{ttbox}
-by (res_inst_tac [("n","m")] induct 1);
-{\out Level 1}
-{\out m + Suc(n) = Suc(m + n)}
-{\out 1. 0 + Suc(n) = Suc(0 + n)}
-{\out 2. !!x. x + Suc(n) = Suc(x + n) ==>}
-{\out Suc(x) + Suc(n) = Suc(Suc(x) + n)}
-\end{ttbox}
-Simplification solves the first subgoal, this time rewriting two
-occurrences of~0:
-\begin{ttbox}
-by (Simp_tac 1);
-{\out Level 2}
-{\out m + Suc(n) = Suc(m + n)}
-{\out 1. !!x. x + Suc(n) = Suc(x + n) ==>}
-{\out Suc(x) + Suc(n) = Suc(Suc(x) + n)}
-\end{ttbox}
-Switching tracing on illustrates how the simplifier solves the remaining
-subgoal:
-\begin{ttbox}
-set trace_simp;
-by (Asm_simp_tac 1);
-\ttbreak
-{\out Adding rewrite rule:}
-{\out .x + Suc n == Suc (.x + n)}
-\ttbreak
-{\out Applying instance of rewrite rule:}
-{\out ?m + Suc ?n == Suc (?m + ?n)}
-{\out Rewriting:}
-{\out Suc .x + Suc n == Suc (Suc .x + n)}
-\ttbreak
-{\out Applying instance of rewrite rule:}
-{\out Suc ?m + ?n == Suc (?m + ?n)}
-{\out Rewriting:}
-{\out Suc .x + n == Suc (.x + n)}
-\ttbreak
-{\out Applying instance of rewrite rule:}
-{\out Suc ?m + ?n == Suc (?m + ?n)}
-{\out Rewriting:}
-{\out Suc .x + n == Suc (.x + n)}
-\ttbreak
-{\out Applying instance of rewrite rule:}
-{\out ?x = ?x == True}
-{\out Rewriting:}
-{\out Suc (Suc (.x + n)) = Suc (Suc (.x + n)) == True}
-\ttbreak
-{\out Level 3}
-{\out m + Suc(n) = Suc(m + n)}
-{\out No subgoals!}
-\end{ttbox}
-Many variations are possible. At Level~1 (in either example) we could have
-solved both subgoals at once using the tactical \ttindex{ALLGOALS}:
-\begin{ttbox}
-by (ALLGOALS Asm_simp_tac);
-{\out Level 2}
-{\out m + Suc(n) = Suc(m + n)}
-{\out No subgoals!}
-\end{ttbox}
-\index{tracing!of simplification|)}
-
-
-\subsection{Free variables and simplification}
-
-Here is a conjecture to be proved for an arbitrary function~$f$
-satisfying the law $f(Suc(\Var{n})) = Suc(f(\Var{n}))$:
-\begin{ttbox}
-val [prem] = Goal
- "(!!n. f(Suc(n)) = Suc(f(n))) ==> f(i+j) = i+f(j)";
-{\out Level 0}
-{\out f(i + j) = i + f(j)}
-{\out 1. f(i + j) = i + f(j)}
-\ttbreak
-{\out val prem = "f(Suc(?n)) = Suc(f(?n))}
-{\out [!!n. f(Suc(n)) = Suc(f(n))]" : thm}
-\end{ttbox}
-In the theorem~\texttt{prem}, note that $f$ is a free variable while
-$\Var{n}$ is a schematic variable.
-\begin{ttbox}
-by (res_inst_tac [("n","i")] induct 1);
-{\out Level 1}
-{\out f(i + j) = i + f(j)}
-{\out 1. f(0 + j) = 0 + f(j)}
-{\out 2. !!x. f(x + j) = x + f(j) ==> f(Suc(x) + j) = Suc(x) + f(j)}
-\end{ttbox}
-We simplify each subgoal in turn. The first one is trivial:
-\begin{ttbox}
-by (Simp_tac 1);
-{\out Level 2}
-{\out f(i + j) = i + f(j)}
-{\out 1. !!x. f(x + j) = x + f(j) ==> f(Suc(x) + j) = Suc(x) + f(j)}
-\end{ttbox}
-The remaining subgoal requires rewriting by the premise, so we add it
-to the current simpset:
-\begin{ttbox}
-by (asm_simp_tac (simpset() addsimps [prem]) 1);
-{\out Level 3}
-{\out f(i + j) = i + f(j)}
-{\out No subgoals!}
-\end{ttbox}
-
-
\section{Permutative rewrite rules}
\index{rewrite rules!permutative|(}
--- a/doc-src/Ref/substitution.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/Ref/substitution.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,4 +1,4 @@
-%% $Id$
+
\chapter{Substitution Tactics} \label{substitution}
\index{tactics!substitution|(}\index{equality|(}
--- a/doc-src/Ref/syntax.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/Ref/syntax.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,4 +1,4 @@
-%% $Id$
+
\chapter{Syntax Transformations} \label{chap:syntax}
\newcommand\ttapp{\mathrel{\hbox{\tt\$}}}
\newcommand\mtt[1]{\mbox{\tt #1}}
--- a/doc-src/Ref/tactic.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/Ref/tactic.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,235 +1,8 @@
-%% $Id$
+
\chapter{Tactics} \label{tactics}
-\index{tactics|(} Tactics have type \mltydx{tactic}. This is just an
-abbreviation for functions from theorems to theorem sequences, where
-the theorems represent states of a backward proof. Tactics seldom
-need to be coded from scratch, as functions; instead they are
-expressed using basic tactics and tacticals.
-
-This chapter only presents the primitive tactics. Substantial proofs
-require the power of automatic tools like simplification
-(Chapter~\ref{chap:simplification}) and classical tableau reasoning
-(Chapter~\ref{chap:classical}).
-
-\section{Resolution and assumption tactics}
-{\bf Resolution} is Isabelle's basic mechanism for refining a subgoal using
-a rule. {\bf Elim-resolution} is particularly suited for elimination
-rules, while {\bf destruct-resolution} is particularly suited for
-destruction rules. The {\tt r}, {\tt e}, {\tt d} naming convention is
-maintained for several different kinds of resolution tactics, as well as
-the shortcuts in the subgoal module.
-
-All the tactics in this section act on a subgoal designated by a positive
-integer~$i$. They fail (by returning the empty sequence) if~$i$ is out of
-range.
-
-\subsection{Resolution tactics}
-\index{resolution!tactics}
-\index{tactics!resolution|bold}
-\begin{ttbox}
-resolve_tac : thm list -> int -> tactic
-eresolve_tac : thm list -> int -> tactic
-dresolve_tac : thm list -> int -> tactic
-forward_tac : thm list -> int -> tactic
-\end{ttbox}
-These perform resolution on a list of theorems, $thms$, representing a list
-of object-rules. When generating next states, they take each of the rules
-in the order given. Each rule may yield several next states, or none:
-higher-order resolution may yield multiple resolvents.
-\begin{ttdescription}
-\item[\ttindexbold{resolve_tac} {\it thms} {\it i}]
- refines the proof state using the rules, which should normally be
- introduction rules. It resolves a rule's conclusion with
- subgoal~$i$ of the proof state.
-
-\item[\ttindexbold{eresolve_tac} {\it thms} {\it i}]
- \index{elim-resolution}
- performs elim-resolution with the rules, which should normally be
- elimination rules. It resolves with a rule, proves its first premise by
- assumption, and finally \emph{deletes} that assumption from any new
- subgoals. (To rotate a rule's premises,
- see \texttt{rotate_prems} in~{\S}\ref{MiscellaneousForwardRules}.)
-
-\item[\ttindexbold{dresolve_tac} {\it thms} {\it i}]
- \index{forward proof}\index{destruct-resolution}
- performs destruct-resolution with the rules, which normally should
- be destruction rules. This replaces an assumption by the result of
- applying one of the rules.
-
-\item[\ttindexbold{forward_tac}]\index{forward proof}
- is like {\tt dresolve_tac} except that the selected assumption is not
- deleted. It applies a rule to an assumption, adding the result as a new
- assumption.
-\end{ttdescription}
-
-\subsection{Assumption tactics}
-\index{tactics!assumption|bold}\index{assumptions!tactics for}
-\begin{ttbox}
-assume_tac : int -> tactic
-eq_assume_tac : int -> tactic
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{assume_tac} {\it i}]
-attempts to solve subgoal~$i$ by assumption.
-
-\item[\ttindexbold{eq_assume_tac}]
-is like {\tt assume_tac} but does not use unification. It succeeds (with a
-\emph{unique} next state) if one of the assumptions is identical to the
-subgoal's conclusion. Since it does not instantiate variables, it cannot
-make other subgoals unprovable. It is intended to be called from proof
-strategies, not interactively.
-\end{ttdescription}
-
-\subsection{Matching tactics} \label{match_tac}
-\index{tactics!matching}
-\begin{ttbox}
-match_tac : thm list -> int -> tactic
-ematch_tac : thm list -> int -> tactic
-dmatch_tac : thm list -> int -> tactic
-\end{ttbox}
-These are just like the resolution tactics except that they never
-instantiate unknowns in the proof state. Flexible subgoals are not updated
-willy-nilly, but are left alone. Matching --- strictly speaking --- means
-treating the unknowns in the proof state as constants; these tactics merely
-discard unifiers that would update the proof state.
-\begin{ttdescription}
-\item[\ttindexbold{match_tac} {\it thms} {\it i}]
-refines the proof state using the rules, matching a rule's
-conclusion with subgoal~$i$ of the proof state.
-
-\item[\ttindexbold{ematch_tac}]
-is like {\tt match_tac}, but performs elim-resolution.
-
-\item[\ttindexbold{dmatch_tac}]
-is like {\tt match_tac}, but performs destruct-resolution.
-\end{ttdescription}
-
-
-\subsection{Explicit instantiation} \label{res_inst_tac}
-\index{tactics!instantiation}\index{instantiation}
-\begin{ttbox}
-res_inst_tac : (string*string)list -> thm -> int -> tactic
-eres_inst_tac : (string*string)list -> thm -> int -> tactic
-dres_inst_tac : (string*string)list -> thm -> int -> tactic
-forw_inst_tac : (string*string)list -> thm -> int -> tactic
-instantiate_tac : (string*string)list -> tactic
-\end{ttbox}
-The first four of these tactics are designed for applying rules by resolution
-such as substitution and induction, which cause difficulties for higher-order
-unification. The tactics accept explicit instantiations for unknowns
-in the rule ---typically, in the rule's conclusion. The last one,
-{\tt instantiate_tac}, may be used to instantiate unknowns in the proof state,
-independently of rule application.
-
-Each instantiation is a pair {\tt($v$,$e$)},
-where $v$ is an unknown \emph{without} its leading question mark!
-\begin{itemize}
-\item If $v$ is the type unknown {\tt'a}, then
-the rule must contain a type unknown \verb$?'a$ of some
-sort~$s$, and $e$ should be a type of sort $s$.
-
-\item If $v$ is the unknown {\tt P}, then
-the rule must contain an unknown \verb$?P$ of some type~$\tau$,
-and $e$ should be a term of some type~$\sigma$ such that $\tau$ and
-$\sigma$ are unifiable. If the unification of $\tau$ and $\sigma$
-instantiates any type unknowns in $\tau$, these instantiations
-are recorded for application to the rule.
-\end{itemize}
-Types are instantiated before terms are. Because type instantiations are
-inferred from term instantiations, explicit type instantiations are seldom
-necessary --- if \verb$?t$ has type \verb$?'a$, then the instantiation list
-\texttt{[("'a","bool"), ("t","True")]} may be simplified to
-\texttt{[("t","True")]}. Type unknowns in the proof state may cause
-failure because the tactics cannot instantiate them.
-
-The first four instantiation tactics act on a given subgoal. Terms in the
-instantiations are type-checked in the context of that subgoal --- in
-particular, they may refer to that subgoal's parameters. Any unknowns in
-the terms receive subscripts and are lifted over the parameters; thus, you
-may not refer to unknowns in the subgoal.
-
-\begin{ttdescription}
-\item[\ttindexbold{res_inst_tac} {\it insts} {\it thm} {\it i}]
-instantiates the rule {\it thm} with the instantiations {\it insts}, as
-described above, and then performs resolution on subgoal~$i$. Resolution
-typically causes further instantiations; you need not give explicit
-instantiations for every unknown in the rule.
-
-\item[\ttindexbold{eres_inst_tac}]
-is like {\tt res_inst_tac}, but performs elim-resolution.
-
-\item[\ttindexbold{dres_inst_tac}]
-is like {\tt res_inst_tac}, but performs destruct-resolution.
-
-\item[\ttindexbold{forw_inst_tac}]
-is like {\tt dres_inst_tac} except that the selected assumption is not
-deleted. It applies the instantiated rule to an assumption, adding the
-result as a new assumption.
-
-\item[\ttindexbold{instantiate_tac} {\it insts}]
-instantiates unknowns in the proof state. This affects the main goal as
-well as all subgoals.
-\end{ttdescription}
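-
-For instance, induction rules are typically applied with an explicit
-instantiation for the induction variable, which higher-order unification
-cannot determine by itself; the rule name, variable and subgoal number
-below are merely illustrative:
-\begin{ttbox}
-by (res_inst_tac [("n","m")] induct 1);
-\end{ttbox}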
-
+\index{tactics|(}
\section{Other basic tactics}
-\subsection{Tactic shortcuts}
-\index{shortcuts!for tactics}
-\index{tactics!resolution}\index{tactics!assumption}
-\index{tactics!meta-rewriting}
-\begin{ttbox}
-rtac : thm -> int -> tactic
-etac : thm -> int -> tactic
-dtac : thm -> int -> tactic
-ftac : thm -> int -> tactic
-atac : int -> tactic
-eatac : thm -> int -> int -> tactic
-datac : thm -> int -> int -> tactic
-fatac : thm -> int -> int -> tactic
-ares_tac : thm list -> int -> tactic
-rewtac : thm -> tactic
-\end{ttbox}
-These abbreviate common uses of tactics.
-\begin{ttdescription}
-\item[\ttindexbold{rtac} {\it thm} {\it i}]
-abbreviates \hbox{\tt resolve_tac [{\it thm}] {\it i}}, doing resolution.
-
-\item[\ttindexbold{etac} {\it thm} {\it i}]
-abbreviates \hbox{\tt eresolve_tac [{\it thm}] {\it i}}, doing elim-resolution.
-
-\item[\ttindexbold{dtac} {\it thm} {\it i}]
-abbreviates \hbox{\tt dresolve_tac [{\it thm}] {\it i}}, doing
-destruct-resolution.
-
-\item[\ttindexbold{ftac} {\it thm} {\it i}]
-abbreviates \hbox{\tt forward_tac [{\it thm}] {\it i}}, doing
-destruct-resolution without deleting the assumption.
-
-\item[\ttindexbold{atac} {\it i}]
-abbreviates \hbox{\tt assume_tac {\it i}}, doing proof by assumption.
-
-\item[\ttindexbold{eatac} {\it thm} {\it j} {\it i}]
-performs \hbox{\tt etac {\it thm}} and then {\it j} times \texttt{atac},
-solving additionally {\it j}~premises of the rule {\it thm} by assumption.
-
-\item[\ttindexbold{datac} {\it thm} {\it j} {\it i}]
-performs \hbox{\tt dtac {\it thm}} and then {\it j} times \texttt{atac},
-solving additionally {\it j}~premises of the rule {\it thm} by assumption.
-
-\item[\ttindexbold{fatac} {\it thm} {\it j} {\it i}]
-performs \hbox{\tt ftac {\it thm}} and then {\it j} times \texttt{atac},
-solving additionally {\it j}~premises of the rule {\it thm} by assumption.
-
-\item[\ttindexbold{ares_tac} {\it thms} {\it i}]
-tries proof by assumption and resolution; it abbreviates
-\begin{ttbox}
-assume_tac {\it i} ORELSE resolve_tac {\it thms} {\it i}
-\end{ttbox}
-
-\item[\ttindexbold{rewtac} {\it def}]
-abbreviates \hbox{\tt rewrite_goals_tac [{\it def}]}, unfolding a definition.
-\end{ttdescription}
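-
-As a small illustration (assuming an object-logic that provides the usual
-implication introduction rule \texttt{impI}), a proof of \texttt{P --> P}
-shrinks to two shortcut steps:
-\begin{ttbox}
-Goal "P --> P";
-by (rtac impI 1);
-by (atac 1);
-\end{ttbox}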
-
\subsection{Inserting premises and facts}\label{cut_facts_tac}
\index{tactics!for inserting facts}\index{assumptions!inserting}
@@ -351,52 +124,6 @@
\section{Obscure tactics}
-\subsection{Renaming parameters in a goal} \index{parameters!renaming}
-\begin{ttbox}
-rename_tac : string -> int -> tactic
-rename_last_tac : string -> string list -> int -> tactic
-Logic.set_rename_prefix : string -> unit
-Logic.auto_rename : bool ref \hfill{\bf initially false}
-\end{ttbox}
-When creating a parameter, Isabelle chooses its name by matching variable
-names via the object-rule. Given the rule $(\forall I)$ formalized as
-$\left(\Forall x. P(x)\right) \Imp \forall x.P(x)$, Isabelle will note that
-the $\Forall$-bound variable in the premise has the same name as the
-$\forall$-bound variable in the conclusion.
-
-Sometimes there is insufficient information and Isabelle chooses an
-arbitrary name. The renaming tactics let you override Isabelle's choice.
-Because renaming parameters has no logical effect on the proof state, the
-{\tt by} command prints the message {\tt Warning:\ same as previous
-level}.
-
-Alternatively, you can suppress the naming mechanism described above and
-have Isabelle generate uniform names for parameters. These names have the
-form $p${\tt a}, $p${\tt b}, $p${\tt c},~\ldots, where $p$ is any desired
-prefix. They are ugly but predictable.
-
-\begin{ttdescription}
-\item[\ttindexbold{rename_tac} {\it str} {\it i}]
-interprets the string {\it str} as a series of blank-separated variable
-names, and uses them to rename the parameters of subgoal~$i$. The names
-must be distinct. If there are fewer names than parameters, then the
-tactic renames the innermost parameters and may modify the remaining ones
-to ensure that all the parameters are distinct.
-
-\item[\ttindexbold{rename_last_tac} {\it prefix} {\it suffixes} {\it i}]
-generates a list of names by attaching each of the {\it suffixes\/} to the
-{\it prefix}. It is intended for coding structural induction tactics,
-where several of the new parameters should have related names.
-
-\item[\ttindexbold{Logic.set_rename_prefix} {\it prefix};]
-sets the prefix for uniform renaming to~{\it prefix}. The default prefix
-is {\tt"k"}.
-
-\item[set \ttindexbold{Logic.auto_rename};]
-makes Isabelle generate uniform names for parameters.
-\end{ttdescription}
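-
-For example, to rename the two innermost parameters of the first subgoal
-to \texttt{u} and \texttt{v} (arbitrary names, chosen for illustration):
-\begin{ttbox}
-by (rename_tac "u v" 1);
-\end{ttbox}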
-
-
\subsection{Manipulating assumptions}
\index{assumptions!rotating}
\begin{ttbox}
@@ -594,142 +321,6 @@
is no longer than {\it limit}.
\end{ttdescription}
-
-\section{Programming tools for proof strategies}
-Do not consider using the primitives discussed in this section unless you
-really need to code tactics from scratch.
-
-\subsection{Operations on tactics}
-\index{tactics!primitives for coding} A tactic maps theorems to sequences of
-theorems. The type constructor for sequences (lazy lists) is called
-\mltydx{Seq.seq}. To simplify the types of tactics and tacticals,
-Isabelle defines a type abbreviation:
-\begin{ttbox}
-type tactic = thm -> thm Seq.seq
-\end{ttbox}
-The following operations provide means for coding tactics in a clean style.
-\begin{ttbox}
-PRIMITIVE : (thm -> thm) -> tactic
-SUBGOAL : ((term*int) -> tactic) -> int -> tactic
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{PRIMITIVE} $f$] packages the meta-rule~$f$ as a tactic that
- applies $f$ to the proof state and returns the result as a one-element
- sequence. If $f$ raises an exception, then the tactic's result is the empty
- sequence.
-
-\item[\ttindexbold{SUBGOAL} $f$ $i$]
-extracts subgoal~$i$ from the proof state as a term~$t$, and computes a
-tactic by calling~$f(t,i)$. It applies the resulting tactic to the same
-state. The tactic body is expressed using tactics and tacticals, but may
-peek at a particular subgoal:
-\begin{ttbox}
-SUBGOAL (fn (t,i) => {\it tactic-valued expression})
-\end{ttbox}
-\end{ttdescription}
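-
-Two minimal sketches may help; the names \texttt{succeed_tac} and
-\texttt{peek_tac} are invented for illustration only:
-\begin{ttbox}
-val succeed_tac = PRIMITIVE (fn st => st);
-val peek_tac    = SUBGOAL (fn (t, i) => assume_tac i);
-\end{ttbox}
-Here \texttt{succeed_tac} returns the proof state unchanged, while
-\texttt{peek_tac} behaves just like \texttt{assume_tac}; a realistic
-version would inspect the subgoal term~\texttt{t} before choosing a
-tactic.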
-
-
-\subsection{Tracing}
-\index{tactics!tracing}
-\index{tracing!of tactics}
-\begin{ttbox}
-pause_tac: tactic
-print_tac: string -> tactic
-\end{ttbox}
-These tactics print tracing information when they are applied to a proof
-state. Their output may be difficult to interpret. Note that certain of
-the searching tacticals, such as {\tt REPEAT}, have built-in tracing
-options.
-\begin{ttdescription}
-\item[\ttindexbold{pause_tac}]
-prints {\footnotesize\tt** Press RETURN to continue:} and then reads a line
-from the terminal. If this line is blank then it returns the proof state
-unchanged; otherwise it fails (which may terminate a repetition).
-
-\item[\ttindexbold{print_tac}~$msg$]
-returns the proof state unchanged, with the side effect of printing it at
-the terminal.
-\end{ttdescription}
-
-
-\section{*Sequences}
-\index{sequences (lazy lists)|bold}
-The module {\tt Seq} declares a type of lazy lists. It uses
-Isabelle's type \mltydx{option} to represent the possible presence
-(\ttindexbold{Some}) or absence (\ttindexbold{None}) of
-a value:
-\begin{ttbox}
-datatype 'a option = None | Some of 'a;
-\end{ttbox}
-The {\tt Seq} structure is supposed to be accessed via fully qualified
-names and should not be \texttt{open}.
-
-\subsection{Basic operations on sequences}
-\begin{ttbox}
-Seq.empty : 'a seq
-Seq.make : (unit -> ('a * 'a seq) option) -> 'a seq
-Seq.single : 'a -> 'a seq
-Seq.pull : 'a seq -> ('a * 'a seq) option
-\end{ttbox}
-\begin{ttdescription}
-\item[Seq.empty] is the empty sequence.
-
-\item[\tt Seq.make (fn () => Some ($x$, $xq$))] constructs the
- sequence with head~$x$ and tail~$xq$, neither of which is evaluated.
-
-\item[Seq.single $x$]
-constructs the sequence containing the single element~$x$.
-
-\item[Seq.pull $xq$] returns {\tt None} if the sequence is empty and
- {\tt Some ($x$, $xq'$)} if the sequence has head~$x$ and tail~$xq'$.
- Warning: calling \hbox{Seq.pull $xq$} again will {\it recompute\/}
- the value of~$x$; it is not stored!
-\end{ttdescription}
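-
-A tiny top-level example (with the result indicated in a comment) may
-clarify the lazy behaviour:
-\begin{ttbox}
-val xq = Seq.make (fn () => Some (1, Seq.single 2));
-Seq.pull xq;   (*yields Some (1, ...); the head is computed only now*)
-\end{ttbox}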
-
-
-\subsection{Converting between sequences and lists}
-\begin{ttbox}
-Seq.chop : int * 'a seq -> 'a list * 'a seq
-Seq.list_of : 'a seq -> 'a list
-Seq.of_list : 'a list -> 'a seq
-\end{ttbox}
-\begin{ttdescription}
-\item[Seq.chop ($n$, $xq$)] returns the first~$n$ elements of~$xq$ as a
- list, paired with the remaining elements of~$xq$. If $xq$ has fewer
- than~$n$ elements, then so will the list.
-
-\item[Seq.list_of $xq$] returns the elements of~$xq$, which must be
- finite, as a list.
-
-\item[Seq.of_list $xs$] creates a sequence containing the elements
- of~$xs$.
-\end{ttdescription}
-
-
-\subsection{Combining sequences}
-\begin{ttbox}
-Seq.append : 'a seq * 'a seq -> 'a seq
-Seq.interleave : 'a seq * 'a seq -> 'a seq
-Seq.flat : 'a seq seq -> 'a seq
-Seq.map : ('a -> 'b) -> 'a seq -> 'b seq
-Seq.filter : ('a -> bool) -> 'a seq -> 'a seq
-\end{ttbox}
-\begin{ttdescription}
-\item[Seq.append ($xq$, $yq$)] concatenates $xq$ to $yq$.
-
-\item[Seq.interleave ($xq$, $yq$)] joins $xq$ with $yq$ by
- interleaving their elements. The result contains all the elements
- of the sequences, even if both are infinite.
-
-\item[Seq.flat $xqq$] concatenates a sequence of sequences.
-
-\item[Seq.map $f$ $xq$] applies $f$ to every element
- of~$xq=x@1,x@2,\ldots$, yielding the sequence $f(x@1),f(x@2),\ldots$.
-
-\item[Seq.filter $p$ $xq$] returns the sequence consisting of all
- elements~$x$ of~$xq$ such that $p(x)$ is {\tt true}.
-\end{ttdescription}
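-
-Combined with the conversion functions of the previous subsection, these
-yield concise one-liners; the numbers are arbitrary:
-\begin{ttbox}
-Seq.list_of (Seq.map (fn x => x + 1) (Seq.of_list [1, 2, 3]));
-                                       (*yields [2, 3, 4]*)
-Seq.list_of (Seq.filter (fn x => x > 1) (Seq.of_list [1, 2, 3]));
-                                       (*yields [2, 3]*)
-\end{ttbox}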
-
\index{tactics|)}
--- a/doc-src/Ref/tctical.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/Ref/tctical.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,4 +1,4 @@
-%% $Id$
+
\chapter{Tacticals}
\index{tacticals|(}
Tacticals are operations on tactics. Their implementation makes use of
--- a/doc-src/Ref/theories.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/Ref/theories.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,216 +1,6 @@
-
-%% $Id$
\chapter{Theories, Terms and Types} \label{theories}
-\index{theories|(}\index{signatures|bold}
-\index{reading!axioms|see{\texttt{assume_ax}}} Theories organize the syntax,
-declarations and axioms of a mathematical development. They are built,
-starting from the Pure or CPure theory, by extending and merging existing
-theories. They have the \ML\ type \mltydx{theory}. Theory operations signal
-errors by raising exception \xdx{THEORY}, returning a message and a list of
-theories.
-
-Signatures, which contain information about sorts, types, constants and
-syntax, have the \ML\ type~\mltydx{Sign.sg}. For identification, each
-signature carries a unique list of \bfindex{stamps}, which are \ML\
-references to strings. The strings serve as human-readable names; the
-references serve as unique identifiers. Each primitive signature has a
-single stamp. When two signatures are merged, their lists of stamps are
-also merged. Every theory carries a unique signature.
-
-Terms and types are the underlying representation of logical syntax. Their
-\ML\ definitions are irrelevant to naive Isabelle users. Programmers who
-wish to extend Isabelle may need to know such details, say to code a tactic
-that looks for subgoals of a particular form. Terms and types may be
-`certified' to be well-formed with respect to a given signature.
-
-
-\section{Defining theories}\label{sec:ref-defining-theories}
-
-Theories are defined via theory files $name$\texttt{.thy} (there are also
-\ML-level interfaces which are only intended for people building advanced
-theory definition packages). Appendix~\ref{app:TheorySyntax} presents the
-concrete syntax for theory files; here follows an explanation of the
-constituent parts.
-\begin{description}
-\item[{\it theoryDef}] is the full definition. The new theory is called $id$.
- It is the union of the named \textbf{parent
- theories}\indexbold{theories!parent}, possibly extended with new
- components. \thydx{Pure} and \thydx{CPure} are the basic theories, which
- contain only the meta-logic. They differ just in their concrete syntax for
- function applications.
-
- The new theory begins as a merge of its parents.
- \begin{ttbox}
- Attempt to merge different versions of theories: "\(T@1\)", \(\ldots\), "\(T@n\)"
- \end{ttbox}
- This error may especially occur when a theory is redeclared --- say to
- change an inappropriate definition --- and bindings to old versions persist.
- Isabelle ensures that old and new theories of the same name are not involved
- in a proof.
-
-\item[$classes$]
- is a series of class declarations. Declaring {\tt$id$ < $id@1$ \dots\
- $id@n$} makes $id$ a subclass of the existing classes $id@1\dots
- id@n$. This rules out cyclic class structures. Isabelle automatically
- computes the transitive closure of subclass hierarchies; it is not
- necessary to declare \texttt{c < e} in addition to \texttt{c < d} and \texttt{d <
- e}.
-
-\item[$default$]
- introduces $sort$ as the new default sort for type variables. This applies
- to unconstrained type variables in an input string but not to type
- variables created internally. If omitted, the default sort is the listwise
- union of the default sorts of the parent theories (i.e.\ their logical
- intersection).
-
-\item[$sort$] is a finite set of classes. A single class $id$ abbreviates the
- sort $\{id\}$.
-
-\item[$types$]
- is a series of type declarations. Each declares a new type constructor
- or type synonym. An $n$-place type constructor is specified by
- $(\alpha@1,\dots,\alpha@n)name$, where the type variables serve only to
- indicate the number~$n$.
-
- A \textbf{type synonym}\indexbold{type synonyms} is an abbreviation
- $(\alpha@1,\dots,\alpha@n)name = \tau$, where $name$ and $\tau$ can
- be strings.
-
-\item[$infix$]
- declares a type or constant to be an infix operator having priority $nat$
- and associating to the left (\texttt{infixl}) or right (\texttt{infixr}).
- Only 2-place type constructors can have infix status; an example is {\tt
- ('a,'b)~"*"~(infixr~20)}, which may express binary product types.
-
-\item[$arities$] is a series of type arity declarations. Each assigns
- arities to type constructors. The $name$ must be an existing type
- constructor, which is given the additional arity $arity$.
-
-\item[$nonterminals$]\index{*nonterminal symbols} declares purely
- syntactic types to be used as nonterminal symbols of the context
- free grammar.
-
-\item[$consts$] is a series of constant declarations. Each new
- constant $name$ is given the specified type. The optional $mixfix$
- annotations may attach concrete syntax to the constant.
-
-\item[$syntax$] \index{*syntax section}\index{print mode} is a variant
- of $consts$ which adds just syntax without actually declaring
- logical constants. This gives full control over a theory's context
- free grammar. The optional $mode$ specifies the print mode where the
- mixfix productions should be added. If there is no \texttt{output}
- option given, all productions are also added to the input syntax
- (regardless of the print mode).
-
-\item[$mixfix$] \index{mixfix declarations}
- annotations can take three forms:
- \begin{itemize}
- \item A mixfix template given as a $string$ of the form
- {\tt"}\dots{\tt\_}\dots{\tt\_}\dots{\tt"} where the $i$-th underscore
- indicates the position where the $i$-th argument should go. The list
- of numbers gives the priority of each argument. The final number gives
- the priority of the whole construct.
-
- \item A constant $f$ of type $\tau@1\To(\tau@2\To\tau)$ can be given {\bf
- infix} status.
-
- \item A constant $f$ of type $(\tau@1\To\tau@2)\To\tau$ can be given {\bf
- binder} status. The declaration \texttt{binder} $\cal Q$ $p$ causes
- ${\cal Q}\,x.F(x)$ to be treated
- like $f(F)$, where $p$ is the priority.
- \end{itemize}
-
-\item[$trans$]
- specifies syntactic translation rules (macros). There are three forms:
- parse rules (\texttt{=>}), print rules (\texttt{<=}), and parse/print rules ({\tt
- ==}).
-
-\item[$rules$]
- is a series of rule declarations. Each has a name $id$ and the formula is
- given by the $string$. Rule names must be distinct within any single
- theory.
-
-\item[$defs$] is a series of definitions. They are just like $rules$, except
- that every $string$ must be a definition (see below for details).
-
-\item[$constdefs$] combines the declaration of constants and their
- definition. The first $string$ is the type, the second the definition.
-
-\item[$axclass$] \index{*axclass section} defines an \rmindex{axiomatic type
- class} \cite{Wenzel:1997:TPHOL} as the intersection of existing classes,
- with additional axioms holding. Class axioms may not contain more than one
- type variable. The class axioms (with implicit sort constraints added) are
- bound to the given names. Furthermore a class introduction rule is
- generated, which is automatically employed by $instance$ to prove
- instantiations of this class.
-
-\item[$instance$] \index{*instance section} proves class inclusions or
- type arities at the logical level and then transfers these to the
- type signature. The instantiation is proven and checked properly.
- The user has to supply sufficient witness information: theorems
- ($longident$), axioms ($string$), or even arbitrary \ML{} tactic
- code $verbatim$.
-
-\item[$oracle$] links the theory to a trusted external reasoner. It is
- allowed to create theorems, but each theorem carries a proof object
- describing the oracle invocation. See \S\ref{sec:oracles} for details.
-
-\item[$local$, $global$] change the current name declaration mode.
- Initially, theories start in $local$ mode, causing all names of
- types, constants, axioms etc.\ to be automatically qualified by the
- theory name. Changing this to $global$ causes all names to be
- declared as short base names only.
-
- The $local$ and $global$ declarations act like switches, affecting
- all following theory sections until changed again explicitly. Also
- note that the final state at the end of the theory will persist. In
- particular, this determines how the names of theorems stored later
- on are handled.
-
-\item[$setup$]\index{*setup!theory} applies a list of ML functions to
- the theory. The argument should denote a value of type
- \texttt{(theory -> theory) list}. Typically, ML packages are
- initialized in this way.
-
-\item[$ml$] \index{*ML section}
- consists of \ML\ code, typically for parse and print translation functions.
-\end{description}
-%
-Chapters~\ref{Defining-Logics} and \ref{chap:syntax} explain mixfix
-declarations, translation rules and the \texttt{ML} section in more detail.
-
-
-\subsection{*Classes and arities}
-\index{classes!context conditions}\index{arities!context conditions}
-
-In order to guarantee principal types~\cite{nipkow-prehofer},
-arity declarations must obey two conditions:
-\begin{itemize}
-\item There must not be any two declarations $ty :: (\vec{r})c$ and
- $ty :: (\vec{s})c$ with $\vec{r} \neq \vec{s}$. For example, this
- excludes the following:
-\begin{ttbox}
-arities
- foo :: (\{logic{\}}) logic
- foo :: (\{{\}})logic
-\end{ttbox}
-
-\item If there are two declarations $ty :: (s@1,\dots,s@n)c$ and $ty ::
- (s@1',\dots,s@n')c'$ such that $c' < c$ then $s@i' \preceq s@i$ must hold
- for $i=1,\dots,n$. The relationship $\preceq$, defined as
-\[ s' \preceq s \iff \forall c\in s. \exists c'\in s'.~ c'\le c, \]
-expresses that the set of types represented by $s'$ is a subset of the
-set of types represented by $s$. Assuming $term \preceq logic$, the
-following is forbidden:
-\begin{ttbox}
-arities
- foo :: (\{logic{\}})logic
- foo :: (\{{\}})term
-\end{ttbox}
-
-\end{itemize}
-
+\index{theories|(}
\section{The theory loader}\label{sec:more-theories}
\index{theories!reading}\index{files!reading}
@@ -247,13 +37,6 @@
dispose a large number of theories at once. Note that {\ML} bindings to
theorems etc.\ of removed theories may still persist.
-\item[reset \ttindexbold{delete_tmpfiles};] processing theory files usually
- causes temporary {\ML} files to be created. By default, these are deleted
- afterwards. Resetting the \texttt{delete_tmpfiles} flag inhibits this,
- leaving the generated code for debugging purposes. The basic location for
- temporary files is determined by the \texttt{ISABELLE_TMP} environment
- variable (which is private to the running Isabelle process and may be
- retrieved by \ttindex{getenv} from {\ML}).
\end{ttdescription}
\medskip Theory and {\ML} files are located by skimming through the
@@ -296,224 +79,6 @@
temporarily appended to the load path, too.
-\section{Locales}
-\label{Locales}
-
-Locales \cite{kammueller-locales} are a concept of local proof contexts. They
-are introduced as named syntactic objects within theories and can be
-opened in any descendant theory.
-
-\subsection{Declaring Locales}
-
-A locale is declared in a theory section that starts with the
-keyword \texttt{locale}. It consists typically of three parts, the
-\texttt{fixes} part, the \texttt{assumes} part, and the \texttt{defines} part.
-Appendix \ref{app:TheorySyntax} presents the full syntax.
-
-\subsubsection{Parts of Locales}
-
-The subsection introduced by the keyword \texttt{fixes} declares the locale
-constants in a way that closely resembles a global \texttt{consts}
-declaration. In particular, there may be an optional pretty printing syntax
-for the locale constants.
-
-The subsequent \texttt{assumes} part specifies the locale rules. They are
-defined like \texttt{rules}: by an identifier followed by the rule
-given as a string. Locale rules admit the statement of local assumptions
-about the locale constants. The \texttt{assumes} part is optional. Non-fixed
-variables in locale rules are automatically bound by the universal quantifier
-\texttt{!!} of the meta-logic.
-
-Finally, the \texttt{defines} part introduces the definitions that are
-available in the locale. Locale constants declared in the \texttt{fixes}
-section are defined using the meta-equality \texttt{==}. If the
-locale constant is a function, then its definition can (as usual) have
-variables on the left-hand side acting as formal parameters; they are
-considered as schematic variables and are automatically generalized by
-universal quantification of the meta-logic. The right hand side of a
-definition must not contain variables that are not already on the left hand
-side. In so far locale definitions behave like theory level definitions.
-However, the locale concept realizes \emph{dependent definitions}: any variable
-that is fixed as a locale constant can occur on the right hand side of
-definitions. For an illustration of these dependent definitions see the
-occurrence of the locale constant \texttt{G} on the right hand side of the
-definitions of the locale \texttt{group} below. Naturally, definitions can
-already use the syntax of the locale constants in the \texttt{fixes}
-subsection. The \texttt{defines} part is, as the \texttt{assumes} part,
-optional.
-
-\subsubsection{Example for Definition}
-The concrete syntax of locale definitions is demonstrated by example below.
-
-Locale \texttt{group} assumes the definition of groups in a theory
-file\footnote{This and other examples are from \texttt{HOL/ex}.}. A locale
-defining a convenient proof environment for group related proofs may be
-added to the theory as follows:
-\begin{ttbox}
- locale group =
- fixes
- G :: "'a grouptype"
- e :: "'a"
- binop :: "'a => 'a => 'a" (infixr "#" 80)
- inv :: "'a => 'a" ("i(_)" [90] 91)
- assumes
- Group_G "G: Group"
- defines
- e_def "e == unit G"
- binop_def "x # y == bin_op G x y"
- inv_def "i(x) == inverse G x"
-\end{ttbox}
-
-\subsubsection{Polymorphism}
-
-In contrast to polymorphic definitions in theories, the use of the
-same type variable for the declaration of different locale constants in the
-fixes part means \emph{the same} type. In other words, the scope of the
-polymorphic variables is extended over all constant declarations of a locale.
-In the above example \texttt{'a} refers to the same type which is fixed inside
-the locale. In an exported theorem (see \S\ref{sec:locale-export}) the
-constants of locale \texttt{group} are polymorphic, yet only simultaneously
-instantiatable.
-
-\subsubsection{Nested Locales}
-
-A locale can be defined as the extension of a previously defined
-locale. This operation of extension is optional and is syntactically
-expressed as
-\begin{ttbox}
-locale foo = bar + ...
-\end{ttbox}
-The locale \texttt{foo} builds on the constants and syntax of the locale {\tt
-bar}. That is, all contents of the locale \texttt{bar} can be used in
-definitions and rules of the corresponding parts of the locale {\tt
-foo}. Although locale \texttt{foo} assumes the \texttt{fixes} part of \texttt{bar} it
-does not automatically subsume its rules and definitions. Normally, one
-expects to use locale \texttt{foo} only if locale \texttt{bar} is already
-active. These aspects of use and activation of locales are considered in the
-subsequent section.
-
-
-\subsection{Locale Scope}
-
-Locales are by default inactive, but they can be invoked. The list of
-currently active locales is called \emph{scope}. The process of activating
-them is called \emph{opening}; the reverse is \emph{closing}.
-
-\subsubsection{Scope}
-The locale scope is part of each theory. It is a dynamic stack containing
-all active locales at a certain point in an interactive session.
-The scope lives until all locales are explicitly closed. At one time there
-can be more than one locale open. The contents of these various active
-locales are all visible in the scope. In case of nested locales for example,
-the nesting is actually reflected to the scope, which contains the nested
-locales as layers. To check the state of the scope during a development the
-function \texttt{Print\_scope} may be used. It displays the names of all open
-locales on the scope. The function \texttt{print\_locales} applied to a theory
-displays all locales contained in that theory and in addition also the
-current scope.
-
-The scope is manipulated by the commands for opening and closing of locales.
-
-\subsubsection{Opening}
-Locales can be \emph{opened} at any point during a session where
-we want to prove theorems concerning the locale. Opening a locale means
-making its contents visible by pushing it onto the scope of the current
-theory. Inside a scope of opened locales, theorems can use all definitions and
-rules contained in the locales on the scope. The rules and definitions may
-be accessed individually using the function \ttindex{thm}. This function is
-applied to the names assigned to locale rules and definitions as
-strings. The opening command is called \texttt{Open\_locale} and takes the
-name of the locale to be opened as its argument.
-
-If one opens a locale \texttt{foo} that is defined by extension from locale
-\texttt{bar}, the function \texttt{Open\_locale} checks if locale \texttt{bar}
-is open. If so, then it just opens \texttt{foo}, if not, then it prints a
-message and opens \texttt{bar} before opening \texttt{foo}. Naturally, this
-carries on, if \texttt{bar} is again an extension.
-
-\subsubsection{Closing}
-
-\emph{Closing} means to cancel the last opened locale, pushing it out of the
-scope. Theorems proved during the life cycle of this locale will be disabled,
-unless they have been explicitly exported, as described below. However, when
-the same locale is opened again these theorems may be used again as well,
-provided that they were saved as theorems in the first place, using
-\texttt{qed} or ML assignment. The command \texttt{Close\_locale} takes a
-locale name as a string and checks if this locale is actually the topmost
-locale on the scope. If this is the case, it removes this locale, otherwise
-it prints a warning message and does not change the scope.
-
-\subsubsection{Export of Theorems}
-\label{sec:locale-export}
-
-Export of theorems transports theorems out of the scope of locales. Locale
-rules that have been used in the proof of an exported theorem inside the
-locale are carried by the exported form of the theorem as its individual
-meta-assumptions. The locale constants are universally quantified variables
-in these theorems, hence such theorems can be instantiated individually.
-Definitions become unfolded; locale constants that were merely used for
-definitions vanish. Logically, exporting corresponds to a combined
-application of introduction rules for implication and universal
-quantification. Exporting forms a kind of normalization of theorems in a
-locale scope.
-
-Because locales can be nested, there are two different forms
-of export. The first one is realized by the function \texttt{export} that
-exports theorems through all layers of opened locales of the scope. Hence,
-the application of export to a theorem yields a theorem of the global level,
-that is, the current theory context without any local assumptions or
-definitions.
-
-When locales are nested we might want to export a theorem, not to the global
-level of the current theory but just to the previous level. The other export
-function, \texttt{Export}, transports theorems one level up in the scope; the
-theorem still uses locale constants, definitions and rules of the locales
-underneath.
-
-\subsection{Functions for Locales}
-\label{Syntax}
-\index{locales!functions}
-
-Here is a quick reference list of locale functions.
-\begin{ttbox}
- Open_locale : xstring -> unit
- Close_locale : xstring -> unit
- export : thm -> thm
- Export : thm -> thm
- thm : xstring -> thm
- Print_scope : unit -> unit
- print_locales: theory -> unit
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{Open_locale} $xstring$]
- opens the locale {\it xstring}, adding it to the scope of the theory of the
- current context. If the opened locale is built by extension, the ancestors
- are opened automatically.
-
-\item[\ttindexbold{Close_locale} $xstring$] eliminates the locale {\it
- xstring} from the scope if it is the topmost item on it, otherwise it does
- not change the scope and produces a warning.
-
-\item[\ttindexbold{export} $thm$] locale definitions become expanded in {\it
- thm} and locale rules that were used in the proof of {\it thm} become part
- of its individual assumptions. This normalization happens with respect to
- \emph{all open locales} on the scope.
-
-\item[\ttindexbold{Export} $thm$] works like \texttt{export} but normalizes
- theorems only up to the previous level of locales on the scope.
-
-\item[\ttindexbold{thm} $xstring$] applied to the name of a locale definition
- or rule, it returns that definition or rule as a theorem.
-
-\item[\ttindexbold{Print_scope}()] prints the names of the locales in the
- current scope of the current theory context.
-
-\item[\ttindexbold{print_locales} $theory$] prints all locales that are
- contained in {\it theory} directly or indirectly. It also displays the
- current scope similar to \texttt{Print\_scope}.
-\end{ttdescription}
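-
-A hypothetical session with the locale \texttt{group} of the example above
-might proceed as follows, accessing its rules and definitions by name:
-\begin{ttbox}
-Open_locale "group";
-val Group_G   = thm "Group_G";     (*a locale rule*)
-val binop_def = thm "binop_def";   (*a locale definition*)
-Close_locale "group";
-\end{ttbox}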
-
-
\section{Basic operations on theories}\label{BasicOperationsOnTheories}
\subsection{*Theory inclusion}
@@ -905,111 +470,6 @@
\end{ttdescription}
-\section{Oracles: calling trusted external reasoners}
-\label{sec:oracles}
-\index{oracles|(}
-
-Oracles allow Isabelle to take advantage of external reasoners such as
-arithmetic decision procedures, model checkers, fast tautology checkers or
-computer algebra systems. Invoked as an oracle, an external reasoner can
-create arbitrary Isabelle theorems. It is your responsibility to ensure that
-the external reasoner is as trustworthy as your application requires.
-Isabelle's proof objects~(\S\ref{sec:proofObjects}) record how each theorem
-depends upon oracle calls.
-
-\begin{ttbox}
-invoke_oracle : theory -> xstring -> Sign.sg * object -> thm
-Theory.add_oracle : bstring * (Sign.sg * object -> term) -> theory
- -> theory
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{invoke_oracle} $thy$ $name$ ($sign$, $data$)]
- invokes the oracle $name$ of theory $thy$ passing the information
- contained in the exception value $data$ and creating a theorem
- having signature $sign$. Note that type \ttindex{object} is just an
- abbreviation for \texttt{exn}. Errors arise if $thy$ does not have
- an oracle called $name$, if the oracle rejects its arguments or if
- its result is ill-typed.
-
-\item[\ttindexbold{Theory.add_oracle} $name$ $fun$ $thy$] extends
- $thy$ by oracle $fun$ called $name$. It is seldom called
- explicitly, as there is concrete syntax for oracles in theory files.
-\end{ttdescription}
-
-A curious feature of {\ML} exceptions is that they are ordinary constructors.
-The {\ML} type \texttt{exn} is a datatype that can be extended at any time. (See
-my {\em {ML} for the Working Programmer}~\cite{paulson-ml2}, especially
-page~136.) The oracle mechanism takes advantage of this to allow an oracle to
-take any information whatever.
-
-There must be some way of invoking the external reasoner from \ML, either
-because it is coded in {\ML} or via an operating system interface. Isabelle
-expects the {\ML} function to take two arguments: a signature and an
-exception object.
-\begin{itemize}
-\item The signature will typically be that of a descendant of the theory
- declaring the oracle. The oracle will use it to distinguish constants from
- variables, etc., and it will be attached to the generated theorems.
-
-\item The exception is used to pass arbitrary information to the oracle. This
- information must contain a full description of the problem to be solved by
- the external reasoner, including any additional information that might be
- required. The oracle may raise the exception to indicate that it cannot
- solve the specified problem.
-\end{itemize}
-
-A trivial example is provided in theory \texttt{FOL/ex/IffOracle}. This
-oracle generates tautologies of the form $P\bimp\cdots\bimp P$, with
-an even number of $P$s.
-
-The \texttt{ML} section of \texttt{IffOracle.thy} begins by declaring
-a few auxiliary functions (suppressed below) for creating the
-tautologies. Then it declares a new exception constructor for the
-information required by the oracle: here, just an integer. It finally
-defines the oracle function itself.
-\begin{ttbox}
-exception IffOracleExn of int;\medskip
-fun mk_iff_oracle (sign, IffOracleExn n) =
- if n > 0 andalso n mod 2 = 0
- then Trueprop \$ mk_iff n
- else raise IffOracleExn n;
-\end{ttbox}
-Observe the function's two arguments, the signature \texttt{sign} and the
-exception given as a pattern. The function checks its argument for
-validity. If $n$ is positive and even then it creates a tautology
-containing $n$ occurrences of~$P$. Otherwise it signals an error by
-raising its own exception (just by happy coincidence). Errors may be
-signalled by other means, such as returning the theorem \texttt{True}.
-Please ensure that the oracle's result is correctly typed; Isabelle
-will reject ill-typed theorems by raising a cryptic exception at top
-level.
-
-The \texttt{oracle} section of \texttt{IffOracle.thy} installs the above
-\texttt{ML} function as follows:
-\begin{ttbox}
-IffOracle = FOL +\medskip
-oracle
- iff = mk_iff_oracle\medskip
-end
-\end{ttbox}
-
-Now in \texttt{IffOracle.ML} we first define a wrapper for invoking
-the oracle:
-\begin{ttbox}
-fun iff_oracle n = invoke_oracle IffOracle.thy "iff"
- (sign_of IffOracle.thy, IffOracleExn n);
-\end{ttbox}
-
-Here are some example applications of the \texttt{iff} oracle. An
-argument of 10 is allowed, but one of 5 is forbidden:
-\begin{ttbox}
-iff_oracle 10;
-{\out "P <-> P <-> P <-> P <-> P <-> P <-> P <-> P <-> P <-> P" : thm}
-iff_oracle 5;
-{\out Exception- IffOracleExn 5 raised}
-\end{ttbox}
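A caller that prefers not to propagate the exception can handle it and
turn failure into an option value; a minimal sketch (this wrapper is not
part of the original example files):
\begin{ttbox}
(*NONE signals that the oracle rejected its argument*)
fun iff_oracle_opt n =
    SOME (iff_oracle n) handle IffOracleExn _ => NONE;
\end{ttbox}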
-
-\index{oracles|)}
\index{theories|)}
--- a/doc-src/Ref/thm.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/Ref/thm.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,4 +1,4 @@
-%% $Id$
+
\chapter{Theorems and Forward Proof}
\index{theorems|(}
@@ -13,19 +13,6 @@
ignore such complexities --- and skip all but the first section of
this chapter.
-The theorem operations do not print error messages. Instead, they raise
-exception~\xdx{THM}\@. Use \ttindex{print_exn} to display
-exceptions nicely:
-\begin{ttbox}
-allI RS mp handle e => print_exn e;
-{\out Exception THM raised:}
-{\out RSN: no unifiers -- premise 1}
-{\out (!!x. ?P(x)) ==> ALL x. ?P(x)}
-{\out [| ?P --> ?Q; ?P |] ==> ?Q}
-{\out}
-{\out uncaught exception THM}
-\end{ttbox}
-
\section{Basic operations on theorems}
\subsection{Pretty-printing a theorem}
--- a/doc-src/System/Thy/Basics.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/System/Thy/Basics.thy Wed Mar 04 10:45:52 2009 +0100
@@ -360,8 +360,8 @@
@{verbatim "-W"} option makes Isabelle enter a special process
wrapper for interaction via an external program; the protocol is a
  stripped-down version of the Proof General interaction mode, see
- also @{"file" "~~/src/Pure/Tools/isabelle_process.ML"} and @{"file"
- "~~/src/Pure/Tools/isabelle_process.scala"}.
+ also @{"file" "~~/src/Pure/System/isabelle_process.ML"} and @{"file"
+ "~~/src/Pure/System/isabelle_process.scala"}.
\medskip The @{verbatim "-S"} option makes the Isabelle process more
secure by disabling some critical operations, notably runtime
--- a/doc-src/System/Thy/Presentation.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/System/Thy/Presentation.thy Wed Mar 04 10:45:52 2009 +0100
@@ -654,7 +654,7 @@
"-"}@{text foo}'' to drop, and ``@{verbatim "/"}@{text foo}'' to
fold text tagged as @{text foo}. The builtin default is equivalent
to the tag specification ``@{verbatim
- "/theory,/proof,/ML,+visible,-invisible"}''; see also the {\LaTeX}
+ "+theory,+proof,+ML,+visible,-invisible"}''; see also the {\LaTeX}
macros @{verbatim "\\isakeeptag"}, @{verbatim "\\isadroptag"}, and
@{verbatim "\\isafoldtag"}, in @{"file"
"~~/lib/texinputs/isabelle.sty"}.
--- a/doc-src/System/Thy/document/Basics.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/System/Thy/document/Basics.tex Wed Mar 04 10:45:52 2009 +0100
@@ -369,7 +369,7 @@
\verb|-W| option makes Isabelle enter a special process
wrapper for interaction via an external program; the protocol is a
  stripped-down version of the Proof General interaction mode, see
- also \hyperlink{file.~~/src/Pure/Tools/isabelle-process.ML}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}Pure{\isacharslash}Tools{\isacharslash}isabelle{\isacharunderscore}process{\isachardot}ML}}}} and \hyperlink{file.~~/src/Pure/Tools/isabelle-process.scala}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}Pure{\isacharslash}Tools{\isacharslash}isabelle{\isacharunderscore}process{\isachardot}scala}}}}.
+ also \hyperlink{file.~~/src/Pure/System/isabelle-process.ML}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}Pure{\isacharslash}System{\isacharslash}isabelle{\isacharunderscore}process{\isachardot}ML}}}} and \hyperlink{file.~~/src/Pure/System/isabelle-process.scala}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}Pure{\isacharslash}System{\isacharslash}isabelle{\isacharunderscore}process{\isachardot}scala}}}}.
\medskip The \verb|-S| option makes the Isabelle process more
secure by disabling some critical operations, notably runtime
--- a/doc-src/System/Thy/document/Presentation.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/System/Thy/document/Presentation.tex Wed Mar 04 10:45:52 2009 +0100
@@ -668,7 +668,7 @@
tagged Isabelle command regions. Tags are specified as a comma
separated list of modifier/name pairs: ``\verb|+|\isa{foo}'' (or just ``\isa{foo}'') means to keep, ``\verb|-|\isa{foo}'' to drop, and ``\verb|/|\isa{foo}'' to
fold text tagged as \isa{foo}. The builtin default is equivalent
- to the tag specification ``\verb|/theory,/proof,/ML,+visible,-invisible|''; see also the {\LaTeX}
+ to the tag specification ``\verb|+theory,+proof,+ML,+visible,-invisible|''; see also the {\LaTeX}
macros \verb|\isakeeptag|, \verb|\isadroptag|, and
\verb|\isafoldtag|, in \hyperlink{file.~~/lib/texinputs/isabelle.sty}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}lib{\isacharslash}texinputs{\isacharslash}isabelle{\isachardot}sty}}}}.
--- a/doc-src/System/system.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/System/system.tex Wed Mar 04 10:45:52 2009 +0100
@@ -36,7 +36,7 @@
\input{Thy/document/Misc.tex}
\begingroup
- \bibliographystyle{plain} \small\raggedright\frenchspacing
+ \bibliographystyle{abbrv} \small\raggedright\frenchspacing
\bibliography{../manual}
\endgroup
--- a/doc-src/TutorialI/Types/Numbers.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/TutorialI/Types/Numbers.thy Wed Mar 04 10:45:52 2009 +0100
@@ -100,8 +100,8 @@
@{thm[display] div_mult1_eq[no_vars]}
\rulename{div_mult1_eq}
-@{thm[display] mod_mult1_eq[no_vars]}
-\rulename{mod_mult1_eq}
+@{thm[display] mod_mult_right_eq[no_vars]}
+\rulename{mod_mult_right_eq}
@{thm[display] div_mult2_eq[no_vars]}
\rulename{div_mult2_eq}
@@ -147,8 +147,8 @@
@{thm[display] zdiv_zadd1_eq[no_vars]}
\rulename{zdiv_zadd1_eq}
-@{thm[display] zmod_zadd1_eq[no_vars]}
-\rulename{zmod_zadd1_eq}
+@{thm[display] mod_add_eq[no_vars]}
+\rulename{mod_add_eq}
@{thm[display] zdiv_zmult1_eq[no_vars]}
\rulename{zdiv_zmult1_eq}
--- a/doc-src/TutorialI/Types/document/Numbers.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/TutorialI/Types/document/Numbers.tex Wed Mar 04 10:45:52 2009 +0100
@@ -244,7 +244,7 @@
\begin{isabelle}%
a\ {\isacharasterisk}\ b\ mod\ c\ {\isacharequal}\ a\ {\isacharasterisk}\ {\isacharparenleft}b\ mod\ c{\isacharparenright}\ mod\ c%
\end{isabelle}
-\rulename{mod_mult1_eq}
+\rulename{mod_mult_right_eq}
\begin{isabelle}%
a\ div\ {\isacharparenleft}b\ {\isacharasterisk}\ c{\isacharparenright}\ {\isacharequal}\ a\ div\ b\ div\ c%
@@ -318,7 +318,7 @@
\begin{isabelle}%
{\isacharparenleft}a\ {\isacharplus}\ b{\isacharparenright}\ mod\ c\ {\isacharequal}\ {\isacharparenleft}a\ mod\ c\ {\isacharplus}\ b\ mod\ c{\isacharparenright}\ mod\ c%
\end{isabelle}
-\rulename{zmod_zadd1_eq}
+\rulename{mod_add_eq}
\begin{isabelle}%
a\ {\isacharasterisk}\ b\ div\ c\ {\isacharequal}\ a\ {\isacharasterisk}\ {\isacharparenleft}b\ div\ c{\isacharparenright}\ {\isacharplus}\ a\ {\isacharasterisk}\ {\isacharparenleft}b\ mod\ c{\isacharparenright}\ div\ c%
--- a/doc-src/TutorialI/Types/numerics.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/TutorialI/Types/numerics.tex Wed Mar 04 10:45:52 2009 +0100
@@ -154,7 +154,7 @@
a\ *\ b\ div\ c\ =\ a\ *\ (b\ div\ c)\ +\ a\ *\ (b\ mod\ c)\ div\ c%
\rulename{div_mult1_eq}\isanewline
a\ *\ b\ mod\ c\ =\ a\ *\ (b\ mod\ c)\ mod\ c%
-\rulename{mod_mult1_eq}\isanewline
+\rulename{mod_mult_right_eq}\isanewline
a\ div\ (b*c)\ =\ a\ div\ b\ div\ c%
\rulename{div_mult2_eq}\isanewline
a\ mod\ (b*c)\ =\ b * (a\ div\ b\ mod\ c)\ +\ a\ mod\ b%
@@ -276,7 +276,7 @@
\rulename{zdiv_zadd1_eq}
\par\smallskip
(a\ +\ b)\ mod\ c\ =\ (a\ mod\ c\ +\ b\ mod\ c)\ mod\ c%
-\rulename{zmod_zadd1_eq}
+\rulename{mod_add_eq}
\end{isabelle}
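For a quick sanity check of \texttt{mod\_add\_eq} and
\texttt{mod\_mult\_right\_eq}, here is a concrete integer instance of each
(purely illustrative, not generated from the theories):
\[
  (7+5) \bmod 3 = 12 \bmod 3 = 0 = (1+2) \bmod 3
                = (7 \bmod 3 + 5 \bmod 3) \bmod 3,
\]
\[
  4 \times 7 \bmod 5 = 28 \bmod 5 = 3 = 8 \bmod 5
                     = 4 \times (7 \bmod 5) \bmod 5.
\]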
\begin{isabelle}
--- a/doc-src/ZF/FOL.tex Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/ZF/FOL.tex Wed Mar 04 10:45:52 2009 +0100
@@ -1,4 +1,4 @@
-%% $Id$
+%!TEX root = logics-ZF.tex
\chapter{First-Order Logic}
\index{first-order logic|(}
@@ -360,7 +360,8 @@
logic by designating \isa{IFOL} rather than \isa{FOL} as the parent
theory:
\begin{isabelle}
-\isacommand{theory}\ IFOL\_examples\ =\ IFOL:
+\isacommand{theory}\ IFOL\_examples\ \isacommand{imports}\ IFOL\isanewline
+\isacommand{begin}
\end{isabelle}
The proof begins by entering the goal, then applying the rule $({\imp}I)$.
\begin{isabelle}
@@ -441,7 +442,7 @@
\ 1.\ (\isasymexists y.\ \isasymforall x.\ Q(x,\ y))\
\isasymlongrightarrow \ (\isasymforall x.\ \isasymexists y.\ Q(x,\ y))
\isanewline
-\isacommand{by} (tactic {*IntPr.fast_tac 1*})\isanewline
+\isacommand{by} (tactic \ttlbrace*IntPr.fast_tac 1*\ttrbrace)\isanewline
No\ subgoals!
\end{isabelle}
@@ -529,7 +530,8 @@
$\all{x}P(x)$ is true. Either way the theorem holds. First, we must
work in a theory based on classical logic, the theory \isa{FOL}:
\begin{isabelle}
-\isacommand{theory}\ FOL\_examples\ =\ FOL:
+\isacommand{theory}\ FOL\_examples\ \isacommand{imports}\ FOL\isanewline
+\isacommand{begin}
\end{isabelle}
The formal proof does not conform in any obvious way to the sketch given
@@ -631,7 +633,8 @@
$if::[o,o,o]\To o$. The axiom \tdx{if_def} asserts the
equation~$(if)$.
\begin{isabelle}
-\isacommand{theory}\ If\ =\ FOL:\isanewline
+\isacommand{theory}\ If\ \isacommand{imports}\ FOL\isanewline
+\isacommand{begin}\isanewline
\isacommand{constdefs}\isanewline
\ \ if\ ::\ "[o,o,o]=>o"\isanewline
\ \ \ "if(P,Q,R)\ ==\ P\&Q\ |\ \isachartilde P\&R"
--- a/doc-src/antiquote_setup.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/antiquote_setup.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Doc/antiquote_setup.ML
- ID: $Id$
Author: Makarius
Auxiliary antiquotations for the Isabelle manuals.
@@ -13,13 +12,17 @@
(* misc utils *)
-val clean_string = translate_string
+fun translate f = Symbol.explode #> map f #> implode;
+
+val clean_string = translate
(fn "_" => "\\_"
+ | "#" => "\\#"
| "<" => "$<$"
| ">" => "$>$"
- | "#" => "\\#"
| "{" => "\\{"
+ | "|" => "$\\mid$"
| "}" => "\\}"
+ | "\\<dash>" => "-"
| c => c);
fun clean_name "\\<dots>" = "dots"
@@ -28,7 +31,7 @@
| clean_name "_" = "underscore"
| clean_name "{" = "braceleft"
| clean_name "}" = "braceright"
- | clean_name s = s |> translate_string (fn "_" => "-" | c => c);
+ | clean_name s = s |> translate (fn "_" => "-" | "\\<dash>" => "-" | c => c);
(* verbatim text *)
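For illustration, assuming Symbol.explode splits a plain ASCII string into
single-character symbols, the two cleaning functions behave as follows
(the example string is chosen here and does not come from the sources):

clean_string "fold_map";   (*yields the ML string "fold\\_map", i.e. the underscore is escaped for LaTeX*)
clean_name "fold_map";     (*yields "fold-map", cf. the dashed names in the generated hyperlink labels*)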
@@ -66,8 +69,9 @@
val txt' = if kind = "" then txt else kind ^ " " ^ txt;
val _ = writeln (ml (txt1, txt2));
val _ = ML_Context.eval_in (SOME ctxt) false Position.none (ml (txt1, txt2));
+ val kind' = if kind = "" then "ML" else "ML " ^ kind;
in
- "\\indexml" ^ kind ^ enclose "{" "}" (clean_string txt1) ^
+ "\\indexdef{}{" ^ kind' ^ "}{" ^ clean_string txt1 ^ "}" ^
(txt'
|> (if ! O.quotes then quote else I)
|> (if ! O.display then enclose "\\begin{verbatim}\n" "\n\\end{verbatim}"
@@ -193,6 +197,7 @@
entity_antiqs no_check "" "case" @
entity_antiqs (K ThyOutput.defined_command) "" "antiquotation" @
entity_antiqs (fn _ => fn name => is_some (OS.Process.getEnv name)) "isatt" "setting" @
+ entity_antiqs no_check "" "inference" @
entity_antiqs no_check "isatt" "executable" @
entity_antiqs (K check_tool) "isatt" "tool" @
entity_antiqs (K (File.exists o Path.explode)) "isatt" "file" @
--- a/doc-src/isar.sty Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/isar.sty Wed Mar 04 10:45:52 2009 +0100
@@ -1,6 +1,3 @@
-
-%% $Id$
-
\usepackage{ifthen}
\newcommand{\indexdef}[3]%
@@ -20,3 +17,9 @@
\newcommand{\isasymIMPORTS}{\isakeyword{imports}}
\newcommand{\isasymIN}{\isakeyword{in}}
\newcommand{\isasymSTRUCTURE}{\isakeyword{structure}}
+\newcommand{\isasymFIXES}{\isakeyword{fixes}}
+\newcommand{\isasymASSUMES}{\isakeyword{assumes}}
+\newcommand{\isasymSHOWS}{\isakeyword{shows}}
+\newcommand{\isasymOBTAINS}{\isakeyword{obtains}}
+
+\newcommand{\isasymASSM}{\isacommand{assm}}
--- a/doc-src/manual.bib Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/manual.bib Wed Mar 04 10:45:52 2009 +0100
@@ -1,6 +1,4 @@
% BibTeX database for the Isabelle documentation
-%
-% Lawrence C Paulson $Id$
%publishers
@string{AP="Academic Press"}
@@ -185,6 +183,16 @@
{F}ormal-{L}ogic {E}ngineering},
crossref = {tphols99}}
+
+@InProceedings{Bezem-Coquand:2005,
+ author = {M.A. Bezem and T. Coquand},
+ title = {Automating {Coherent Logic}},
+ booktitle = {LPAR-12},
+ editor = {G. Sutcliffe and A. Voronkov},
+ volume = 3835,
+ series = LNCS,
+ publisher = Springer}
+
@book{Bird-Wadler,author="Richard Bird and Philip Wadler",
title="Introduction to Functional Programming",publisher=PH,year=1988}
@@ -469,6 +477,17 @@
number = {364/07}
}
+@InProceedings{Haftmann-Wenzel:2009,
+ author = {Florian Haftmann and Makarius Wenzel},
+ title = {Local theory specifications in {Isabelle/Isar}},
+ editor = {Stefano Berardi and Ferruccio Damiani and de Liguoro, Ugo},
+ booktitle = {Types for Proofs and Programs, TYPES 2008},
+ publisher = {Springer},
+ series = {LNCS},
+ volume = {????},
+ year = {2009}
+}
+
@manual{isabelle-classes,
author = {Florian Haftmann},
title = {Haskell-style type classes with {Isabelle}/{Isar}},
@@ -669,6 +688,16 @@
pages = {341-386},
crossref = {birtwistle89}}
+@Article{Miller:1991,
+ author = {Dale Miller},
+ title = {A Logic Programming Language with Lambda-Abstraction, Function Variables,
+ and Simple Unification},
+ journal = {Journal of Logic and Computation},
+ year = 1991,
+ volume = 1,
+ number = 4
+}
+
@Article{miller-mixed,
Author = {Dale Miller},
Title = {Unification Under a Mixed Prefix},
@@ -1198,6 +1227,15 @@
pages = {578-596},
crossref = {fme93}}
+@Article{Schroeder-Heister:1984,
+ author = {Peter Schroeder-Heister},
+ title = {A Natural Extension of Natural Deduction},
+ journal = {Journal of Symbolic Logic},
+ year = 1984,
+ volume = 49,
+ number = 4
+}
+
@inproceedings{slind-tfl,
author = {Konrad Slind},
title = {Function Definition in Higher Order Logic},
@@ -1331,6 +1369,24 @@
year=2002,
note = {\url{http://tumb1.biblio.tu-muenchen.de/publ/diss/in/2002/wenzel.html}}}
+@Article{Wenzel-Wiedijk:2002,
+ author = {Freek Wiedijk and Markus Wenzel},
+ title = {A comparison of the mathematical proof languages {Mizar} and {Isar}.},
+ journal = {Journal of Automated Reasoning},
+ year = 2002,
+ volume = 29,
+ number = {3-4}
+}
+
+@InCollection{Wenzel-Paulson:2006,
+ author = {Markus Wenzel and Lawrence C. Paulson},
+ title = {{Isabelle/Isar}},
+ booktitle = {The Seventeen Provers of the World},
+ year = 2006,
+ editor = {F. Wiedijk},
+ series = {LNAI 3600}
+}
+
@InCollection{Wenzel:2006:Festschrift,
author = {Makarius Wenzel},
title = {{Isabelle/Isar} --- a generic framework for human-readable proof documents},
--- a/doc-src/more_antiquote.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/doc-src/more_antiquote.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Doc/more_antiquote.ML
- ID: $Id$
Author: Florian Haftmann, TU Muenchen
More antiquotations.
@@ -92,9 +91,9 @@
let
val thy = ProofContext.theory_of ctxt;
val const = Code_Unit.check_const thy raw_const;
- val (_, funcgr) = Code_Funcgr.make thy [const];
+ val (_, funcgr) = Code_Wellsorted.make thy [const];
fun holize thm = @{thm meta_eq_to_obj_eq} OF [thm];
- val thms = Code_Funcgr.eqns funcgr const
+ val thms = Code_Wellsorted.eqns funcgr const
|> map_filter (fn (thm, linear) => if linear then SOME thm else NONE)
|> map (holize o no_vars ctxt o AxClass.overload thy);
in ThyOutput.output_list pretty_thm src ctxt thms end;
--- a/doc/Contents Wed Mar 04 10:43:39 2009 +0100
+++ b/doc/Contents Wed Mar 04 10:45:52 2009 +0100
@@ -6,13 +6,16 @@
functions Tutorial on Function Definitions
codegen Tutorial on Code Generation
sugar LaTeX sugar for proof documents
- ind-defs (Co)Inductive Definitions in ZF
Reference Manuals
isar-ref The Isabelle/Isar Reference Manual
implementation The Isabelle/Isar Implementation Manual
system The Isabelle System Manual
- ref The Isabelle Reference Manual
+
+Old Manuals (outdated!)
+ intro Old Introduction to Isabelle
+ ref Old Isabelle Reference Manual
logics Isabelle's Logics: overview and misc logics
logics-HOL Isabelle's Logics: HOL
logics-ZF Isabelle's Logics: FOL and ZF
+ ind-defs (Co)Inductive Definitions in ZF
--- a/etc/settings Wed Mar 04 10:43:39 2009 +0100
+++ b/etc/settings Wed Mar 04 10:45:52 2009 +0100
@@ -60,12 +60,6 @@
#ML_OPTIONS=""
#ML_PLATFORM=""
-# Alice 1.4 (experimental!)
-#ML_SYSTEM=alice
-#ML_HOME="/usr/local/alice/bin"
-#ML_OPTIONS=""
-#ML_PLATFORM=""
-
###
### JVM components (Scala or Java)
@@ -268,6 +262,8 @@
# zChaff (SAT Solver, cf. Isabelle/src/HOL/Tools/sat_solver.ML)
#ZCHAFF_HOME=/usr/local/bin
+#ZCHAFF_VERSION=2004.5.13
+#ZCHAFF_VERSION=2004.11.15
# BerkMin561 (SAT Solver, cf. Isabelle/src/HOL/Tools/sat_solver.ML)
#BERKMIN_HOME=/usr/local/bin
--- a/lib/Tools/codegen Wed Mar 04 10:43:39 2009 +0100
+++ b/lib/Tools/codegen Wed Mar 04 10:45:52 2009 +0100
@@ -36,5 +36,5 @@
THY=$(echo $THY | sed -e 's/\\/\\\\"/g; s/"/\\\"/g')
ISAR="theory Codegen imports \"$THY\" begin export_code $CMD end"
-echo "$ISAR" | "$ISABELLE_TOOL" tty -l "$IMAGE"
+echo "$ISAR" | "$ISABELLE_PROCESS" -I "$IMAGE"
exit ${PIPESTATUS[1]}
--- a/src/FOL/IFOL.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/FOL/IFOL.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: FOL/IFOL.thy
- ID: $Id$
Author: Lawrence C Paulson and Markus Wenzel
*)
@@ -14,9 +13,10 @@
"~~/src/Tools/IsaPlanner/isand.ML"
"~~/src/Tools/IsaPlanner/rw_tools.ML"
"~~/src/Tools/IsaPlanner/rw_inst.ML"
- "~~/src/Provers/eqsubst.ML"
+ "~~/src/Tools/eqsubst.ML"
"~~/src/Provers/quantifier1.ML"
- "~~/src/Provers/project_rule.ML"
+ "~~/src/Tools/intuitionistic.ML"
+ "~~/src/Tools/project_rule.ML"
"~~/src/Tools/atomize_elim.ML"
("fologic.ML")
("hypsubstdata.ML")
@@ -610,6 +610,8 @@
subsection {* Intuitionistic Reasoning *}
+setup {* Intuitionistic.method_setup "iprover" *}
+
lemma impE':
assumes 1: "P --> Q"
and 2: "Q ==> R"
--- a/src/FOL/IsaMakefile Wed Mar 04 10:43:39 2009 +0100
+++ b/src/FOL/IsaMakefile Wed Mar 04 10:45:52 2009 +0100
@@ -32,12 +32,13 @@
$(SRC)/Provers/clasimp.ML $(SRC)/Provers/classical.ML \
$(SRC)/Tools/IsaPlanner/zipper.ML $(SRC)/Tools/IsaPlanner/isand.ML \
$(SRC)/Tools/IsaPlanner/rw_tools.ML \
- $(SRC)/Tools/IsaPlanner/rw_inst.ML $(SRC)/Provers/eqsubst.ML \
+ $(SRC)/Tools/IsaPlanner/rw_inst.ML $(SRC)/Tools/eqsubst.ML \
$(SRC)/Provers/hypsubst.ML $(SRC)/Tools/induct.ML \
- $(SRC)/Tools/atomize_elim.ML $(SRC)/Provers/project_rule.ML \
- $(SRC)/Provers/quantifier1.ML $(SRC)/Provers/splitter.ML FOL.thy \
- IFOL.thy ROOT.ML blastdata.ML cladata.ML document/root.tex \
- fologic.ML hypsubstdata.ML intprover.ML simpdata.ML
+ $(SRC)/Tools/intuitionistic.ML $(SRC)/Tools/atomize_elim.ML \
+ $(SRC)/Tools/project_rule.ML $(SRC)/Provers/quantifier1.ML \
+ $(SRC)/Provers/splitter.ML FOL.thy IFOL.thy ROOT.ML blastdata.ML \
+ cladata.ML document/root.tex fologic.ML hypsubstdata.ML intprover.ML \
+ simpdata.ML
@$(ISABELLE_TOOL) usedir -p 2 -b $(OUT)/Pure FOL
@@ -46,12 +47,12 @@
FOL-ex: FOL $(LOG)/FOL-ex.gz
$(LOG)/FOL-ex.gz: $(OUT)/FOL ex/First_Order_Logic.thy ex/If.thy \
- ex/IffOracle.thy ex/Nat.thy ex/Natural_Numbers.thy \
- ex/LocaleTest.thy \
- ex/Miniscope.thy ex/Prolog.thy ex/ROOT.ML ex/Classical.thy \
- ex/document/root.tex ex/Foundation.thy ex/Intuitionistic.thy \
- ex/Intro.thy ex/Propositional_Int.thy ex/Propositional_Cla.thy \
- ex/Quantifiers_Int.thy ex/Quantifiers_Cla.thy
+ ex/Iff_Oracle.thy ex/Nat.thy ex/Nat_Class.thy ex/Natural_Numbers.thy \
+ ex/LocaleTest.thy ex/Miniscope.thy ex/Prolog.thy ex/ROOT.ML \
+ ex/Classical.thy ex/document/root.tex ex/Foundation.thy \
+ ex/Intuitionistic.thy ex/Intro.thy ex/Propositional_Int.thy \
+ ex/Propositional_Cla.thy ex/Quantifiers_Int.thy \
+ ex/Quantifiers_Cla.thy
@$(ISABELLE_TOOL) usedir $(OUT)/FOL ex
--- a/src/FOL/ex/ROOT.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/FOL/ex/ROOT.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,7 +1,4 @@
(* Title: FOL/ex/ROOT.ML
- ID: $Id$
- Author: Lawrence C Paulson, Cambridge University Computer Laboratory
- Copyright 1992 University of Cambridge
Examples for First-Order Logic.
*)
@@ -11,23 +8,19 @@
"Natural_Numbers",
"Intro",
"Nat",
+ "Nat_Class",
"Foundation",
"Prolog",
-
"Intuitionistic",
"Propositional_Int",
"Quantifiers_Int",
-
"Classical",
"Propositional_Cla",
"Quantifiers_Cla",
"Miniscope",
"If",
-
- "NatClass",
- "IffOracle"
+ "Iff_Oracle"
];
(*regression test for locales -- sets several global flags!*)
no_document use_thy "LocaleTest";
-
--- a/src/FOLP/simp.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/FOLP/simp.ML Wed Mar 04 10:45:52 2009 +0100
@@ -433,7 +433,7 @@
val thms = map (trivial o cterm_of(Thm.theory_of_thm thm)) As;
val new_rws = List.concat(map mk_rew_rules thms);
val rwrls = map mk_trans (List.concat(map mk_rew_rules thms));
- val anet' = foldr lhs_insert_thm anet rwrls
+ val anet' = List.foldr lhs_insert_thm anet rwrls
in if !tracing andalso not(null new_rws)
then (writeln"Adding rewrites:"; Display.prths new_rws; ())
else ();
--- a/src/HOL/Algebra/Coset.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Algebra/Coset.thy Wed Mar 04 10:45:52 2009 +0100
@@ -602,8 +602,8 @@
interpret group G by fact
show ?thesis
proof (intro equiv.intro)
- show "refl (carrier G) (rcong H)"
- by (auto simp add: r_congruent_def refl_def)
+ show "refl_on (carrier G) (rcong H)"
+ by (auto simp add: r_congruent_def refl_on_def)
next
show "sym (rcong H)"
proof (simp add: r_congruent_def sym_def, clarify)
--- a/src/HOL/Algebra/Exponent.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Algebra/Exponent.thy Wed Mar 04 10:45:52 2009 +0100
@@ -210,12 +210,12 @@
lemma p_fac_forw: "[| (m::nat) > 0; k>0; k < p^a; (p^r) dvd (p^a)* m - k |]
==> (p^r) dvd (p^a) - k"
-apply (frule_tac k1 = k and i = p in p_fac_forw_lemma [THEN le_imp_power_dvd], auto)
+apply (frule p_fac_forw_lemma [THEN le_imp_power_dvd, of _ k p], auto)
apply (subgoal_tac "p^r dvd p^a*m")
prefer 2 apply (blast intro: dvd_mult2)
apply (drule dvd_diffD1)
apply assumption
- prefer 2 apply (blast intro: dvd_diff)
+ prefer 2 apply (blast intro: nat_dvd_diff)
apply (drule gr0_implies_Suc, auto)
done
@@ -226,12 +226,12 @@
lemma p_fac_backw: "[| m>0; k>0; (p::nat)\<noteq>0; k < p^a; (p^r) dvd p^a - k |]
==> (p^r) dvd (p^a)*m - k"
-apply (frule_tac k1 = k and i = p in r_le_a_forw [THEN le_imp_power_dvd], auto)
+apply (frule_tac k1 = k and p1 = p in r_le_a_forw [THEN le_imp_power_dvd], auto)
apply (subgoal_tac "p^r dvd p^a*m")
prefer 2 apply (blast intro: dvd_mult2)
apply (drule dvd_diffD1)
apply assumption
- prefer 2 apply (blast intro: dvd_diff)
+ prefer 2 apply (blast intro: nat_dvd_diff)
apply (drule less_imp_Suc_add, auto)
done
--- a/src/HOL/Algebra/Sylow.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Algebra/Sylow.thy Wed Mar 04 10:45:52 2009 +0100
@@ -20,8 +20,8 @@
and "RelM == {(N1,N2). N1 \<in> calM & N2 \<in> calM &
(\<exists>g \<in> carrier(G). N1 = (N2 #> g) )}"
-lemma (in sylow) RelM_refl: "refl calM RelM"
-apply (auto simp add: refl_def RelM_def calM_def)
+lemma (in sylow) RelM_refl_on: "refl_on calM RelM"
+apply (auto simp add: refl_on_def RelM_def calM_def)
apply (blast intro!: coset_mult_one [symmetric])
done
@@ -40,7 +40,7 @@
lemma (in sylow) RelM_equiv: "equiv calM RelM"
apply (unfold equiv_def)
-apply (blast intro: RelM_refl RelM_sym RelM_trans)
+apply (blast intro: RelM_refl_on RelM_sym RelM_trans)
done
lemma (in sylow) M_subset_calM_prep: "M' \<in> calM // RelM ==> M' \<subseteq> calM"
--- a/src/HOL/Algebra/poly/UnivPoly2.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Algebra/poly/UnivPoly2.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,6 +1,5 @@
(*
Title: Univariate Polynomials
- Id: $Id$
Author: Clemens Ballarin, started 9 December 1996
Copyright: Clemens Ballarin
*)
@@ -388,7 +387,7 @@
proof (cases k)
case 0 then show ?thesis by simp ring
next
- case Suc then show ?thesis by (simp add: algebra_simps) ring
+ case Suc then show ?thesis by simp (ring, simp)
qed
then show "coeff (monom a 0 * p) k = coeff (a *s p) k" by ring
qed
--- a/src/HOL/Arith_Tools.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Arith_Tools.thy Wed Mar 04 10:45:52 2009 +0100
@@ -68,8 +68,9 @@
apply (subst add_eq_if)
apply (simp split add: nat.split
del: nat_numeral_1_eq_1
- add: numeral_1_eq_Suc_0 [symmetric] Let_def
- neg_imp_number_of_eq_0 neg_number_of_pred_iff_0)
+ add: nat_numeral_1_eq_1 [symmetric]
+ numeral_1_eq_Suc_0 [symmetric]
+ neg_number_of_pred_iff_0)
done
lemma nat_rec_number_of [simp]:
@@ -89,7 +90,8 @@
apply (subst add_eq_if)
apply (simp split add: nat.split
del: nat_numeral_1_eq_1
- add: numeral_1_eq_Suc_0 [symmetric] Let_def neg_imp_number_of_eq_0
+ add: nat_numeral_1_eq_1 [symmetric]
+ numeral_1_eq_Suc_0 [symmetric]
neg_number_of_pred_iff_0)
done
--- a/src/HOL/Complex_Main.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Complex_Main.thy Wed Mar 04 10:45:52 2009 +0100
@@ -9,7 +9,6 @@
Ln
Taylor
Integration
- FrechetDeriv
begin
end
--- a/src/HOL/Decision_Procs/Approximation.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Decision_Procs/Approximation.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,7 +1,9 @@
-(* Title: HOL/Reflection/Approximation.thy
- * Author: Johannes Hölzl <hoelzl@in.tum.de> 2008 / 2009
- *)
+(* Title: HOL/Reflection/Approximation.thy
+ Author: Johannes Hoelzl <hoelzl@in.tum.de> 2008 / 2009
+*)
+
+header {* Prove inequalities about real numbers by computation *}
+
theory Approximation
imports Complex_Main Float Reflection Dense_Linear_Order Efficient_Nat
begin
--- a/src/HOL/Decision_Procs/Cooper.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Decision_Procs/Cooper.thy Wed Mar 04 10:45:52 2009 +0100
@@ -620,7 +620,7 @@
{assume "i=0" hence ?case using "12.hyps" by (simp add: dvd_def Let_def)}
moreover
{assume i1: "abs i = 1"
- from zdvd_1_left[where m = "Inum bs a"] uminus_dvd_conv[where d="1" and t="Inum bs a"]
+ from one_dvd[of "Inum bs a"] uminus_dvd_conv[where d="1" and t="Inum bs a"]
have ?case using i1 apply (cases "i=0", simp_all add: Let_def)
by (cases "i > 0", simp_all)}
moreover
@@ -640,7 +640,7 @@
{assume "i=0" hence ?case using "13.hyps" by (simp add: dvd_def Let_def)}
moreover
{assume i1: "abs i = 1"
- from zdvd_1_left[where m = "Inum bs a"] uminus_dvd_conv[where d="1" and t="Inum bs a"]
+ from one_dvd[of "Inum bs a"] uminus_dvd_conv[where d="1" and t="Inum bs a"]
have ?case using i1 apply (cases "i=0", simp_all add: Let_def)
apply (cases "i > 0", simp_all) done}
moreover
@@ -990,7 +990,7 @@
have "j=0 \<or> (j\<noteq>0 \<and> ?c = 0) \<or> (j\<noteq>0 \<and> ?c >0) \<or> (j\<noteq> 0 \<and> ?c<0)" by arith
moreover
{assume "j=0" hence z: "zlfm (Dvd j a) = (zlfm (Eq a))" by (simp add: Let_def)
- hence ?case using prems by (simp del: zlfm.simps add: zdvd_0_left)}
+ hence ?case using prems by (simp del: zlfm.simps)}
moreover
{assume "?c=0" and "j\<noteq>0" hence ?case
using zsplit0_I[OF spl, where x="i" and bs="bs"]
@@ -1005,7 +1005,7 @@
moreover
{assume cn: "?c < 0" and jnz: "j\<noteq>0" hence l: "?L (?l (Dvd j a))"
by (simp add: nb Let_def split_def)
- hence ?case using Ia cn jnz zdvd_zminus_iff[where m="abs j" and n="?c*i + ?N ?r" ]
+ hence ?case using Ia cn jnz dvd_minus_iff[of "abs j" "?c*i + ?N ?r" ]
by (simp add: Let_def split_def) }
ultimately show ?case by blast
next
@@ -1019,7 +1019,7 @@
have "j=0 \<or> (j\<noteq>0 \<and> ?c = 0) \<or> (j\<noteq>0 \<and> ?c >0) \<or> (j\<noteq> 0 \<and> ?c<0)" by arith
moreover
{assume "j=0" hence z: "zlfm (NDvd j a) = (zlfm (NEq a))" by (simp add: Let_def)
- hence ?case using prems by (simp del: zlfm.simps add: zdvd_0_left)}
+ hence ?case using prems by (simp del: zlfm.simps)}
moreover
{assume "?c=0" and "j\<noteq>0" hence ?case
using zsplit0_I[OF spl, where x="i" and bs="bs"]
@@ -1034,7 +1034,7 @@
moreover
{assume cn: "?c < 0" and jnz: "j\<noteq>0" hence l: "?L (?l (Dvd j a))"
by (simp add: nb Let_def split_def)
- hence ?case using Ia cn jnz zdvd_zminus_iff[where m="abs j" and n="?c*i + ?N ?r" ]
+ hence ?case using Ia cn jnz dvd_minus_iff[of "abs j" "?c*i + ?N ?r"]
by (simp add: Let_def split_def)}
ultimately show ?case by blast
qed auto
@@ -1092,10 +1092,10 @@
using lin ad d
proof(induct p rule: iszlfm.induct)
case (9 i c e) thus ?case using d
- by (simp add: zdvd_trans[where m="i" and n="d" and k="d'"])
+ by (simp add: dvd_trans[of "i" "d" "d'"])
next
case (10 i c e) thus ?case using d
- by (simp add: zdvd_trans[where m="i" and n="d" and k="d'"])
+ by (simp add: dvd_trans[of "i" "d" "d'"])
qed simp_all
lemma \<delta> : assumes lin:"iszlfm p"
@@ -1354,7 +1354,7 @@
case (9 j c e) hence nb: "numbound0 e" by simp
have "Ifm bbs (x#bs) (mirror (Dvd j (CN 0 c e))) = (j dvd c*x - Inum (x#bs) e)" (is "_ = (j dvd c*x - ?e)") by simp
also have "\<dots> = (j dvd (- (c*x - ?e)))"
- by (simp only: zdvd_zminus_iff)
+ by (simp only: dvd_minus_iff)
also have "\<dots> = (j dvd (c* (- x)) + ?e)"
apply (simp only: minus_mult_right[symmetric] minus_mult_left[symmetric] diff_def zadd_ac zminus_zadd_distrib)
by (simp add: algebra_simps)
@@ -1366,7 +1366,7 @@
case (10 j c e) hence nb: "numbound0 e" by simp
have "Ifm bbs (x#bs) (mirror (Dvd j (CN 0 c e))) = (j dvd c*x - Inum (x#bs) e)" (is "_ = (j dvd c*x - ?e)") by simp
also have "\<dots> = (j dvd (- (c*x - ?e)))"
- by (simp only: zdvd_zminus_iff)
+ by (simp only: dvd_minus_iff)
also have "\<dots> = (j dvd (c* (- x)) + ?e)"
apply (simp only: minus_mult_right[symmetric] minus_mult_left[symmetric] diff_def zadd_ac zminus_zadd_distrib)
by (simp add: algebra_simps)
@@ -1392,7 +1392,7 @@
and dr: "d\<beta> p l"
and d: "l dvd l'"
shows "d\<beta> p l'"
-using dr linp zdvd_trans[where n="l" and k="l'", simplified d]
+using dr linp dvd_trans[of _ "l" "l'", simplified d]
by (induct p rule: iszlfm.induct) simp_all
lemma \<alpha>_l: assumes lp: "iszlfm p"
@@ -1431,7 +1431,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(l*x + (l div c) * Inum (x # bs) e < 0) =
@@ -1449,7 +1449,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(l*x + (l div c) * Inum (x# bs) e \<le> 0) =
@@ -1467,7 +1467,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(l*x + (l div c)* Inum (x # bs) e > 0) =
@@ -1485,7 +1485,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(l*x + (l div c)* Inum (x # bs) e \<ge> 0) =
@@ -1505,7 +1505,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(l * x + (l div c) * Inum (x # bs) e = 0) =
@@ -1523,7 +1523,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(l * x + (l div c) * Inum (x # bs) e \<noteq> 0) =
@@ -1541,7 +1541,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(\<exists> (k::int). l * x + (l div c) * Inum (x # bs) e = ((l div c) * j) * k) = (\<exists> (k::int). (c * (l div c)) * x + (l div c) * Inum (x # bs) e = ((l div c) * j) * k)" by simp
@@ -1558,7 +1558,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(\<exists> (k::int). l * x + (l div c) * Inum (x # bs) e = ((l div c) * j) * k) = (\<exists> (k::int). (c * (l div c)) * x + (l div c) * Inum (x # bs) e = ((l div c) * j) * k)" by simp
--- a/src/HOL/Decision_Procs/Ferrack.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Decision_Procs/Ferrack.thy Wed Mar 04 10:45:52 2009 +0100
@@ -501,9 +501,9 @@
assumes gdg: "g dvd g'" and dgt':"dvdnumcoeff t g'"
shows "dvdnumcoeff t g"
using dgt' gdg
- by (induct t rule: dvdnumcoeff.induct, simp_all add: gdg zdvd_trans[OF gdg])
+ by (induct t rule: dvdnumcoeff.induct, simp_all add: gdg dvd_trans[OF gdg])
-declare zdvd_trans [trans add]
+declare dvd_trans [trans add]
lemma natabs0: "(nat (abs x) = 0) = (x = 0)"
by arith
--- a/src/HOL/Decision_Procs/MIR.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Decision_Procs/MIR.thy Wed Mar 04 10:45:52 2009 +0100
@@ -83,7 +83,7 @@
have "real (floor x) \<le> x" by simp
hence "real (floor x) < real (n + 1) " using ub by arith
hence "floor x < n+1" by simp
- moreover from lb have "n \<le> floor x" using floor_mono2[where x="real n" and y="x"]
+ moreover from lb have "n \<le> floor x" using floor_mono[where x="real n" and y="x"]
by simp ultimately show "floor x = n" by simp
qed
@@ -132,13 +132,13 @@
assume d: "real d rdvd t"
from d int_rdvd_real have d2: "d dvd (floor t)" and ti: "real (floor t) = t" by auto
- from iffD2[OF zdvd_abs1] d2 have "(abs d) dvd (floor t)" by blast
+ from iffD2[OF abs_dvd_iff] d2 have "(abs d) dvd (floor t)" by blast
with ti int_rdvd_real[symmetric] have "real (abs d) rdvd t" by blast
thus "abs (real d) rdvd t" by simp
next
assume "abs (real d) rdvd t" hence "real (abs d) rdvd t" by simp
with int_rdvd_real[where i="abs d" and x="t"] have d2: "abs d dvd floor t" and ti: "real (floor t) =t" by auto
- from iffD1[OF zdvd_abs1] d2 have "d dvd floor t" by blast
+ from iffD1[OF abs_dvd_iff] d2 have "d dvd floor t" by blast
with ti int_rdvd_real[symmetric] show "real d rdvd t" by blast
qed
@@ -675,9 +675,9 @@
assumes gdg: "g dvd g'" and dgt':"dvdnumcoeff t g'"
shows "dvdnumcoeff t g"
using dgt' gdg
- by (induct t rule: dvdnumcoeff.induct, simp_all add: gdg zdvd_trans[OF gdg])
-
-declare zdvd_trans [trans add]
+ by (induct t rule: dvdnumcoeff.induct, simp_all add: gdg dvd_trans[OF gdg])
+
+declare dvd_trans [trans add]
lemma natabs0: "(nat (abs x) = 0) = (x = 0)"
by arith
@@ -1775,11 +1775,11 @@
"(real (a::int) \<le> b) = (a \<le> floor b \<or> (a = floor b \<and> real (floor b) < b))"
proof( auto)
assume alb: "real a \<le> b" and agb: "\<not> a \<le> floor b"
- from alb have "floor (real a) \<le> floor b " by (simp only: floor_mono2)
+ from alb have "floor (real a) \<le> floor b " by (simp only: floor_mono)
hence "a \<le> floor b" by simp with agb show "False" by simp
next
assume alb: "a \<le> floor b"
- hence "real a \<le> real (floor b)" by (simp only: floor_mono2)
+ hence "real a \<le> real (floor b)" by (simp only: floor_mono)
also have "\<dots>\<le> b" by simp finally show "real a \<le> b" .
qed
@@ -2114,10 +2114,10 @@
using lin ad d
proof(induct p rule: iszlfm.induct)
case (9 i c e) thus ?case using d
- by (simp add: zdvd_trans[where m="i" and n="d" and k="d'"])
+ by (simp add: dvd_trans[of "i" "d" "d'"])
next
case (10 i c e) thus ?case using d
- by (simp add: zdvd_trans[where m="i" and n="d" and k="d'"])
+ by (simp add: dvd_trans[of "i" "d" "d'"])
qed simp_all
lemma \<delta> : assumes lin:"iszlfm p bs"
@@ -2496,7 +2496,7 @@
and dr: "d\<beta> p l"
and d: "l dvd l'"
shows "d\<beta> p l'"
-using dr linp zdvd_trans[where n="l" and k="l'", simplified d]
+using dr linp dvd_trans[of _ "l" "l'", simplified d]
by (induct p rule: iszlfm.induct) simp_all
lemma \<alpha>_l: assumes lp: "iszlfm p (a#bs)"
@@ -2535,7 +2535,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(real l * real x + real (l div c) * Inum (real x # bs) e < (0\<Colon>real)) =
@@ -2553,7 +2553,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(real l * real x + real (l div c) * Inum (real x # bs) e \<le> (0\<Colon>real)) =
@@ -2571,7 +2571,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(real l * real x + real (l div c) * Inum (real x # bs) e > (0\<Colon>real)) =
@@ -2589,7 +2589,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(real l * real x + real (l div c) * Inum (real x # bs) e \<ge> (0\<Colon>real)) =
@@ -2607,7 +2607,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(real l * real x + real (l div c) * Inum (real x # bs) e = (0\<Colon>real)) =
@@ -2625,7 +2625,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(real l * real x + real (l div c) * Inum (real x # bs) e \<noteq> (0\<Colon>real)) =
@@ -2643,7 +2643,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(\<exists> (k::int). real l * real x + real (l div c) * Inum (real x # bs) e = (real (l div c) * real j) * real k) = (\<exists> (k::int). real (c * (l div c)) * real x + real (l div c) * Inum (real x # bs) e = (real (l div c) * real j) * real k)" by simp
@@ -2660,7 +2660,7 @@
by (simp add: zdiv_mono1[OF clel cp])
then have ldcp:"0 < l div c"
by (simp add: zdiv_self[OF cnz])
- have "c * (l div c) = c* (l div c) + l mod c" using d' zdvd_iff_zmod_eq_0[where m="c" and n="l"] by simp
+ have "c * (l div c) = c* (l div c) + l mod c" using d' dvd_eq_mod_eq_0[of "c" "l"] by simp
hence cl:"c * (l div c) =l" using zmod_zdiv_equality[where a="l" and b="c", symmetric]
by simp
hence "(\<exists> (k::int). real l * real x + real (l div c) * Inum (real x # bs) e = (real (l div c) * real j) * real k) = (\<exists> (k::int). real (c * (l div c)) * real x + real (l div c) * Inum (real x # bs) e = (real (l div c) * real j) * real k)" by simp
@@ -3697,7 +3697,7 @@
assumes xb: "real m \<le> x \<and> x < real ((n::int) + 1)"
shows "\<exists> j\<in> {m.. n}. real j \<le> x \<and> x < real (j+1)" (is "\<exists> j\<in> ?N. ?P j")
by (rule bexI[where P="?P" and x="floor x" and A="?N"])
-(auto simp add: floor_less_eq[where x="x" and a="n+1", simplified] xb[simplified] floor_mono2[where x="real m" and y="x", OF conjunct1[OF xb], simplified floor_real_of_int[where n="m"]])
+(auto simp add: floor_less_eq[where x="x" and a="n+1", simplified] xb[simplified] floor_mono[where x="real m" and y="x", OF conjunct1[OF xb], simplified floor_real_of_int[where n="m"]])
lemma rsplit0_complete:
assumes xp:"0 \<le> x" and x1:"x < 1"
@@ -5926,7 +5926,7 @@
apply mir
done
-lemma "ALL x y. \<lfloor>x\<rfloor> = \<lfloor>y\<rfloor> \<longrightarrow> 0 \<le> abs (y - x) \<and> abs (y - x) \<le> 1"
+lemma "ALL (x::real) (y::real). \<lfloor>x\<rfloor> = \<lfloor>y\<rfloor> \<longrightarrow> 0 \<le> abs (y - x) \<and> abs (y - x) \<le> 1"
apply mir
done
--- a/src/HOL/Decision_Procs/cooper_tac.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Decision_Procs/cooper_tac.ML Wed Mar 04 10:45:52 2009 +0100
@@ -27,12 +27,9 @@
val Suc_plus1 = @{thm Suc_plus1};
val imp_le_cong = @{thm imp_le_cong};
val conj_le_cong = @{thm conj_le_cong};
-val nat_mod_add_eq = @{thm mod_add1_eq} RS sym;
-val nat_mod_add_left_eq = @{thm mod_add_left_eq} RS sym;
-val nat_mod_add_right_eq = @{thm mod_add_right_eq} RS sym;
-val int_mod_add_eq = @{thm mod_add_eq} RS sym;
-val int_mod_add_left_eq = @{thm zmod_zadd_left_eq} RS sym;
-val int_mod_add_right_eq = @{thm zmod_zadd_right_eq} RS sym;
+val mod_add_left_eq = @{thm mod_add_left_eq} RS sym;
+val mod_add_right_eq = @{thm mod_add_right_eq} RS sym;
+val mod_add_eq = @{thm mod_add_eq} RS sym;
val nat_div_add_eq = @{thm div_add1_eq} RS sym;
val int_div_add_eq = @{thm zdiv_zadd1_eq} RS sym;
@@ -70,14 +67,13 @@
val (t,np,nh) = prepare_for_linz q g
(* Some simpsets for dealing with mod div abs and nat*)
val mod_div_simpset = HOL_basic_ss
- addsimps [refl,nat_mod_add_eq, nat_mod_add_left_eq,
- nat_mod_add_right_eq, int_mod_add_eq,
- int_mod_add_right_eq, int_mod_add_left_eq,
+ addsimps [refl,mod_add_eq, mod_add_left_eq,
+ mod_add_right_eq,
nat_div_add_eq, int_div_add_eq,
@{thm mod_self}, @{thm "zmod_self"},
@{thm mod_by_0}, @{thm div_by_0},
@{thm "zdiv_zero"}, @{thm "zmod_zero"}, @{thm "div_0"}, @{thm "mod_0"},
- @{thm "zdiv_1"}, @{thm "zmod_1"}, @{thm "div_1"}, @{thm "mod_1"},
+ @{thm "div_by_1"}, @{thm "mod_by_1"}, @{thm "div_1"}, @{thm "mod_1"},
Suc_plus1]
addsimps @{thms add_ac}
addsimprocs [cancel_div_mod_proc]
--- a/src/HOL/Decision_Procs/ferrack_tac.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Decision_Procs/ferrack_tac.ML Wed Mar 04 10:45:52 2009 +0100
@@ -31,12 +31,8 @@
val Suc_plus1 = @{thm Suc_plus1};
val imp_le_cong = @{thm imp_le_cong};
val conj_le_cong = @{thm conj_le_cong};
-val nat_mod_add_eq = @{thm mod_add1_eq} RS sym;
-val nat_mod_add_left_eq = @{thm mod_add_left_eq} RS sym;
-val nat_mod_add_right_eq = @{thm mod_add_right_eq} RS sym;
-val int_mod_add_eq = @{thm mod_add_eq} RS sym;
-val int_mod_add_left_eq = @{thm zmod_zadd_left_eq} RS sym;
-val int_mod_add_right_eq = @{thm zmod_zadd_right_eq} RS sym;
+val mod_add_left_eq = @{thm mod_add_left_eq} RS sym;
+val mod_add_right_eq = @{thm mod_add_right_eq} RS sym;
val nat_div_add_eq = @{thm div_add1_eq} RS sym;
val int_div_add_eq = @{thm zdiv_zadd1_eq} RS sym;
val ZDIVISION_BY_ZERO_MOD = @{thm DIVISION_BY_ZERO} RS conjunct2;
--- a/src/HOL/Decision_Procs/mir_tac.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Decision_Procs/mir_tac.ML Wed Mar 04 10:45:52 2009 +0100
@@ -46,12 +46,9 @@
val Suc_plus1 = @{thm "Suc_plus1"};
val imp_le_cong = @{thm "imp_le_cong"};
val conj_le_cong = @{thm "conj_le_cong"};
-val nat_mod_add_eq = @{thm "mod_add1_eq"} RS sym;
-val nat_mod_add_left_eq = @{thm "mod_add_left_eq"} RS sym;
-val nat_mod_add_right_eq = @{thm "mod_add_right_eq"} RS sym;
-val int_mod_add_eq = @{thm "mod_add_eq"} RS sym;
-val int_mod_add_left_eq = @{thm "zmod_zadd_left_eq"} RS sym;
-val int_mod_add_right_eq = @{thm "zmod_zadd_right_eq"} RS sym;
+val mod_add_eq = @{thm "mod_add_eq"} RS sym;
+val mod_add_left_eq = @{thm "mod_add_left_eq"} RS sym;
+val mod_add_right_eq = @{thm "mod_add_right_eq"} RS sym;
val nat_div_add_eq = @{thm "div_add1_eq"} RS sym;
val int_div_add_eq = @{thm "zdiv_zadd1_eq"} RS sym;
val ZDIVISION_BY_ZERO_MOD = @{thm "DIVISION_BY_ZERO"} RS conjunct2;
@@ -96,10 +93,10 @@
val (t,np,nh) = prepare_for_mir thy q g
(* Some simpsets for dealing with mod div abs and nat*)
val mod_div_simpset = HOL_basic_ss
- addsimps [refl,nat_mod_add_eq,
+ addsimps [refl, mod_add_eq,
@{thm "mod_self"}, @{thm "zmod_self"},
@{thm "zdiv_zero"},@{thm "zmod_zero"},@{thm "div_0"}, @{thm "mod_0"},
- @{thm "zdiv_1"}, @{thm "zmod_1"}, @{thm "div_1"}, @{thm "mod_1"},
+ @{thm "div_by_1"}, @{thm "mod_by_1"}, @{thm "div_1"}, @{thm "mod_1"},
@{thm "Suc_plus1"}]
addsimps @{thms add_ac}
addsimprocs [cancel_div_mod_proc]
--- a/src/HOL/Deriv.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Deriv.thy Wed Mar 04 10:45:52 2009 +0100
@@ -9,7 +9,7 @@
header{* Differentiation *}
theory Deriv
-imports Lim Polynomial
+imports Lim
begin
text{*Standard Definitions*}
@@ -217,9 +217,7 @@
by (cases "n", simp, simp add: DERIV_power_Suc f)
-(* ------------------------------------------------------------------------ *)
-(* Caratheodory formulation of derivative at a point: standard proof *)
-(* ------------------------------------------------------------------------ *)
+text {* Caratheodory formulation of derivative at a point *}
lemma CARAT_DERIV:
"(DERIV f x :> l) =
@@ -307,6 +305,9 @@
==> DERIV (%y. f(y) / (g y)) x :> (d*g(x) - (e*f(x))) / (g(x) ^ Suc (Suc 0))"
by (drule (2) DERIV_divide) (simp add: mult_commute power_Suc)
+lemma lemma_DERIV_subst: "[| DERIV f x :> D; D = E |] ==> DERIV f x :> E"
+by auto
+
subsection {* Differentiability predicate *}
@@ -655,6 +656,9 @@
apply (blast intro: IVT2)
done
+
+subsection {* Boundedness of continuous functions *}
+
text{*By bisection, function continuous on closed interval is bounded above*}
lemma isCont_bounded:
@@ -773,6 +777,8 @@
done
+subsection {* Local extrema *}
+
text{*If @{term "0 < f'(x)"} then @{term x} is Locally Strictly Increasing At The Right*}
lemma DERIV_left_inc:
@@ -877,6 +883,9 @@
shows "[| DERIV f x :> l; 0 < d; \<forall>y. \<bar>x-y\<bar> < d --> f(x) = f(y) |] ==> l = 0"
by (auto dest!: DERIV_local_max)
+
+subsection {* Rolle's Theorem *}
+
text{*Lemma about introducing open ball in open interval*}
lemma lemma_interval_lt:
"[| a < x; x < b |]
@@ -1163,6 +1172,8 @@
qed
+subsection {* Continuous injective functions *}
+
 text{*Dull lemma: a continuous injection on an interval must have a
strict maximum at an end point, not in the middle.*}
@@ -1356,6 +1367,9 @@
using neq by (rule LIM_inverse)
qed
+
+subsection {* Generalized Mean Value Theorem *}
+
theorem GMVT:
fixes a b :: real
assumes alb: "a < b"
@@ -1442,245 +1456,6 @@
with g'cdef f'cdef cint show ?thesis by auto
qed
-lemma lemma_DERIV_subst: "[| DERIV f x :> D; D = E |] ==> DERIV f x :> E"
-by auto
-
-
-subsection {* Derivatives of univariate polynomials *}
-
-definition
- pderiv :: "'a::real_normed_field poly \<Rightarrow> 'a poly" where
- "pderiv = poly_rec 0 (\<lambda>a p p'. p + pCons 0 p')"
-
-lemma pderiv_0 [simp]: "pderiv 0 = 0"
- unfolding pderiv_def by (simp add: poly_rec_0)
-
-lemma pderiv_pCons: "pderiv (pCons a p) = p + pCons 0 (pderiv p)"
- unfolding pderiv_def by (simp add: poly_rec_pCons)
-
-lemma coeff_pderiv: "coeff (pderiv p) n = of_nat (Suc n) * coeff p (Suc n)"
- apply (induct p arbitrary: n, simp)
- apply (simp add: pderiv_pCons coeff_pCons algebra_simps split: nat.split)
- done
-
-lemma pderiv_eq_0_iff: "pderiv p = 0 \<longleftrightarrow> degree p = 0"
- apply (rule iffI)
- apply (cases p, simp)
- apply (simp add: expand_poly_eq coeff_pderiv del: of_nat_Suc)
- apply (simp add: expand_poly_eq coeff_pderiv coeff_eq_0)
- done
-
-lemma degree_pderiv: "degree (pderiv p) = degree p - 1"
- apply (rule order_antisym [OF degree_le])
- apply (simp add: coeff_pderiv coeff_eq_0)
- apply (cases "degree p", simp)
- apply (rule le_degree)
- apply (simp add: coeff_pderiv del: of_nat_Suc)
- apply (rule subst, assumption)
- apply (rule leading_coeff_neq_0, clarsimp)
- done
-
-lemma pderiv_singleton [simp]: "pderiv [:a:] = 0"
-by (simp add: pderiv_pCons)
-
-lemma pderiv_add: "pderiv (p + q) = pderiv p + pderiv q"
-by (rule poly_ext, simp add: coeff_pderiv algebra_simps)
-
-lemma pderiv_minus: "pderiv (- p) = - pderiv p"
-by (rule poly_ext, simp add: coeff_pderiv)
-
-lemma pderiv_diff: "pderiv (p - q) = pderiv p - pderiv q"
-by (rule poly_ext, simp add: coeff_pderiv algebra_simps)
-
-lemma pderiv_smult: "pderiv (smult a p) = smult a (pderiv p)"
-by (rule poly_ext, simp add: coeff_pderiv algebra_simps)
-
-lemma pderiv_mult: "pderiv (p * q) = p * pderiv q + q * pderiv p"
-apply (induct p)
-apply simp
-apply (simp add: pderiv_add pderiv_smult pderiv_pCons algebra_simps)
-done
-
-lemma pderiv_power_Suc:
- "pderiv (p ^ Suc n) = smult (of_nat (Suc n)) (p ^ n) * pderiv p"
-apply (induct n)
-apply simp
-apply (subst power_Suc)
-apply (subst pderiv_mult)
-apply (erule ssubst)
-apply (simp add: smult_add_left algebra_simps)
-done
-
-lemma DERIV_cmult2: "DERIV f x :> D ==> DERIV (%x. (f x) * c :: real) x :> D * c"
-by (simp add: DERIV_cmult mult_commute [of _ c])
-
-lemma DERIV_pow2: "DERIV (%x. x ^ Suc n) x :> real (Suc n) * (x ^ n)"
-by (rule lemma_DERIV_subst, rule DERIV_pow, simp)
-declare DERIV_pow2 [simp] DERIV_pow [simp]
-
-lemma DERIV_add_const: "DERIV f x :> D ==> DERIV (%x. a + f x :: 'a::real_normed_field) x :> D"
-by (rule lemma_DERIV_subst, rule DERIV_add, auto)
-
-lemma poly_DERIV[simp]: "DERIV (%x. poly p x) x :> poly (pderiv p) x"
-apply (induct p)
-apply simp
-apply (simp add: pderiv_pCons)
-apply (rule lemma_DERIV_subst)
-apply (rule DERIV_add DERIV_mult DERIV_const DERIV_ident | assumption)+
-apply simp
-done
-
-text{* Consequences of the derivative theorem above*}
-
-lemma poly_differentiable[simp]: "(%x. poly p x) differentiable (x::real)"
-apply (simp add: differentiable_def)
-apply (blast intro: poly_DERIV)
-done
-
-lemma poly_isCont[simp]: "isCont (%x. poly p x) (x::real)"
-by (rule poly_DERIV [THEN DERIV_isCont])
-
-lemma poly_IVT_pos: "[| a < b; poly p (a::real) < 0; 0 < poly p b |]
- ==> \<exists>x. a < x & x < b & (poly p x = 0)"
-apply (cut_tac f = "%x. poly p x" and a = a and b = b and y = 0 in IVT_objl)
-apply (auto simp add: order_le_less)
-done
-
-lemma poly_IVT_neg: "[| (a::real) < b; 0 < poly p a; poly p b < 0 |]
- ==> \<exists>x. a < x & x < b & (poly p x = 0)"
-by (insert poly_IVT_pos [where p = "- p" ]) simp
-
-lemma poly_MVT: "(a::real) < b ==>
- \<exists>x. a < x & x < b & (poly p b - poly p a = (b - a) * poly (pderiv p) x)"
-apply (drule_tac f = "poly p" in MVT, auto)
-apply (rule_tac x = z in exI)
-apply (auto simp add: real_mult_left_cancel poly_DERIV [THEN DERIV_unique])
-done
-
-text{*Lemmas for Derivatives*}
-
-(* FIXME
-lemma lemma_order_pderiv [rule_format]:
- "\<forall>p q a. 0 < n &
- poly (pderiv p) \<noteq> poly [] &
- poly p = poly ([- a, 1] %^ n *** q) & ~ [- a, 1] divides q
- --> n = Suc (order a (pderiv p))"
-apply (induct "n", safe)
-apply (rule order_unique_lemma, rule conjI, assumption)
-apply (subgoal_tac "\<forall>r. r divides (pderiv p) = r divides (pderiv ([-a, 1] %^ Suc n *** q))")
-apply (drule_tac [2] poly_pderiv_welldef)
- prefer 2 apply (simp add: divides_def del: pmult_Cons pexp_Suc)
-apply (simp del: pmult_Cons pexp_Suc)
-apply (rule conjI)
-apply (simp add: divides_def fun_eq del: pmult_Cons pexp_Suc)
-apply (rule_tac x = "[-a, 1] *** (pderiv q) +++ real (Suc n) %* q" in exI)
-apply (simp add: poly_pderiv_mult poly_pderiv_exp_prime poly_add poly_mult poly_cmult right_distrib mult_ac del: pmult_Cons pexp_Suc)
-apply (simp add: poly_mult right_distrib left_distrib mult_ac del: pmult_Cons)
-apply (erule_tac V = "\<forall>r. r divides pderiv p = r divides pderiv ([- a, 1] %^ Suc n *** q)" in thin_rl)
-apply (unfold divides_def)
-apply (simp (no_asm) add: poly_pderiv_mult poly_pderiv_exp_prime fun_eq poly_add poly_mult del: pmult_Cons pexp_Suc)
-apply (rule contrapos_np, assumption)
-apply (rotate_tac 3, erule contrapos_np)
-apply (simp del: pmult_Cons pexp_Suc, safe)
-apply (rule_tac x = "inverse (real (Suc n)) %* (qa +++ -- (pderiv q))" in exI)
-apply (subgoal_tac "poly ([-a, 1] %^ n *** q) = poly ([-a, 1] %^ n *** ([-a, 1] *** (inverse (real (Suc n)) %* (qa +++ -- (pderiv q))))) ")
-apply (drule poly_mult_left_cancel [THEN iffD1], simp)
-apply (simp add: fun_eq poly_mult poly_add poly_cmult poly_minus del: pmult_Cons mult_cancel_left, safe)
-apply (rule_tac c1 = "real (Suc n)" in real_mult_left_cancel [THEN iffD1])
-apply (simp (no_asm))
-apply (subgoal_tac "real (Suc n) * (poly ([- a, 1] %^ n) xa * poly q xa) =
- (poly qa xa + - poly (pderiv q) xa) *
- (poly ([- a, 1] %^ n) xa *
- ((- a + xa) * (inverse (real (Suc n)) * real (Suc n))))")
-apply (simp only: mult_ac)
-apply (rotate_tac 2)
-apply (drule_tac x = xa in spec)
-apply (simp add: left_distrib mult_ac del: pmult_Cons)
-done
-
-lemma order_pderiv: "[| poly (pderiv p) \<noteq> poly []; order a p \<noteq> 0 |]
- ==> (order a p = Suc (order a (pderiv p)))"
-apply (case_tac "poly p = poly []")
-apply (auto dest: pderiv_zero)
-apply (drule_tac a = a and p = p in order_decomp)
-using neq0_conv
-apply (blast intro: lemma_order_pderiv)
-done
-
-text{*Now justify the standard squarefree decomposition, i.e. f / gcd(f,f'). *}
-
-lemma poly_squarefree_decomp_order: "[| poly (pderiv p) \<noteq> poly [];
- poly p = poly (q *** d);
- poly (pderiv p) = poly (e *** d);
- poly d = poly (r *** p +++ s *** pderiv p)
- |] ==> order a q = (if order a p = 0 then 0 else 1)"
-apply (subgoal_tac "order a p = order a q + order a d")
-apply (rule_tac [2] s = "order a (q *** d)" in trans)
-prefer 2 apply (blast intro: order_poly)
-apply (rule_tac [2] order_mult)
- prefer 2 apply force
-apply (case_tac "order a p = 0", simp)
-apply (subgoal_tac "order a (pderiv p) = order a e + order a d")
-apply (rule_tac [2] s = "order a (e *** d)" in trans)
-prefer 2 apply (blast intro: order_poly)
-apply (rule_tac [2] order_mult)
- prefer 2 apply force
-apply (case_tac "poly p = poly []")
-apply (drule_tac p = p in pderiv_zero, simp)
-apply (drule order_pderiv, assumption)
-apply (subgoal_tac "order a (pderiv p) \<le> order a d")
-apply (subgoal_tac [2] " ([-a, 1] %^ (order a (pderiv p))) divides d")
- prefer 2 apply (simp add: poly_entire order_divides)
-apply (subgoal_tac [2] " ([-a, 1] %^ (order a (pderiv p))) divides p & ([-a, 1] %^ (order a (pderiv p))) divides (pderiv p) ")
- prefer 3 apply (simp add: order_divides)
- prefer 2 apply (simp add: divides_def del: pexp_Suc pmult_Cons, safe)
-apply (rule_tac x = "r *** qa +++ s *** qaa" in exI)
-apply (simp add: fun_eq poly_add poly_mult left_distrib right_distrib mult_ac del: pexp_Suc pmult_Cons, auto)
-done
-
-
-lemma poly_squarefree_decomp_order2: "[| poly (pderiv p) \<noteq> poly [];
- poly p = poly (q *** d);
- poly (pderiv p) = poly (e *** d);
- poly d = poly (r *** p +++ s *** pderiv p)
- |] ==> \<forall>a. order a q = (if order a p = 0 then 0 else 1)"
-apply (blast intro: poly_squarefree_decomp_order)
-done
-
-lemma order_pderiv2: "[| poly (pderiv p) \<noteq> poly []; order a p \<noteq> 0 |]
- ==> (order a (pderiv p) = n) = (order a p = Suc n)"
-apply (auto dest: order_pderiv)
-done
-
-lemma rsquarefree_roots:
- "rsquarefree p = (\<forall>a. ~(poly p a = 0 & poly (pderiv p) a = 0))"
-apply (simp add: rsquarefree_def)
-apply (case_tac "poly p = poly []", simp, simp)
-apply (case_tac "poly (pderiv p) = poly []")
-apply simp
-apply (drule pderiv_iszero, clarify)
-apply (subgoal_tac "\<forall>a. order a p = order a [h]")
-apply (simp add: fun_eq)
-apply (rule allI)
-apply (cut_tac p = "[h]" and a = a in order_root)
-apply (simp add: fun_eq)
-apply (blast intro: order_poly)
-apply (auto simp add: order_root order_pderiv2)
-apply (erule_tac x="a" in allE, simp)
-done
-
-lemma poly_squarefree_decomp: "[| poly (pderiv p) \<noteq> poly [];
- poly p = poly (q *** d);
- poly (pderiv p) = poly (e *** d);
- poly d = poly (r *** p +++ s *** pderiv p)
- |] ==> rsquarefree q & (\<forall>a. (poly q a = 0) = (poly p a = 0))"
-apply (frule poly_squarefree_decomp_order2, assumption+)
-apply (case_tac "poly p = poly []")
-apply (blast dest: pderiv_zero)
-apply (simp (no_asm) add: rsquarefree_def order_root del: pmult_Cons)
-apply (simp add: poly_entire del: pmult_Cons)
-done
-*)
subsection {* Theorems about Limits *}
--- a/src/HOL/Divides.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Divides.thy Wed Mar 04 10:45:52 2009 +0100
@@ -44,10 +44,10 @@
by (simp add: mod_div_equality2)
lemma mod_by_0 [simp]: "a mod 0 = a"
- using mod_div_equality [of a zero] by simp
+using mod_div_equality [of a zero] by simp
lemma mod_0 [simp]: "0 mod a = 0"
- using mod_div_equality [of zero a] div_0 by simp
+using mod_div_equality [of zero a] div_0 by simp
lemma div_mult_self2 [simp]:
assumes "b \<noteq> 0"
@@ -178,6 +178,12 @@
lemma dvd_div_mult_self: "a dvd b \<Longrightarrow> (b div a) * a = b"
by (subst (2) mod_div_equality [of b a, symmetric]) (simp add:dvd_imp_mod_0)
+lemma dvd_div_mult: "a dvd b \<Longrightarrow> (b div a) * c = b * c div a"
+apply (cases "a = 0")
+ apply simp
+apply (auto simp: dvd_def mult_assoc)
+done
+
lemma div_dvd_div[simp]:
"a dvd b \<Longrightarrow> a dvd c \<Longrightarrow> (b div a dvd c div a) = (b dvd c)"
apply (cases "a = 0")
@@ -188,6 +194,12 @@
apply(fastsimp simp add: mult_assoc)
done
+lemma dvd_mod_imp_dvd: "[| k dvd m mod n; k dvd n |] ==> k dvd m"
+ apply (subgoal_tac "k dvd (m div n) *n + m mod n")
+ apply (simp add: mod_div_equality)
+ apply (simp only: dvd_add dvd_mult)
+ done
+
text {* Addition respects modular equivalence. *}
lemma mod_add_left_eq: "(a + b) mod c = (a mod c + b) mod c"
@@ -330,6 +342,25 @@
unfolding diff_minus using assms
by (intro mod_add_cong mod_minus_cong)
+lemma dvd_neg_div: "y dvd x \<Longrightarrow> -x div y = - (x div y)"
+apply (case_tac "y = 0") apply simp
+apply (auto simp add: dvd_def)
+apply (subgoal_tac "-(y * k) = y * - k")
+ apply (erule ssubst)
+ apply (erule div_mult_self1_is_id)
+apply simp
+done
+
+lemma dvd_div_neg: "y dvd x \<Longrightarrow> x div -y = - (x div y)"
+apply (case_tac "y = 0") apply simp
+apply (auto simp add: dvd_def)
+apply (subgoal_tac "y * k = -y * -k")
+ apply (erule ssubst)
+ apply (rule div_mult_self1_is_id)
+ apply simp
+apply simp
+done
+
end
@@ -478,9 +509,9 @@
from divmod_rel have divmod_m_n: "divmod_rel m n (m div n) (m mod n)" .
with assms have m_div_n: "m div n \<ge> 1"
by (cases "m div n") (auto simp add: divmod_rel_def)
- from assms divmod_m_n have "divmod_rel (m - n) n (m div n - 1) (m mod n)"
+ from assms divmod_m_n have "divmod_rel (m - n) n (m div n - Suc 0) (m mod n)"
by (cases "m div n") (auto simp add: divmod_rel_def)
- with divmod_eq have "divmod (m - n) n = (m div n - 1, m mod n)" by simp
+ with divmod_eq have "divmod (m - n) n = (m div n - Suc 0, m mod n)" by simp
moreover from divmod_div_mod have "divmod (m - n) n = ((m - n) div n, (m - n) mod n)" .
ultimately have "m div n = Suc ((m - n) div n)"
and "m mod n = (m - n) mod n" using m_div_n by simp_all
@@ -653,16 +684,6 @@
apply (blast intro: divmod_rel [THEN divmod_rel_mult1_eq, THEN div_eq])
done
-lemma mod_mult1_eq: "(a*b) mod c = a*(b mod c) mod (c::nat)"
-by (rule mod_mult_right_eq)
-
-lemma mod_mult1_eq': "(a*b) mod (c::nat) = ((a mod c) * b) mod c"
-by (rule mod_mult_left_eq)
-
-lemma mod_mult_distrib_mod:
- "(a*b) mod (c::nat) = ((a mod c) * (b mod c)) mod c"
-by (rule mod_mult_eq)
-
lemma divmod_rel_add1_eq:
"[| divmod_rel a c aq ar; divmod_rel b c bq br; c > 0 |]
==> divmod_rel (a + b) c (aq + bq + (ar+br) div c) ((ar + br) mod c)"
@@ -675,9 +696,6 @@
apply (blast intro: divmod_rel_add1_eq [THEN div_eq] divmod_rel)
done
-lemma mod_add1_eq: "(a+b) mod (c::nat) = (a mod c + b mod c) mod c"
-by (rule mod_add_eq)
-
lemma mod_lemma: "[| (0::nat) < c; r < b |] ==> b * (q mod c) + r < b * c"
apply (cut_tac m = q and n = c in mod_less_divisor)
apply (drule_tac [2] m = "q mod c" in less_imp_Suc_add, auto)
@@ -795,12 +813,6 @@
apply (auto simp add: Suc_diff_le le_mod_geq)
done
-lemma nat_mod_div_trivial: "m mod n div n = (0 :: nat)"
-by simp
-
-lemma nat_mod_mod_trivial: "m mod n mod n = (m mod n :: nat)"
-by simp
-
subsubsection {* The Divides Relation *}
@@ -810,6 +822,9 @@
lemma dvd_1_iff_1 [simp]: "(m dvd Suc 0) = (m = Suc 0)"
by (simp add: dvd_def)
+lemma nat_dvd_1_iff_1 [simp]: "m dvd (1::nat) \<longleftrightarrow> m = 1"
+by (simp add: dvd_def)
+
lemma dvd_anti_sym: "[| m dvd n; n dvd m |] ==> m = (n::nat)"
unfolding dvd_def
by (force dest: mult_eq_self_implies_10 simp add: mult_assoc mult_eq_1_iff)
@@ -819,9 +834,9 @@
interpretation dvd!: order "op dvd" "\<lambda>n m \<Colon> nat. n dvd m \<and> \<not> m dvd n"
proof qed (auto intro: dvd_refl dvd_trans dvd_anti_sym)
-lemma dvd_diff: "[| k dvd m; k dvd n |] ==> k dvd (m-n :: nat)"
- unfolding dvd_def
- by (blast intro: diff_mult_distrib2 [symmetric])
+lemma nat_dvd_diff[simp]: "[| k dvd m; k dvd n |] ==> k dvd (m-n :: nat)"
+unfolding dvd_def
+by (blast intro: diff_mult_distrib2 [symmetric])
lemma dvd_diffD: "[| k dvd m-n; k dvd n; n\<le>m |] ==> k dvd (m::nat)"
apply (erule linorder_not_less [THEN iffD2, THEN add_diff_inverse, THEN subst])
@@ -829,7 +844,7 @@
done
lemma dvd_diffD1: "[| k dvd m-n; k dvd m; n\<le>m |] ==> k dvd (n::nat)"
-by (drule_tac m = m in dvd_diff, auto)
+by (drule_tac m = m in nat_dvd_diff, auto)
lemma dvd_reduce: "(k dvd n + k) = (k dvd (n::nat))"
apply (rule iffI)
@@ -838,7 +853,7 @@
apply (subgoal_tac "n = (n+k) -k")
prefer 2 apply simp
apply (erule ssubst)
- apply (erule dvd_diff)
+ apply (erule nat_dvd_diff)
apply (rule dvd_refl)
done
@@ -848,12 +863,6 @@
apply (blast intro: mod_mult_distrib2 [symmetric])
done
-lemma dvd_mod_imp_dvd: "[| (k::nat) dvd m mod n; k dvd n |] ==> k dvd m"
- apply (subgoal_tac "k dvd (m div n) *n + m mod n")
- apply (simp add: mod_div_equality)
- apply (simp only: dvd_add dvd_mult)
- done
-
lemma dvd_mod_iff: "k dvd n ==> ((k::nat) dvd m mod n) = (k dvd m)"
by (blast intro: dvd_mod_imp_dvd dvd_mod)
@@ -889,21 +898,9 @@
apply (simp only: dvd_eq_mod_eq_0)
done
-lemma le_imp_power_dvd: "!!i::nat. m \<le> n ==> i^m dvd i^n"
- apply (unfold dvd_def)
- apply (erule linorder_not_less [THEN iffD2, THEN add_diff_inverse, THEN subst])
- apply (simp add: power_add)
- done
-
lemma nat_zero_less_power_iff [simp]: "(x^n > 0) = (x > (0::nat) | n=0)"
by (induct n) auto
-lemma power_le_dvd [rule_format]: "k^j dvd n --> i\<le>j --> k^i dvd (n::nat)"
- apply (induct j)
- apply (simp_all add: le_Suc_eq)
- apply (blast dest!: dvd_mult_right)
- done
-
lemma power_dvd_imp_le: "[|i^m dvd i^n; (1::nat) < i|] ==> m \<le> n"
apply (rule power_le_imp_le_exp, assumption)
apply (erule dvd_imp_le, simp)
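
As an illustrative aside (not part of the changeset), the arithmetic behind the lemmas added or generalised in Divides.thy above, dvd_div_mult, dvd_mod_imp_dvd, dvd_neg_div and dvd_div_neg, can be spot-checked numerically. The Python sketch below assumes floor-division semantics, which agree with the Isabelle statements whenever the stated divisibility side conditions hold.

    # Spot-check of the Divides.thy identities on small values (illustrative only).
    for a in range(1, 8):
        for b in range(0, 56, a):               # a dvd b by construction
            for c in range(-5, 6):
                assert (b // a) * c == (b * c) // a        # dvd_div_mult
    for k in range(1, 8):
        for n in range(k, 60, k):               # k dvd n
            for m in range(0, 60):
                if (m % n) % k == 0:            # k dvd (m mod n)
                    assert m % k == 0           # dvd_mod_imp_dvd
    for y in range(1, 8):
        for q in range(-6, 7):
            x = y * q                           # y dvd x
            assert (-x) // y == -(x // y)       # dvd_neg_div
            assert x // (-y) == -(x // y)       # dvd_div_neg
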
--- a/src/HOL/Equiv_Relations.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Equiv_Relations.thy Wed Mar 04 10:45:52 2009 +0100
@@ -12,7 +12,7 @@
locale equiv =
fixes A and r
- assumes refl: "refl A r"
+ assumes refl_on: "refl_on A r"
and sym: "sym r"
and trans: "trans r"
@@ -27,21 +27,21 @@
"sym r ==> trans r ==> r\<inverse> O r \<subseteq> r"
by (unfold trans_def sym_def converse_def) blast
-lemma refl_comp_subset: "refl A r ==> r \<subseteq> r\<inverse> O r"
- by (unfold refl_def) blast
+lemma refl_on_comp_subset: "refl_on A r ==> r \<subseteq> r\<inverse> O r"
+ by (unfold refl_on_def) blast
lemma equiv_comp_eq: "equiv A r ==> r\<inverse> O r = r"
apply (unfold equiv_def)
apply clarify
apply (rule equalityI)
- apply (iprover intro: sym_trans_comp_subset refl_comp_subset)+
+ apply (iprover intro: sym_trans_comp_subset refl_on_comp_subset)+
done
text {* Second half. *}
lemma comp_equivI:
"r\<inverse> O r = r ==> Domain r = A ==> equiv A r"
- apply (unfold equiv_def refl_def sym_def trans_def)
+ apply (unfold equiv_def refl_on_def sym_def trans_def)
apply (erule equalityE)
apply (subgoal_tac "\<forall>x y. (x, y) \<in> r --> (y, x) \<in> r")
apply fast
@@ -63,12 +63,12 @@
done
lemma equiv_class_self: "equiv A r ==> a \<in> A ==> a \<in> r``{a}"
- by (unfold equiv_def refl_def) blast
+ by (unfold equiv_def refl_on_def) blast
lemma subset_equiv_class:
"equiv A r ==> r``{b} \<subseteq> r``{a} ==> b \<in> A ==> (a,b) \<in> r"
-- {* lemma for the next result *}
- by (unfold equiv_def refl_def) blast
+ by (unfold equiv_def refl_on_def) blast
lemma eq_equiv_class:
"r``{a} = r``{b} ==> equiv A r ==> b \<in> A ==> (a, b) \<in> r"
@@ -79,7 +79,7 @@
by (unfold equiv_def trans_def sym_def) blast
lemma equiv_type: "equiv A r ==> r \<subseteq> A \<times> A"
- by (unfold equiv_def refl_def) blast
+ by (unfold equiv_def refl_on_def) blast
theorem equiv_class_eq_iff:
"equiv A r ==> ((x, y) \<in> r) = (r``{x} = r``{y} & x \<in> A & y \<in> A)"
@@ -103,7 +103,7 @@
by (unfold quotient_def) blast
lemma Union_quotient: "equiv A r ==> Union (A//r) = A"
- by (unfold equiv_def refl_def quotient_def) blast
+ by (unfold equiv_def refl_on_def quotient_def) blast
lemma quotient_disj:
"equiv A r ==> X \<in> A//r ==> Y \<in> A//r ==> X = Y | (X \<inter> Y = {})"
@@ -228,7 +228,7 @@
lemma congruent2_implies_congruent:
"equiv A r1 ==> congruent2 r1 r2 f ==> a \<in> A ==> congruent r2 (f a)"
- by (unfold congruent_def congruent2_def equiv_def refl_def) blast
+ by (unfold congruent_def congruent2_def equiv_def refl_on_def) blast
lemma congruent2_implies_congruent_UN:
"equiv A1 r1 ==> equiv A2 r2 ==> congruent2 r1 r2 f ==> a \<in> A2 ==>
@@ -237,7 +237,7 @@
apply clarify
apply (rule equiv_type [THEN subsetD, THEN SigmaE2], assumption+)
apply (simp add: UN_equiv_class congruent2_implies_congruent)
- apply (unfold congruent2_def equiv_def refl_def)
+ apply (unfold congruent2_def equiv_def refl_on_def)
apply (blast del: equalityI)
done
@@ -272,7 +272,7 @@
==> congruent2 r1 r2 f"
-- {* Suggested by John Harrison -- the two subproofs may be *}
-- {* \emph{much} simpler than the direct proof. *}
- apply (unfold congruent2_def equiv_def refl_def)
+ apply (unfold congruent2_def equiv_def refl_on_def)
apply clarify
apply (blast intro: trans)
done
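
The renaming above (refl to refl_on) reflects that reflexivity is stated relative to a carrier set: refl_on A r requires r to be a subset of A x A together with (x, x) in r for every x in A. A small illustrative check in Python, with congruence modulo 3 on {0, ..., 8} standing in for an arbitrary equivalence relation; nothing here is Isabelle code.

    # Congruence mod 3 on a small carrier set is refl_on, sym and trans.
    A = set(range(9))
    r = {(x, y) for x in A for y in A if x % 3 == y % 3}

    refl_on = r <= {(x, y) for x in A for y in A} and all((x, x) in r for x in A)
    sym     = all((y, x) in r for (x, y) in r)
    trans   = all((x, z) in r for (x, y) in r for (u, z) in r if u == y)
    assert refl_on and sym and trans            # r is an equivalence relation on A
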
--- a/src/HOL/Extraction/Euclid.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Extraction/Euclid.thy Wed Mar 04 10:45:52 2009 +0100
@@ -189,7 +189,7 @@
assume pn: "p \<le> n"
from `prime p` have "0 < p" by (rule prime_g_zero)
then have "p dvd n!" using pn by (rule dvd_factorial)
- with dvd have "p dvd ?k - n!" by (rule dvd_diff)
+ with dvd have "p dvd ?k - n!" by (rule nat_dvd_diff)
then have "p dvd 1" by simp
with prime show False using prime_nd_one by auto
qed
--- a/src/HOL/Fact.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Fact.thy Wed Mar 04 10:45:52 2009 +0100
@@ -7,7 +7,7 @@
header{*Factorial Function*}
theory Fact
-imports Nat
+imports Main
begin
consts fact :: "nat => nat"
@@ -58,7 +58,7 @@
"n < Suc m ==> fact (Suc m - n) = (Suc m - n) * fact (m - n)"
apply (induct n arbitrary: m)
apply auto
-apply (drule_tac x = "m - 1" in meta_spec, auto)
+apply (drule_tac x = "m - Suc 0" in meta_spec, auto)
done
lemma fact_num0: "fact 0 = 1"
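
The adjusted Fact.thy proof concerns the identity fact (Suc m - n) = (Suc m - n) * fact (m - n) for n < Suc m. A short numeric check, illustrative only, using Python's math.factorial rather than the fact function defined in the theory:

    from math import factorial
    for m in range(0, 12):
        for n in range(0, m + 1):               # n < Suc m, i.e. n <= m
            assert factorial(m + 1 - n) == (m + 1 - n) * factorial(m - n)
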
--- a/src/HOL/GCD.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/GCD.thy Wed Mar 04 10:45:52 2009 +0100
@@ -60,9 +60,12 @@
lemma gcd_non_0: "n > 0 \<Longrightarrow> gcd m n = gcd n (m mod n)"
by simp
-lemma gcd_1 [simp, algebra]: "gcd m (Suc 0) = 1"
+lemma gcd_1 [simp, algebra]: "gcd m (Suc 0) = Suc 0"
by simp
+lemma nat_gcd_1_right [simp, algebra]: "gcd m 1 = 1"
+ unfolding One_nat_def by (rule gcd_1)
+
declare gcd.simps [simp del]
text {*
@@ -116,9 +119,12 @@
apply (blast intro: dvd_trans)
done
-lemma gcd_1_left [simp, algebra]: "gcd (Suc 0) m = 1"
+lemma gcd_1_left [simp, algebra]: "gcd (Suc 0) m = Suc 0"
by (simp add: gcd_commute)
+lemma nat_gcd_1_left [simp, algebra]: "gcd 1 m = 1"
+ unfolding One_nat_def by (rule gcd_1_left)
+
text {*
\medskip Multiplication laws
*}
@@ -156,7 +162,6 @@
apply (simp add: gcd_assoc)
apply (simp add: gcd_commute)
apply (simp_all add: mult_commute)
- apply (blast intro: dvd_mult)
done
@@ -404,7 +409,7 @@
{fix x y assume H: "a * x - b * y = d \<or> b * x - a * y = d"
have dv: "?g dvd a*x" "?g dvd b * y" "?g dvd b*x" "?g dvd a * y"
using dvd_mult2[OF gcd_dvd1[of a b]] dvd_mult2[OF gcd_dvd2[of a b]] by simp_all
- from dvd_diff[OF dv(1,2)] dvd_diff[OF dv(3,4)] H
+ from nat_dvd_diff[OF dv(1,2)] nat_dvd_diff[OF dv(3,4)] H
have ?rhs by auto}
ultimately show ?thesis by blast
qed
@@ -597,8 +602,8 @@
from h' have "int (nat \<bar>k\<bar>) = int (nat \<bar>i\<bar> * h')" by simp
then have "\<bar>k\<bar> = \<bar>i\<bar> * int h'" by (simp add: int_mult)
then show ?thesis
- apply (subst zdvd_abs1 [symmetric])
- apply (subst zdvd_abs2 [symmetric])
+ apply (subst abs_dvd_iff [symmetric])
+ apply (subst dvd_abs_iff [symmetric])
apply (unfold dvd_def)
apply (rule_tac x = "int h'" in exI, simp)
done
@@ -614,11 +619,11 @@
let ?m' = "nat \<bar>m\<bar>"
let ?n' = "nat \<bar>n\<bar>"
from `k dvd m` and `k dvd n` have dvd': "?k' dvd ?m'" "?k' dvd ?n'"
- unfolding zdvd_int by (simp_all only: int_nat_abs zdvd_abs1 zdvd_abs2)
+ unfolding zdvd_int by (simp_all only: int_nat_abs abs_dvd_iff dvd_abs_iff)
from gcd_greatest [OF dvd'] have "int (nat \<bar>k\<bar>) dvd zgcd m n"
unfolding zgcd_def by (simp only: zdvd_int)
then have "\<bar>k\<bar> dvd zgcd m n" by (simp only: int_nat_abs)
- then show "k dvd zgcd m n" by (simp add: zdvd_abs1)
+ then show "k dvd zgcd m n" by simp
qed
lemma div_zgcd_relprime:
@@ -721,7 +726,7 @@
assumes "k dvd i" shows "k dvd (zlcm i j)"
proof -
have "nat(abs k) dvd nat(abs i)" using `k dvd i`
- by(simp add:int_dvd_iff[symmetric] dvd_int_iff[symmetric] zdvd_abs1)
+ by(simp add:int_dvd_iff[symmetric] dvd_int_iff[symmetric])
thus ?thesis by(simp add:zlcm_def dvd_int_iff)(blast intro: dvd_trans)
qed
@@ -729,7 +734,7 @@
assumes "k dvd j" shows "k dvd (zlcm i j)"
proof -
have "nat(abs k) dvd nat(abs j)" using `k dvd j`
- by(simp add:int_dvd_iff[symmetric] dvd_int_iff[symmetric] zdvd_abs1)
+ by(simp add:int_dvd_iff[symmetric] dvd_int_iff[symmetric])
thus ?thesis by(simp add:zlcm_def dvd_int_iff)(blast intro: dvd_trans)
qed
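
The GCD.thy hunks above restate the gcd-of-one facts for the numeral 1 (nat_gcd_1_right, nat_gcd_1_left) and switch uses of dvd_diff to the nat-specific nat_dvd_diff, whose statement lives over truncated subtraction. A brief numeric illustration, assuming Python's math.gcd and max(m - n, 0) as a stand-in for nat subtraction:

    from math import gcd
    for m in range(0, 50):
        assert gcd(m, 1) == 1 and gcd(1, m) == 1        # nat_gcd_1_right / _left
    for k in range(1, 10):
        for m in range(0, 100, k):
            for n in range(0, 100, k):                  # k dvd m and k dvd n
                assert max(m - n, 0) % k == 0           # nat_dvd_diff
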
--- a/src/HOL/Groebner_Basis.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Groebner_Basis.thy Wed Mar 04 10:45:52 2009 +0100
@@ -147,7 +147,7 @@
next show "pwr (mul x y) q = mul (pwr x q) (pwr y q)" by (rule pwr_mul)
next show "pwr (pwr x p) q = pwr x (p * q)" by (rule pwr_pwr)
next show "pwr x 0 = r1" using pwr_0 .
-next show "pwr x 1 = x" by (simp add: nat_number pwr_Suc pwr_0 mul_1 mul_c)
+next show "pwr x 1 = x" unfolding One_nat_def by (simp add: nat_number pwr_Suc pwr_0 mul_1 mul_c)
next show "mul x (add y z) = add (mul x y) (mul x z)" using mul_d by simp
next show "pwr x (Suc q) = mul x (pwr x q)" using pwr_Suc by simp
next show "pwr x (2 * n) = mul (pwr x n) (pwr x n)" by (simp add: nat_number mul_pwr)
@@ -436,8 +436,8 @@
*} "solve polynomial equations over (semi)rings and ideal membership problems using Groebner bases"
declare dvd_def[algebra]
declare dvd_eq_mod_eq_0[symmetric, algebra]
-declare nat_mod_div_trivial[algebra]
-declare nat_mod_mod_trivial[algebra]
+declare mod_div_trivial[algebra]
+declare mod_mod_trivial[algebra]
declare conjunct1[OF DIVISION_BY_ZERO, algebra]
declare conjunct2[OF DIVISION_BY_ZERO, algebra]
declare zmod_zdiv_equality[symmetric,algebra]
@@ -448,16 +448,16 @@
declare zmod_zminus2[algebra]
declare zdiv_zero[algebra]
declare zmod_zero[algebra]
-declare zmod_1[algebra]
-declare zdiv_1[algebra]
+declare mod_by_1[algebra]
+declare div_by_1[algebra]
declare zmod_minus1_right[algebra]
declare zdiv_minus1_right[algebra]
declare mod_div_trivial[algebra]
declare mod_mod_trivial[algebra]
-declare zmod_zmult_self1[algebra]
-declare zmod_zmult_self2[algebra]
+declare mod_mult_self2_is_0[algebra]
+declare mod_mult_self1_is_0[algebra]
declare zmod_eq_0_iff[algebra]
-declare zdvd_0_left[algebra]
+declare dvd_0_left_iff[algebra]
declare zdvd1_eq[algebra]
declare zmod_eq_dvd_iff[algebra]
declare nat_mod_eq_iff[algebra]
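
The [algebra] declarations above replace the nat-specific nat_mod_div_trivial and nat_mod_mod_trivial by the generic mod_div_trivial and mod_mod_trivial, that is, (m mod n) div n = 0 and (m mod n) mod n = m mod n. A small illustrative check; Python's % and //, like Isabelle's int div and mod, round toward negative infinity, so the same identities hold:

    for m in range(-20, 21):
        for n in list(range(-7, 0)) + list(range(1, 8)):    # n nonzero
            assert (m % n) // n == 0                        # mod_div_trivial
            assert (m % n) % n == m % n                     # mod_mod_trivial
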
--- a/src/HOL/HOL.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/HOL.thy Wed Mar 04 10:45:52 2009 +0100
@@ -12,14 +12,15 @@
"~~/src/Tools/IsaPlanner/isand.ML"
"~~/src/Tools/IsaPlanner/rw_tools.ML"
"~~/src/Tools/IsaPlanner/rw_inst.ML"
- "~~/src/Provers/project_rule.ML"
+ "~~/src/Tools/intuitionistic.ML"
+ "~~/src/Tools/project_rule.ML"
"~~/src/Provers/hypsubst.ML"
"~~/src/Provers/splitter.ML"
"~~/src/Provers/classical.ML"
"~~/src/Provers/blast.ML"
"~~/src/Provers/clasimp.ML"
- "~~/src/Provers/coherent.ML"
- "~~/src/Provers/eqsubst.ML"
+ "~~/src/Tools/coherent.ML"
+ "~~/src/Tools/eqsubst.ML"
"~~/src/Provers/quantifier1.ML"
("Tools/simpdata.ML")
"~~/src/Tools/random_word.ML"
@@ -28,7 +29,8 @@
("~~/src/Tools/induct_tacs.ML")
"~~/src/Tools/value.ML"
"~~/src/Tools/code/code_name.ML"
- "~~/src/Tools/code/code_funcgr.ML"
+ "~~/src/Tools/code/code_funcgr.ML" (*formal dependency*)
+ "~~/src/Tools/code/code_wellsorted.ML"
"~~/src/Tools/code/code_thingol.ML"
"~~/src/Tools/code/code_printer.ML"
"~~/src/Tools/code/code_target.ML"
@@ -38,6 +40,9 @@
("Tools/recfun_codegen.ML")
begin
+setup {* Intuitionistic.method_setup "iprover" *}
+
+
subsection {* Primitive logic *}
subsubsection {* Core syntax *}
@@ -290,7 +295,7 @@
typed_print_translation {*
let
fun tr' c = (c, fn show_sorts => fn T => fn ts =>
- if T = dummyT orelse not (! show_types) andalso can Term.dest_Type T then raise Match
+ if (not o null) ts orelse T = dummyT orelse not (! show_types) andalso can Term.dest_Type T then raise Match
else Syntax.const Syntax.constrainC $ Syntax.const c $ Syntax.term_of_typ show_sorts T);
in map tr' [@{const_syntax HOL.one}, @{const_syntax HOL.zero}] end;
*} -- {* show types that are presumably too general *}
@@ -1704,11 +1709,6 @@
subsection {* Nitpick theorem store *}
ML {*
-structure Nitpick_Const_Def_Thms = NamedThmsFun
-(
- val name = "nitpick_const_def"
- val description = "pseudo-definition of constants as needed by Nitpick"
-)
structure Nitpick_Const_Simp_Thms = NamedThmsFun
(
val name = "nitpick_const_simp"
@@ -1725,8 +1725,7 @@
val description = "introduction rules for (co)inductive predicates as needed by Nitpick"
)
*}
-setup {* Nitpick_Const_Def_Thms.setup
- #> Nitpick_Const_Simp_Thms.setup
+setup {* Nitpick_Const_Simp_Thms.setup
#> Nitpick_Const_Psimp_Thms.setup
#> Nitpick_Ind_Intro_Thms.setup *}
--- a/src/HOL/Hoare/Arith2.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Hoare/Arith2.thy Wed Mar 04 10:45:52 2009 +0100
@@ -42,12 +42,12 @@
lemma cd_diff_l: "n<=m ==> cd x m n = cd x (m-n) n"
apply (unfold cd_def)
- apply (blast intro: dvd_diff dest: dvd_diffD)
+ apply (fastsimp dest: dvd_diffD)
done
lemma cd_diff_r: "m<=n ==> cd x m n = cd x m (n-m)"
apply (unfold cd_def)
- apply (blast intro: dvd_diff dest: dvd_diffD)
+ apply (fastsimp dest: dvd_diffD)
done
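
The Arith2.thy change only swaps the proof method, but the underlying fact cd_diff_l is the Euclid-style step: for n <= m, x is a common divisor of m and n exactly when it is a common divisor of m - n and n. An illustrative Python check, with cds a hypothetical helper enumerating common divisors:

    def cds(m, n):
        bound = max(m, n, 1)
        return {x for x in range(1, bound + 1) if m % x == 0 and n % x == 0}

    for m in range(1, 40):
        for n in range(1, m + 1):                           # n <= m
            assert cds(m, n) == cds(m - n, n)               # cd_diff_l
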
--- a/src/HOL/Import/lazy_seq.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Import/lazy_seq.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: HOL/Import/lazy_seq.ML
- ID: $Id$
Author: Sebastian Skalberg, TU Muenchen
Alternative version of lazy sequences.
@@ -408,8 +407,8 @@
make (fn () => copy (f x))
end
-fun EVERY fs = foldr (op THEN) succeed fs
-fun FIRST fs = foldr (op ORELSE) fail fs
+fun EVERY fs = List.foldr (op THEN) succeed fs
+fun FIRST fs = List.foldr (op ORELSE) fail fs
fun TRY f x =
make (fn () =>
--- a/src/HOL/Import/proof_kernel.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Import/proof_kernel.ML Wed Mar 04 10:45:52 2009 +0100
@@ -777,7 +777,7 @@
val (c,asl) = case terms of
[] => raise ERR "x2p" "Bad oracle description"
| (hd::tl) => (hd,tl)
- val tg = foldr (fn (oracle,tg) => Tag.merge (Tag.read oracle) tg) Tag.empty_tag ors
+ val tg = List.foldr (fn (oracle,tg) => Tag.merge (Tag.read oracle) tg) Tag.empty_tag ors
in
mk_proof (POracle(tg,map xml_to_term asl,xml_to_term c))
end
@@ -1840,7 +1840,7 @@
| inst_type ty1 ty2 (ty as Type(name,tys)) =
Type(name,map (inst_type ty1 ty2) tys)
in
- foldr (fn (v,th) =>
+ List.foldr (fn (v,th) =>
let
val cdom = fst (dom_rng (fst (dom_rng cty)))
val vty = type_of v
@@ -1852,7 +1852,7 @@
end
| SOME _ => raise ERR "GEN_ABS" "Bad constant"
| NONE =>
- foldr (fn (v,th) => mk_ABS v th thy) th vlist'
+ List.foldr (fn (v,th) => mk_ABS v th thy) th vlist'
val res = HOLThm(rens_of info',th1)
val _ = message "RESULT:"
val _ = if_debug pth res
@@ -2020,7 +2020,7 @@
Sign.add_consts_i consts thy'
end
- val thy1 = foldr (fn(name,thy)=>
+ val thy1 = List.foldr (fn(name,thy)=>
snd (get_defname thyname name thy)) thy1 names
fun new_name name = fst (get_defname thyname name thy1)
val names' = map (fn name => (new_name name,name,false)) names
@@ -2041,7 +2041,7 @@
then quotename name
else (quotename newname) ^ ": " ^ (quotename name),thy')
end
- val (new_names,thy') = foldr (fn(name,(names,thy)) =>
+ val (new_names,thy') = List.foldr (fn(name,(names,thy)) =>
let
val (name',thy') = handle_const (name,thy)
in
--- a/src/HOL/Induct/Common_Patterns.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Induct/Common_Patterns.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: HOL/Induct/Common_Patterns.thy
- ID: $Id$
Author: Makarius
*)
--- a/src/HOL/Induct/LList.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Induct/LList.thy Wed Mar 04 10:45:52 2009 +0100
@@ -8,7 +8,7 @@
bounds on the amount of lookahead required.
Could try (but would it work for the gfp analogue of term?)
- LListD_Fun_def "LListD_Fun(A) == (%Z. diag({Numb(0)}) <++> diag(A) <**> Z)"
+ LListD_Fun_def "LListD_Fun(A) == (%Z. Id_on({Numb(0)}) <++> Id_on(A) <**> Z)"
A nice but complex example would be [ML for the Working Programmer, page 176]
from(1) = enumerate (Lmap (Lmap(pack), makeqq(from(1),from(1))))
@@ -95,7 +95,7 @@
llistD_Fun :: "('a llist * 'a llist)set => ('a llist * 'a llist)set" where
"llistD_Fun(r) =
prod_fun Abs_LList Abs_LList `
- LListD_Fun (diag(range Leaf))
+ LListD_Fun (Id_on(range Leaf))
(prod_fun Rep_LList Rep_LList ` r)"
@@ -265,12 +265,12 @@
subsection{* @{text llist} equality as a @{text gfp}; the bisimulation principle *}
text{*This theorem is actually used, unlike the many similar ones in ZF*}
-lemma LListD_unfold: "LListD r = dsum (diag {Numb 0}) (dprod r (LListD r))"
+lemma LListD_unfold: "LListD r = dsum (Id_on {Numb 0}) (dprod r (LListD r))"
by (fast intro!: LListD.intros [unfolded NIL_def CONS_def]
elim: LListD.cases [unfolded NIL_def CONS_def])
lemma LListD_implies_ntrunc_equality [rule_format]:
- "\<forall>M N. (M,N) \<in> LListD(diag A) --> ntrunc k M = ntrunc k N"
+ "\<forall>M N. (M,N) \<in> LListD(Id_on A) --> ntrunc k M = ntrunc k N"
apply (induct_tac "k" rule: nat_less_induct)
apply (safe del: equalityI)
apply (erule LListD.cases)
@@ -283,7 +283,7 @@
text{*The domain of the @{text LListD} relation*}
lemma Domain_LListD:
- "Domain (LListD(diag A)) \<subseteq> llist(A)"
+ "Domain (LListD(Id_on A)) \<subseteq> llist(A)"
apply (rule subsetI)
apply (erule llist.coinduct)
apply (simp add: NIL_def CONS_def)
@@ -291,10 +291,10 @@
done
text{*This inclusion justifies the use of coinduction to show @{text "M = N"}*}
-lemma LListD_subset_diag: "LListD(diag A) \<subseteq> diag(llist(A))"
+lemma LListD_subset_Id_on: "LListD(Id_on A) \<subseteq> Id_on(llist(A))"
apply (rule subsetI)
apply (rule_tac p = x in PairE, safe)
-apply (rule diag_eqI)
+apply (rule Id_on_eqI)
apply (rule LListD_implies_ntrunc_equality [THEN ntrunc_equality], assumption)
apply (erule DomainI [THEN Domain_LListD [THEN subsetD]])
done
@@ -321,7 +321,7 @@
by (simp add: LListD_Fun_def NIL_def)
lemma LListD_Fun_CONS_I:
- "[| x\<in>A; (M,N):s |] ==> (CONS x M, CONS x N) \<in> LListD_Fun (diag A) s"
+ "[| x\<in>A; (M,N):s |] ==> (CONS x M, CONS x N) \<in> LListD_Fun (Id_on A) s"
by (simp add: LListD_Fun_def CONS_def, blast)
text{*Utilise the "strong" part, i.e. @{text "gfp(f)"}*}
@@ -335,24 +335,24 @@
text{*This converse inclusion helps to strengthen @{text LList_equalityI}*}
-lemma diag_subset_LListD: "diag(llist(A)) \<subseteq> LListD(diag A)"
+lemma Id_on_subset_LListD: "Id_on(llist(A)) \<subseteq> LListD(Id_on A)"
apply (rule subsetI)
apply (erule LListD_coinduct)
apply (rule subsetI)
-apply (erule diagE)
+apply (erule Id_onE)
apply (erule ssubst)
apply (erule llist.cases)
-apply (simp_all add: diagI LListD_Fun_NIL_I LListD_Fun_CONS_I)
+apply (simp_all add: Id_onI LListD_Fun_NIL_I LListD_Fun_CONS_I)
done
-lemma LListD_eq_diag: "LListD(diag A) = diag(llist(A))"
-apply (rule equalityI LListD_subset_diag diag_subset_LListD)+
+lemma LListD_eq_Id_on: "LListD(Id_on A) = Id_on(llist(A))"
+apply (rule equalityI LListD_subset_Id_on Id_on_subset_LListD)+
done
-lemma LListD_Fun_diag_I: "M \<in> llist(A) ==> (M,M) \<in> LListD_Fun (diag A) (X Un diag(llist(A)))"
-apply (rule LListD_eq_diag [THEN subst])
+lemma LListD_Fun_Id_on_I: "M \<in> llist(A) ==> (M,M) \<in> LListD_Fun (Id_on A) (X Un Id_on(llist(A)))"
+apply (rule LListD_eq_Id_on [THEN subst])
apply (rule LListD_Fun_LListD_I)
-apply (simp add: LListD_eq_diag diagI)
+apply (simp add: LListD_eq_Id_on Id_onI)
done
@@ -360,11 +360,11 @@
[also admits true equality]
Replace @{text A} by some particular set, like @{text "{x. True}"}??? *}
lemma LList_equalityI:
- "[| (M,N) \<in> r; r \<subseteq> LListD_Fun (diag A) (r Un diag(llist(A))) |]
+ "[| (M,N) \<in> r; r \<subseteq> LListD_Fun (Id_on A) (r Un Id_on(llist(A))) |]
==> M=N"
-apply (rule LListD_subset_diag [THEN subsetD, THEN diagE])
+apply (rule LListD_subset_Id_on [THEN subsetD, THEN Id_onE])
apply (erule LListD_coinduct)
-apply (simp add: LListD_eq_diag, safe)
+apply (simp add: LListD_eq_Id_on, safe)
done
@@ -525,14 +525,14 @@
f(NIL)=g(NIL);
!!x l. [| x\<in>A; l \<in> llist(A) |] ==>
(f(CONS x l),g(CONS x l)) \<in>
- LListD_Fun (diag A) ((%u.(f(u),g(u)))`llist(A) Un
- diag(llist(A)))
+ LListD_Fun (Id_on A) ((%u.(f(u),g(u)))`llist(A) Un
+ Id_on(llist(A)))
|] ==> f(M) = g(M)"
apply (rule LList_equalityI)
apply (erule imageI)
apply (rule image_subsetI)
apply (erule_tac a=x in llist.cases)
-apply (erule ssubst, erule ssubst, erule LListD_Fun_diag_I, blast)
+apply (erule ssubst, erule ssubst, erule LListD_Fun_Id_on_I, blast)
done
@@ -687,7 +687,7 @@
lemma LListD_Fun_subset_Times_llist:
"r \<subseteq> (llist A) <*> (llist A)
- ==> LListD_Fun (diag A) r \<subseteq> (llist A) <*> (llist A)"
+ ==> LListD_Fun (Id_on A) r \<subseteq> (llist A) <*> (llist A)"
by (auto simp add: LListD_Fun_def)
lemma subset_Times_llist:
@@ -703,9 +703,9 @@
apply (simp add: LListI [THEN Abs_LList_inverse])
done
-lemma prod_fun_range_eq_diag:
+lemma prod_fun_range_eq_Id_on:
"prod_fun Rep_LList Rep_LList ` range(%x. (x, x)) =
- diag(llist(range Leaf))"
+ Id_on(llist(range Leaf))"
apply (rule equalityI, blast)
apply (fast elim: LListI [THEN Abs_LList_inverse, THEN subst])
done
@@ -730,10 +730,10 @@
apply (rule image_compose [THEN subst])
apply (rule prod_fun_compose [THEN subst])
apply (subst image_Un)
-apply (subst prod_fun_range_eq_diag)
+apply (subst prod_fun_range_eq_Id_on)
apply (rule LListD_Fun_subset_Times_llist [THEN prod_fun_lemma])
apply (rule subset_Times_llist [THEN Un_least])
-apply (rule diag_subset_Times)
+apply (rule Id_on_subset_Times)
done
subsubsection{* Rules to prove the 2nd premise of @{text llist_equalityI} *}
@@ -755,8 +755,8 @@
apply (rule Rep_LList_inverse [THEN subst])
apply (rule prod_fun_imageI)
apply (subst image_Un)
-apply (subst prod_fun_range_eq_diag)
-apply (rule Rep_LList [THEN LListD, THEN LListD_Fun_diag_I])
+apply (subst prod_fun_range_eq_Id_on)
+apply (rule Rep_LList [THEN LListD, THEN LListD_Fun_Id_on_I])
done
text{*A special case of @{text list_equality} for functions over lazy lists*}
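
Throughout this file diag is renamed to Id_on; set-theoretically, Id_on A is the identity relation restricted to A. A tiny illustrative Python model of that construction (the name Id_on below is only a stand-in for the Isabelle constant):

    def Id_on(A):
        return {(x, x) for x in A}

    A = {0, 1, 2, 3}
    assert Id_on(A) == {(0, 0), (1, 1), (2, 2), (3, 3)}
    assert Id_on(A) <= {(x, y) for x in A for y in A}       # cf. Id_on_subset_Times
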
--- a/src/HOL/Induct/QuoDataType.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Induct/QuoDataType.thy Wed Mar 04 10:45:52 2009 +0100
@@ -47,7 +47,7 @@
theorem equiv_msgrel: "equiv UNIV msgrel"
proof -
- have "reflexive msgrel" by (simp add: refl_def msgrel_refl)
+ have "refl msgrel" by (simp add: refl_on_def msgrel_refl)
moreover have "sym msgrel" by (simp add: sym_def, blast intro: msgrel.SYM)
moreover have "trans msgrel" by (simp add: trans_def, blast intro: msgrel.TRANS)
ultimately show ?thesis by (simp add: equiv_def)
--- a/src/HOL/Induct/QuoNestedDataType.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Induct/QuoNestedDataType.thy Wed Mar 04 10:45:52 2009 +0100
@@ -44,7 +44,7 @@
theorem equiv_exprel: "equiv UNIV exprel"
proof -
- have "reflexive exprel" by (simp add: refl_def exprel_refl)
+ have "refl exprel" by (simp add: refl_on_def exprel_refl)
moreover have "sym exprel" by (simp add: sym_def, blast intro: exprel.SYM)
moreover have "trans exprel" by (simp add: trans_def, blast intro: exprel.TRANS)
ultimately show ?thesis by (simp add: equiv_def)
--- a/src/HOL/Induct/SList.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Induct/SList.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,15 +1,10 @@
-(* *********************************************************************** *)
-(* *)
-(* Title: SList.thy (Extended List Theory) *)
-(* Based on: $Id$ *)
-(* Author: Lawrence C Paulson, Cambridge University Computer Laboratory*)
-(* Author: B. Wolff, University of Bremen *)
-(* Purpose: Enriched theory of lists *)
-(* mutual indirect recursive data-types *)
-(* *)
-(* *********************************************************************** *)
+(* Title: SList.thy
+ Author: Lawrence C Paulson, Cambridge University Computer Laboratory
+ Author: B. Wolff, University of Bremen
-(* Definition of type 'a list (strict lists) by a least fixed point
+Enriched theory of lists; mutual indirect recursive data-types.
+
+Definition of type 'a list (strict lists) by a least fixed point
We use list(A) == lfp(%Z. {NUMB(0)} <+> A <*> Z)
and not list == lfp(%Z. {NUMB(0)} <+> range(Leaf) <*> Z)
@@ -24,6 +19,8 @@
Tidied by lcp. Still needs removal of nat_rec.
*)
+header {* Extended List Theory (old) *}
+
theory SList
imports Sexp
begin
@@ -79,12 +76,12 @@
(*Declaring the abstract list constructors*)
-(*<*)no_translations
+no_translations
"[x, xs]" == "x#[xs]"
"[x]" == "x#[]"
-no_syntax
- Nil :: "'a list" ("[]")
- Cons :: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list" (infixr "#" 65)(*>*)
+no_notation
+ Nil ("[]") and
+ Cons (infixr "#" 65)
definition
Nil :: "'a list" ("[]") where
@@ -149,8 +146,8 @@
ttl :: "'a list => 'a list" where
"ttl xs = list_rec xs [] (%x xs r. xs)"
-(*<*)no_syntax
- member :: "'a \<Rightarrow> 'a list \<Rightarrow> bool" (infixl "mem" 55)(*>*)
+no_notation member (infixl "mem" 55)
+
definition
member :: "['a, 'a list] => bool" (infixl "mem" 55) where
"x mem xs = list_rec xs False (%y ys r. if y=x then True else r)"
@@ -163,8 +160,8 @@
map :: "('a=>'b) => ('a list => 'b list)" where
"map f xs = list_rec xs [] (%x l r. f(x)#r)"
-(*<*)no_syntax
- "\<^const>List.append" :: "'a list => 'a list => 'a list" (infixr "@" 65)(*>*)
+no_notation append (infixr "@" 65)
+
definition
append :: "['a list, 'a list] => 'a list" (infixr "@" 65) where
"xs@ys = list_rec xs ys (%x l r. x#r)"
@@ -342,14 +339,14 @@
lemma not_CONS_self: "N: list(A) ==> !M. N ~= CONS M N"
-by (erule list.induct, simp_all)
+apply (erule list.induct) apply simp_all done
lemma not_Cons_self2: "\<forall>x. l ~= x#l"
-by (induct_tac "l" rule: list_induct, simp_all)
+by (induct l rule: list_induct) simp_all
lemma neq_Nil_conv2: "(xs ~= []) = (\<exists>y ys. xs = y#ys)"
-by (induct_tac "xs" rule: list_induct, auto)
+by (induct xs rule: list_induct) auto
(** Conversion rules for List_case: case analysis operator **)
@@ -491,7 +488,7 @@
lemma expand_list_case:
"P(list_case a f xs) = ((xs=[] --> P a ) & (!y ys. xs=y#ys --> P(f y ys)))"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
(**** Function definitions ****)
@@ -533,41 +530,44 @@
(** @ - append **)
lemma append_assoc [simp]: "(xs@ys)@zs = xs@(ys@zs)"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
lemma append_Nil2 [simp]: "xs @ [] = xs"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
(** mem **)
lemma mem_append [simp]: "x mem (xs@ys) = (x mem xs | x mem ys)"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
lemma mem_filter [simp]: "x mem [x\<leftarrow>xs. P x ] = (x mem xs & P(x))"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
(** list_all **)
lemma list_all_True [simp]: "(Alls x:xs. True) = True"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
lemma list_all_conj [simp]:
"list_all p (xs@ys) = ((list_all p xs) & (list_all p ys))"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
lemma list_all_mem_conv: "(Alls x:xs. P(x)) = (!x. x mem xs --> P(x))"
-apply (induct_tac "xs" rule: list_induct, simp_all)
+apply (induct xs rule: list_induct)
+apply simp_all
apply blast
done
lemma nat_case_dist : "(! n. P n) = (P 0 & (! n. P (Suc n)))"
apply auto
-apply (induct_tac "n", auto)
+apply (induct_tac n)
+apply auto
done
lemma alls_P_eq_P_nth: "(Alls u:A. P u) = (!n. n < length A --> P(nth n A))"
-apply (induct_tac "A" rule: list_induct, simp_all)
+apply (induct_tac A rule: list_induct)
+apply simp_all
apply (rule trans)
apply (rule_tac [2] nat_case_dist [symmetric], simp_all)
done
@@ -583,7 +583,7 @@
lemma Abs_Rep_map:
"(!!x. f(x): sexp) ==>
Abs_map g (Rep_map f xs) = map (%t. g(f(t))) xs"
-apply (induct_tac "xs" rule: list_induct)
+apply (induct xs rule: list_induct)
apply (simp_all add: Rep_map_type list_sexp [THEN subsetD])
done
@@ -591,24 +591,25 @@
(** Additional mapping lemmas **)
lemma map_ident [simp]: "map(%x. x)(xs) = xs"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
lemma map_append [simp]: "map f (xs@ys) = map f xs @ map f ys"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
lemma map_compose: "map(f o g)(xs) = map f (map g xs)"
apply (simp add: o_def)
-apply (induct_tac "xs" rule: list_induct, simp_all)
+apply (induct xs rule: list_induct)
+apply simp_all
done
lemma mem_map_aux1 [rule_format]:
"x mem (map f q) --> (\<exists>y. y mem q & x = f y)"
-by (induct_tac "q" rule: list_induct, simp_all, blast)
+by (induct q rule: list_induct) auto
lemma mem_map_aux2 [rule_format]:
"(\<exists>y. y mem q & x = f y) --> x mem (map f q)"
-by (induct_tac "q" rule: list_induct, auto)
+by (induct q rule: list_induct) auto
lemma mem_map: "x mem (map f q) = (\<exists>y. y mem q & x = f y)"
apply (rule iffI)
@@ -617,10 +618,10 @@
done
lemma hd_append [rule_format]: "A ~= [] --> hd(A @ B) = hd(A)"
-by (induct_tac "A" rule: list_induct, auto)
+by (induct A rule: list_induct) auto
lemma tl_append [rule_format]: "A ~= [] --> tl(A @ B) = tl(A) @ B"
-by (induct_tac "A" rule: list_induct, auto)
+by (induct A rule: list_induct) auto
(** take **)
@@ -638,8 +639,8 @@
by (simp add: drop_def)
lemma drop_Suc1 [simp]: "drop [] (Suc x) = []"
-apply (simp add: drop_def)
-apply (induct_tac "x", auto)
+apply (induct x)
+apply (simp_all add: drop_def)
done
lemma drop_Suc2 [simp]: "drop(a#xs)(Suc x) = drop xs x"
@@ -698,9 +699,7 @@
lemma zipWith_Cons_Nil [simp]: "zipWith f (x,[]) = []"
-apply (simp add: zipWith_def)
-apply (induct_tac "x" rule: list_induct, simp_all)
-done
+by (induct x rule: list_induct) (simp_all add: zipWith_def)
lemma zipWith_Nil_Cons [simp]: "zipWith f ([],x) = []"
@@ -722,23 +721,23 @@
done
lemma map_flat: "map f (flat S) = flat(map (map f) S)"
-by (induct_tac "S" rule: list_induct, simp_all)
+by (induct S rule: list_induct) simp_all
lemma list_all_map_eq: "(Alls u:xs. f(u) = g(u)) --> map f xs = map g xs"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
lemma filter_map_d: "filter p (map f xs) = map f (filter(p o f)(xs))"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
lemma filter_compose: "filter p (filter q xs) = filter(%x. p x & q x) xs"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
(* "filter(p, filter(q,xs)) = filter(q, filter(p,xs))",
"filter(p, filter(p,xs)) = filter(p,xs)" BIRD's thms.*)
lemma filter_append [rule_format, simp]:
"\<forall>B. filter p (A @ B) = (filter p A @ filter p B)"
-by (induct_tac "A" rule: list_induct, simp_all)
+by (induct A rule: list_induct) simp_all
(* inits(xs) == map(fst,splits(xs)),
@@ -749,44 +748,50 @@
x mem xs & y mem ys = <x,y> mem diag(xs,ys) *)
lemma length_append: "length(xs@ys) = length(xs)+length(ys)"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
lemma length_map: "length(map f xs) = length(xs)"
-by (induct_tac "xs" rule: list_induct, simp_all)
+by (induct xs rule: list_induct) simp_all
lemma take_Nil [simp]: "take [] n = []"
-by (induct_tac "n", simp_all)
+by (induct n) simp_all
lemma take_take_eq [simp]: "\<forall>n. take (take xs n) n = take xs n"
-apply (induct_tac "xs" rule: list_induct, simp_all)
+apply (induct xs rule: list_induct)
+apply simp_all
apply (rule allI)
-apply (induct_tac "n", auto)
+apply (induct_tac n)
+apply auto
done
lemma take_take_Suc_eq1 [rule_format]:
"\<forall>n. take (take xs(Suc(n+m))) n = take xs n"
-apply (induct_tac "xs" rule: list_induct, simp_all)
+apply (induct_tac xs rule: list_induct)
+apply simp_all
apply (rule allI)
-apply (induct_tac "n", auto)
+apply (induct_tac n)
+apply auto
done
declare take_Suc [simp del]
lemma take_take_1: "take (take xs (n+m)) n = take xs n"
-apply (induct_tac "m")
+apply (induct m)
apply (simp_all add: take_take_Suc_eq1)
done
lemma take_take_Suc_eq2 [rule_format]:
"\<forall>n. take (take xs n)(Suc(n+m)) = take xs n"
-apply (induct_tac "xs" rule: list_induct, simp_all)
+apply (induct_tac xs rule: list_induct)
+apply simp_all
apply (rule allI)
-apply (induct_tac "n", auto)
+apply (induct_tac n)
+apply auto
done
lemma take_take_2: "take(take xs n)(n+m) = take xs n"
-apply (induct_tac "m")
+apply (induct m)
apply (simp_all add: take_take_Suc_eq2)
done
@@ -794,29 +799,33 @@
(* length(drop(xs,n)) = length(xs) - n *)
lemma drop_Nil [simp]: "drop [] n = []"
-by (induct_tac "n", auto)
+by (induct n) auto
lemma drop_drop [rule_format]: "\<forall>xs. drop (drop xs m) n = drop xs(m+n)"
-apply (induct_tac "m", auto)
-apply (induct_tac "xs" rule: list_induct, auto)
+apply (induct_tac m)
+apply auto
+apply (induct_tac xs rule: list_induct)
+apply auto
done
lemma take_drop [rule_format]: "\<forall>xs. (take xs n) @ (drop xs n) = xs"
-apply (induct_tac "n", auto)
-apply (induct_tac "xs" rule: list_induct, auto)
+apply (induct_tac n)
+apply auto
+apply (induct_tac xs rule: list_induct)
+apply auto
done
lemma copy_copy: "copy x n @ copy x m = copy x (n+m)"
-by (induct_tac "n", auto)
+by (induct n) auto
lemma length_copy: "length(copy x n) = n"
-by (induct_tac "n", auto)
+by (induct n) auto
lemma length_take [rule_format, simp]:
"\<forall>xs. length(take xs n) = min (length xs) n"
-apply (induct_tac "n")
+apply (induct n)
apply auto
-apply (induct_tac "xs" rule: list_induct)
+apply (induct_tac xs rule: list_induct)
apply auto
done
@@ -824,85 +833,93 @@
by (simp only: length_append [symmetric] take_drop)
lemma take_append [rule_format]: "\<forall>A. length(A) = n --> take(A@B) n = A"
-apply (induct_tac "n")
+apply (induct n)
apply (rule allI)
apply (rule_tac [2] allI)
-apply (induct_tac "A" rule: list_induct)
-apply (induct_tac [3] "A" rule: list_induct, simp_all)
+apply (induct_tac A rule: list_induct)
+apply (induct_tac [3] A rule: list_induct, simp_all)
done
lemma take_append2 [rule_format]:
"\<forall>A. length(A) = n --> take(A@B) (n+k) = A @ take B k"
-apply (induct_tac "n")
+apply (induct n)
apply (rule allI)
apply (rule_tac [2] allI)
-apply (induct_tac "A" rule: list_induct)
-apply (induct_tac [3] "A" rule: list_induct, simp_all)
+apply (induct_tac A rule: list_induct)
+apply (induct_tac [3] A rule: list_induct, simp_all)
done
lemma take_map [rule_format]: "\<forall>n. take (map f A) n = map f (take A n)"
-apply (induct_tac "A" rule: list_induct, simp_all)
+apply (induct A rule: list_induct)
+apply simp_all
apply (rule allI)
-apply (induct_tac "n", simp_all)
+apply (induct_tac n)
+apply simp_all
done
lemma drop_append [rule_format]: "\<forall>A. length(A) = n --> drop(A@B)n = B"
-apply (induct_tac "n")
+apply (induct n)
apply (rule allI)
apply (rule_tac [2] allI)
-apply (induct_tac "A" rule: list_induct)
-apply (induct_tac [3] "A" rule: list_induct, simp_all)
+apply (induct_tac A rule: list_induct)
+apply (induct_tac [3] A rule: list_induct)
+apply simp_all
done
lemma drop_append2 [rule_format]:
"\<forall>A. length(A) = n --> drop(A@B)(n+k) = drop B k"
-apply (induct_tac "n")
+apply (induct n)
apply (rule allI)
apply (rule_tac [2] allI)
-apply (induct_tac "A" rule: list_induct)
-apply (induct_tac [3] "A" rule: list_induct, simp_all)
+apply (induct_tac A rule: list_induct)
+apply (induct_tac [3] A rule: list_induct)
+apply simp_all
done
lemma drop_all [rule_format]: "\<forall>A. length(A) = n --> drop A n = []"
-apply (induct_tac "n")
+apply (induct n)
apply (rule allI)
apply (rule_tac [2] allI)
-apply (induct_tac "A" rule: list_induct)
-apply (induct_tac [3] "A" rule: list_induct, auto)
+apply (induct_tac A rule: list_induct)
+apply (induct_tac [3] A rule: list_induct)
+apply auto
done
lemma drop_map [rule_format]: "\<forall>n. drop (map f A) n = map f (drop A n)"
-apply (induct_tac "A" rule: list_induct, simp_all)
+apply (induct A rule: list_induct)
+apply simp_all
apply (rule allI)
-apply (induct_tac "n", simp_all)
+apply (induct_tac n)
+apply simp_all
done
lemma take_all [rule_format]: "\<forall>A. length(A) = n --> take A n = A"
-apply (induct_tac "n")
+apply (induct n)
apply (rule allI)
apply (rule_tac [2] allI)
-apply (induct_tac "A" rule: list_induct)
-apply (induct_tac [3] "A" rule: list_induct, auto)
+apply (induct_tac A rule: list_induct)
+apply (induct_tac [3] A rule: list_induct)
+apply auto
done
lemma foldl_single: "foldl f a [b] = f a b"
by simp_all
-lemma foldl_append [rule_format, simp]:
- "\<forall>a. foldl f a (A @ B) = foldl f (foldl f a A) B"
-by (induct_tac "A" rule: list_induct, simp_all)
+lemma foldl_append [simp]:
+ "\<And>a. foldl f a (A @ B) = foldl f (foldl f a A) B"
+by (induct A rule: list_induct) simp_all
-lemma foldl_map [rule_format]:
- "\<forall>e. foldl f e (map g S) = foldl (%x y. f x (g y)) e S"
-by (induct_tac "S" rule: list_induct, simp_all)
+lemma foldl_map:
+ "\<And>e. foldl f e (map g S) = foldl (%x y. f x (g y)) e S"
+by (induct S rule: list_induct) simp_all
lemma foldl_neutr_distr [rule_format]:
assumes r_neutr: "\<forall>a. f a e = a"
and r_neutl: "\<forall>a. f e a = a"
and assoc: "\<forall>a b c. f a (f b c) = f(f a b) c"
shows "\<forall>y. f y (foldl f e A) = foldl f y A"
-apply (induct_tac "A" rule: list_induct)
+apply (induct A rule: list_induct)
apply (simp_all add: r_neutr r_neutl, clarify)
apply (erule all_dupE)
apply (rule trans)
@@ -923,95 +940,98 @@
lemma foldr_append [rule_format, simp]:
"\<forall>a. foldr f a (A @ B) = foldr f (foldr f a B) A"
-apply (induct_tac "A" rule: list_induct, simp_all)
-done
+by (induct A rule: list_induct) simp_all
-lemma foldr_map [rule_format]: "\<forall>e. foldr f e (map g S) = foldr (f o g) e S"
-apply (simp add: o_def)
-apply (induct_tac "S" rule: list_induct, simp_all)
-done
+lemma foldr_map: "\<And>e. foldr f e (map g S) = foldr (f o g) e S"
+by (induct S rule: list_induct) (simp_all add: o_def)
lemma foldr_Un_eq_UN: "foldr op Un {} S = (UN X: {t. t mem S}.X)"
-by (induct_tac "S" rule: list_induct, auto)
+by (induct S rule: list_induct) auto
lemma foldr_neutr_distr:
"[| !a. f e a = a; !a b c. f a (f b c) = f(f a b) c |]
==> foldr f y S = f (foldr f e S) y"
-by (induct_tac "S" rule: list_induct, auto)
+by (induct S rule: list_induct) auto
lemma foldr_append2:
"[| !a. f e a = a; !a b c. f a (f b c) = f(f a b) c |]
==> foldr f e (A @ B) = f (foldr f e A) (foldr f e B)"
apply auto
-apply (rule foldr_neutr_distr, auto)
+apply (rule foldr_neutr_distr)
+apply auto
done
lemma foldr_flat:
"[| !a. f e a = a; !a b c. f a (f b c) = f(f a b) c |] ==>
foldr f e (flat S) = (foldr f e)(map (foldr f e) S)"
-apply (induct_tac "S" rule: list_induct)
+apply (induct S rule: list_induct)
apply (simp_all del: foldr_append add: foldr_append2)
done
lemma list_all_map: "(Alls x:map f xs .P(x)) = (Alls x:xs.(P o f)(x))"
-by (induct_tac "xs" rule: list_induct, auto)
+by (induct xs rule: list_induct) auto
lemma list_all_and:
"(Alls x:xs. P(x)&Q(x)) = ((Alls x:xs. P(x))&(Alls x:xs. Q(x)))"
-by (induct_tac "xs" rule: list_induct, auto)
+by (induct xs rule: list_induct) auto
lemma nth_map [rule_format]:
"\<forall>i. i < length(A) --> nth i (map f A) = f(nth i A)"
-apply (induct_tac "A" rule: list_induct, simp_all)
+apply (induct A rule: list_induct)
+apply simp_all
apply (rule allI)
-apply (induct_tac "i", auto)
+apply (induct_tac i)
+apply auto
done
lemma nth_app_cancel_right [rule_format]:
"\<forall>i. i < length(A) --> nth i(A@B) = nth i A"
-apply (induct_tac "A" rule: list_induct, simp_all)
+apply (induct A rule: list_induct)
+apply simp_all
apply (rule allI)
-apply (induct_tac "i", simp_all)
+apply (induct_tac i)
+apply simp_all
done
lemma nth_app_cancel_left [rule_format]:
"\<forall>n. n = length(A) --> nth(n+i)(A@B) = nth i B"
-by (induct_tac "A" rule: list_induct, simp_all)
+by (induct A rule: list_induct) simp_all
(** flat **)
lemma flat_append [simp]: "flat(xs@ys) = flat(xs) @ flat(ys)"
-by (induct_tac "xs" rule: list_induct, auto)
+by (induct xs rule: list_induct) auto
lemma filter_flat: "filter p (flat S) = flat(map (filter p) S)"
-by (induct_tac "S" rule: list_induct, auto)
+by (induct S rule: list_induct) auto
(** rev **)
lemma rev_append [simp]: "rev(xs@ys) = rev(ys) @ rev(xs)"
-by (induct_tac "xs" rule: list_induct, auto)
+by (induct xs rule: list_induct) auto
lemma rev_rev_ident [simp]: "rev(rev l) = l"
-by (induct_tac "l" rule: list_induct, auto)
+by (induct l rule: list_induct) auto
lemma rev_flat: "rev(flat ls) = flat (map rev (rev ls))"
-by (induct_tac "ls" rule: list_induct, auto)
+by (induct ls rule: list_induct) auto
lemma rev_map_distrib: "rev(map f l) = map f (rev l)"
-by (induct_tac "l" rule: list_induct, auto)
+by (induct l rule: list_induct) auto
lemma foldl_rev: "foldl f b (rev l) = foldr (%x y. f y x) b l"
-by (induct_tac "l" rule: list_induct, auto)
+by (induct l rule: list_induct) auto
lemma foldr_rev: "foldr f b (rev l) = foldl (%x y. f y x) b l"
apply (rule sym)
apply (rule trans)
-apply (rule_tac [2] foldl_rev, simp)
+apply (rule_tac [2] foldl_rev)
+apply simp
done
end
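
Most of the SList.thy hunks only modernise the proof style, but they touch list equations such as foldl_append and foldl_rev. A brief sanity check of those two equations in Python, with foldl and foldr written as hypothetical stand-ins via functools.reduce and an arbitrary non-commutative operation:

    from functools import reduce

    def foldl(f, a, xs):
        return reduce(f, xs, a)

    def foldr(f, a, xs):
        return reduce(lambda acc, x: f(x, acc), reversed(xs), a)

    f = lambda a, x: 2 * a + x
    A, B = [1, 2, 3], [4, 5]
    assert foldl(f, 0, A + B) == foldl(f, foldl(f, 0, A), B)                    # foldl_append
    assert foldl(f, 0, list(reversed(A))) == foldr(lambda x, y: f(y, x), 0, A)  # foldl_rev
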
--- a/src/HOL/Int.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Int.thy Wed Mar 04 10:45:52 2009 +0100
@@ -77,7 +77,7 @@
by (simp add: intrel_def)
lemma equiv_intrel: "equiv UNIV intrel"
-by (simp add: intrel_def equiv_def refl_def sym_def trans_def)
+by (simp add: intrel_def equiv_def refl_on_def sym_def trans_def)
text{*Reduces equality of equivalence classes to the @{term intrel} relation:
@{term "(intrel `` {x} = intrel `` {y}) = ((x,y) \<in> intrel)"} *}
@@ -832,8 +832,8 @@
le_imp_0_less [THEN order_less_imp_le])
next
case (neg n)
- thus ?thesis by (simp del: of_nat_Suc of_nat_add
- add: algebra_simps of_nat_1 [symmetric] of_nat_add [symmetric])
+ thus ?thesis by (simp del: of_nat_Suc of_nat_add of_nat_1
+ add: algebra_simps of_nat_1 [where 'a=int, symmetric] of_nat_add [symmetric])
qed
lemma bin_less_0_simps:
@@ -1165,8 +1165,8 @@
le_imp_0_less [THEN order_less_imp_le])
next
case (neg n)
- thus ?thesis by (simp del: of_nat_Suc of_nat_add
- add: algebra_simps of_nat_1 [symmetric] of_nat_add [symmetric])
+ thus ?thesis by (simp del: of_nat_Suc of_nat_add of_nat_1
+ add: algebra_simps of_nat_1 [where 'a=int, symmetric] of_nat_add [symmetric])
qed
text {* Less-Than or Equals *}
@@ -1547,7 +1547,7 @@
"abs(-1 ^ n) = (1::'a::{ordered_idom,number_ring,recpower})"
by (simp add: power_abs)
-lemma of_int_number_of_eq:
+lemma of_int_number_of_eq [simp]:
"of_int (number_of v) = (number_of v :: 'a :: number_ring)"
by (simp add: number_of_eq)
@@ -1785,11 +1785,12 @@
lemma int_val_lemma:
"(\<forall>i<n::nat. abs(f(i+1) - f i) \<le> 1) -->
f 0 \<le> k --> k \<le> f n --> (\<exists>i \<le> n. f i = (k::int))"
+unfolding One_nat_def
apply (induct n, simp)
apply (intro strip)
apply (erule impE, simp)
apply (erule_tac x = n in allE, simp)
-apply (case_tac "k = f (n+1) ")
+apply (case_tac "k = f (Suc n)")
apply force
apply (erule impE)
apply (simp add: abs_if split add: split_if_asm)
@@ -1803,6 +1804,7 @@
f m \<le> k; k \<le> f n |] ==> ? i. m \<le> i & i \<le> n & f i = (k::int)"
apply (cut_tac n = "n-m" and f = "%i. f (i+m) " and k = k
in int_val_lemma)
+unfolding One_nat_def
apply simp
apply (erule exE)
apply (rule_tac x = "i+m" in exI, arith)
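
The Int.thy hunks adjust the proof of int_val_lemma, a discrete intermediate-value fact: if consecutive values of f differ by at most 1 and f 0 <= k <= f n, then f hits k at some i <= n. An exhaustive check for short step sequences, written as an illustrative Python sketch rather than the Isabelle proof:

    from itertools import product

    for n in range(0, 6):
        for steps in product((-1, 0, 1), repeat=n):         # |f(i+1) - f i| <= 1
            f = [0]
            for s in steps:
                f.append(f[-1] + s)
            for k in range(f[0], f[n] + 1):                 # f 0 <= k <= f n
                assert any(f[i] == k for i in range(n + 1))
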
--- a/src/HOL/IntDiv.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/IntDiv.thy Wed Mar 04 10:45:52 2009 +0100
@@ -547,34 +547,6 @@
simproc_setup binary_int_mod ("number_of m mod number_of n :: int") =
{* K (divmod_proc (@{thm divmod_rel_mod_eq})) *}
-(* The following 8 lemmas are made unnecessary by the above simprocs: *)
-
-lemmas div_pos_pos_number_of =
- div_pos_pos [of "number_of v" "number_of w", standard]
-
-lemmas div_neg_pos_number_of =
- div_neg_pos [of "number_of v" "number_of w", standard]
-
-lemmas div_pos_neg_number_of =
- div_pos_neg [of "number_of v" "number_of w", standard]
-
-lemmas div_neg_neg_number_of =
- div_neg_neg [of "number_of v" "number_of w", standard]
-
-
-lemmas mod_pos_pos_number_of =
- mod_pos_pos [of "number_of v" "number_of w", standard]
-
-lemmas mod_neg_pos_number_of =
- mod_neg_pos [of "number_of v" "number_of w", standard]
-
-lemmas mod_pos_neg_number_of =
- mod_pos_neg [of "number_of v" "number_of w", standard]
-
-lemmas mod_neg_neg_number_of =
- mod_neg_neg [of "number_of v" "number_of w", standard]
-
-
lemmas posDivAlg_eqn_number_of [simp] =
posDivAlg_eqn [of "number_of v" "number_of w", standard]
@@ -584,15 +556,6 @@
text{*Special-case simplification *}
-lemma zmod_1 [simp]: "a mod (1::int) = 0"
-apply (cut_tac a = a and b = 1 in pos_mod_sign)
-apply (cut_tac [2] a = a and b = 1 in pos_mod_bound)
-apply (auto simp del:pos_mod_bound pos_mod_sign)
-done
-
-lemma zdiv_1 [simp]: "a div (1::int) = a"
-by (cut_tac a = a and b = 1 in zmod_zdiv_equality, auto)
-
lemma zmod_minus1_right [simp]: "a mod (-1::int) = 0"
apply (cut_tac a = a and b = "-1" in neg_mod_sign)
apply (cut_tac [2] a = a and b = "-1" in neg_mod_bound)
@@ -726,9 +689,6 @@
apply (blast intro: divmod_rel_div_mod [THEN zmult1_lemma, THEN divmod_rel_mod])
done
-lemma zdiv_zmult_self1 [simp]: "b \<noteq> (0::int) ==> (a*b) div b = a"
-by (simp add: zdiv_zmult1_eq)
-
lemma zmod_zdiv_trivial: "(a mod b) div b = (0::int)"
apply (case_tac "b = 0", simp)
apply (auto simp add: linorder_neq_iff div_pos_pos_trivial div_neg_neg_trivial)
@@ -754,7 +714,7 @@
assume not0: "b \<noteq> 0"
show "(a + c * b) div b = c + a div b"
unfolding zdiv_zadd1_eq [of a "c * b"] using not0
- by (simp add: zmod_zmult1_eq zmod_zdiv_trivial)
+ by (simp add: zmod_zmult1_eq zmod_zdiv_trivial zdiv_zmult1_eq)
qed auto
lemma posDivAlg_div_mod:
@@ -784,41 +744,12 @@
show ?thesis by simp
qed
-lemma zdiv_zadd_self1: "a \<noteq> (0::int) ==> (a+b) div a = b div a + 1"
-by (rule div_add_self1) (* already declared [simp] *)
-
-lemma zdiv_zadd_self2: "a \<noteq> (0::int) ==> (b+a) div a = b div a + 1"
-by (rule div_add_self2) (* already declared [simp] *)
-
-lemma zdiv_zmult_self2: "b \<noteq> (0::int) ==> (b*a) div b = a"
-by (rule div_mult_self1_is_id) (* already declared [simp] *)
-
-lemma zmod_zmult_self1: "(a*b) mod b = (0::int)"
-by (rule mod_mult_self2_is_0) (* already declared [simp] *)
-
-lemma zmod_zmult_self2: "(b*a) mod b = (0::int)"
-by (rule mod_mult_self1_is_0) (* already declared [simp] *)
-
lemma zmod_eq_0_iff: "(m mod d = 0) = (EX q::int. m = d*q)"
by (simp add: dvd_eq_mod_eq_0 [symmetric] dvd_def)
(* REVISIT: should this be generalized to all semiring_div types? *)
lemmas zmod_eq_0D [dest!] = zmod_eq_0_iff [THEN iffD1]
-lemma zmod_zadd_left_eq: "(a+b) mod (c::int) = ((a mod c) + b) mod c"
-by (rule mod_add_left_eq)
-
-lemma zmod_zadd_right_eq: "(a+b) mod (c::int) = (a + (b mod c)) mod c"
-by (rule mod_add_right_eq)
-
-lemma zmod_zadd_self1: "(a+b) mod a = b mod (a::int)"
-by (rule mod_add_self1) (* already declared [simp] *)
-
-lemma zmod_zadd_self2: "(b+a) mod a = b mod (a::int)"
-by (rule mod_add_self2) (* already declared [simp] *)
-
-lemma zmod_zdiff1_eq: "(a - b) mod c = (a mod c - b mod c) mod (c::int)"
-by (rule mod_diff_eq)
subsection{*Proving @{term "a div (b*c) = (a div b) div c"} *}
@@ -902,13 +833,6 @@
"(k*m) div (k*n) = (if k = (0::int) then 0 else m div n)"
by (simp add:zdiv_zmult_zmult1)
-(*
-lemma zdiv_zmult_zmult2: "c \<noteq> (0::int) ==> (a*c) div (b*c) = a div b"
-apply (drule zdiv_zmult_zmult1)
-apply (auto simp add: mult_commute)
-done
-*)
-
subsection{*Distribution of Factors over mod*}
@@ -933,9 +857,6 @@
apply (auto simp add: mult_commute)
done
-lemma zmod_zmod_cancel: "n dvd m \<Longrightarrow> (k::int) mod m mod n = k mod n"
-by (rule mod_mod_cancel)
-
subsection {*Splitting Rules for div and mod*}
@@ -1070,7 +991,7 @@
apply (subgoal_tac "(1 + 2* (-b - 1)) mod (2* (-a)) =
1 + 2* ((-b - 1) mod (-a))")
apply (rule_tac [2] pos_zmod_mult_2)
-apply (auto simp add: minus_mult_right [symmetric] right_diff_distrib)
+apply (auto simp add: right_diff_distrib)
apply (subgoal_tac " (-1 - (2 * b)) = - (1 + (2 * b))")
prefer 2 apply simp
apply (simp only: zmod_zminus_zminus diff_minus minus_add_distrib [symmetric])
@@ -1132,38 +1053,8 @@
subsection {* The Divides Relation *}
-lemma zdvd_iff_zmod_eq_0: "(m dvd n) = (n mod m = (0::int))"
- by (rule dvd_eq_mod_eq_0)
-
lemmas zdvd_iff_zmod_eq_0_number_of [simp] =
- zdvd_iff_zmod_eq_0 [of "number_of x" "number_of y", standard]
-
-lemma zdvd_0_right: "(m::int) dvd 0"
- by (rule dvd_0_right) (* already declared [iff] *)
-
-lemma zdvd_0_left: "(0 dvd (m::int)) = (m = 0)"
- by (rule dvd_0_left_iff) (* already declared [noatp,simp] *)
-
-lemma zdvd_1_left: "1 dvd (m::int)"
- by (rule one_dvd) (* already declared [simp] *)
-
-lemma zdvd_refl: "m dvd (m::int)"
- by (rule dvd_refl) (* already declared [simp] *)
-
-lemma zdvd_trans: "m dvd n ==> n dvd k ==> m dvd (k::int)"
- by (rule dvd_trans)
-
-lemma zdvd_zminus_iff: "m dvd -n \<longleftrightarrow> m dvd (n::int)"
- by (rule dvd_minus_iff) (* already declared [simp] *)
-
-lemma zdvd_zminus2_iff: "-m dvd n \<longleftrightarrow> m dvd (n::int)"
- by (rule minus_dvd_iff) (* already declared [simp] *)
-
-lemma zdvd_abs1: "( \<bar>i::int\<bar> dvd j) = (i dvd j)"
- by (rule abs_dvd_iff) (* already declared [simp] *)
-
-lemma zdvd_abs2: "( (i::int) dvd \<bar>j\<bar>) = (i dvd j)"
- by (rule dvd_abs_iff) (* already declared [simp] *)
+ dvd_eq_mod_eq_0 [of "number_of x::int" "number_of y::int", standard]
lemma zdvd_anti_sym:
"0 < m ==> 0 < n ==> m dvd n ==> n dvd m ==> m = (n::int)"
@@ -1171,58 +1062,32 @@
apply (simp add: mult_assoc zero_less_mult_iff zmult_eq_1_iff)
done
-lemma zdvd_zadd: "k dvd m ==> k dvd n ==> k dvd (m + n :: int)"
- by (rule dvd_add)
-
-lemma zdvd_dvd_eq: assumes anz:"a \<noteq> 0" and ab: "(a::int) dvd b" and ba:"b dvd a"
+lemma zdvd_dvd_eq: assumes "a \<noteq> 0" and "(a::int) dvd b" and "b dvd a"
shows "\<bar>a\<bar> = \<bar>b\<bar>"
proof-
- from ab obtain k where k:"b = a*k" unfolding dvd_def by blast
- from ba obtain k' where k':"a = b*k'" unfolding dvd_def by blast
+ from `a dvd b` obtain k where k:"b = a*k" unfolding dvd_def by blast
+ from `b dvd a` obtain k' where k':"a = b*k'" unfolding dvd_def by blast
from k k' have "a = a*k*k'" by simp
with mult_cancel_left1[where c="a" and b="k*k'"]
- have kk':"k*k' = 1" using anz by (simp add: mult_assoc)
+ have kk':"k*k' = 1" using `a\<noteq>0` by (simp add: mult_assoc)
hence "k = 1 \<and> k' = 1 \<or> k = -1 \<and> k' = -1" by (simp add: zmult_eq_1_iff)
thus ?thesis using k k' by auto
qed
-lemma zdvd_zdiff: "k dvd m ==> k dvd n ==> k dvd (m - n :: int)"
- by (rule Ring_and_Field.dvd_diff)
-
lemma zdvd_zdiffD: "k dvd m - n ==> k dvd n ==> k dvd (m::int)"
apply (subgoal_tac "m = n + (m - n)")
apply (erule ssubst)
- apply (blast intro: zdvd_zadd, simp)
+ apply (blast intro: dvd_add, simp)
done
-lemma zdvd_zmult: "k dvd (n::int) ==> k dvd m * n"
- by (rule dvd_mult)
-
-lemma zdvd_zmult2: "k dvd (m::int) ==> k dvd m * n"
- by (rule dvd_mult2)
-
-lemma zdvd_triv_right: "(k::int) dvd m * k"
- by (rule dvd_triv_right) (* already declared [simp] *)
-
-lemma zdvd_triv_left: "(k::int) dvd k * m"
- by (rule dvd_triv_left) (* already declared [simp] *)
-
-lemma zdvd_zmultD2: "j * k dvd n ==> j dvd (n::int)"
- by (rule dvd_mult_left)
-
-lemma zdvd_zmultD: "j * k dvd n ==> k dvd (n::int)"
- by (rule dvd_mult_right)
-
-lemma zdvd_zmult_mono: "i dvd m ==> j dvd (n::int) ==> i * j dvd m * n"
- by (rule mult_dvd_mono)
-
lemma zdvd_reduce: "(k dvd n + k * m) = (k dvd (n::int))"
- apply (rule iffI)
- apply (erule_tac [2] zdvd_zadd)
- apply (subgoal_tac "n = (n + k * m) - k * m")
- apply (erule ssubst)
- apply (erule zdvd_zdiff, simp_all)
- done
+apply (rule iffI)
+ apply (erule_tac [2] dvd_add)
+ apply (subgoal_tac "n = (n + k * m) - k * m")
+ apply (erule ssubst)
+ apply (erule dvd_diff)
+ apply(simp_all)
+done
lemma zdvd_zmod: "f dvd m ==> f dvd (n::int) ==> f dvd m mod n"
apply (simp add: dvd_def)
@@ -1232,7 +1097,7 @@
lemma zdvd_zmod_imp_zdvd: "k dvd m mod n ==> k dvd n ==> k dvd (m::int)"
apply (subgoal_tac "k dvd n * (m div n) + m mod n")
apply (simp add: zmod_zdiv_equality [symmetric])
- apply (simp only: zdvd_zadd zdvd_zmult2)
+ apply (simp only: dvd_add dvd_mult2)
done
lemma zdvd_not_zless: "0 < m ==> m < n ==> \<not> n dvd (m::int)"
@@ -1252,7 +1117,7 @@
lemma zdvd_mult_div_cancel:"(n::int) dvd m \<Longrightarrow> n * (m div n) = m"
apply (subgoal_tac "m mod n = 0")
apply (simp add: zmult_div_cancel)
-apply (simp only: zdvd_iff_zmod_eq_0)
+apply (simp only: dvd_eq_mod_eq_0)
done
lemma zdvd_mult_cancel: assumes d:"k * m dvd k * n" and kz:"k \<noteq> (0::int)"
@@ -1265,10 +1130,6 @@
thus ?thesis by simp
qed
-lemma zdvd_zmult_cancel_disj[simp]:
- "(k*m) dvd (k*n) = (k=0 | m dvd (n::int))"
-by (auto simp: zdvd_zmult_mono dest: zdvd_mult_cancel)
-
theorem ex_nat: "(\<exists>x::nat. P x) = (\<exists>x::int. 0 <= x \<and> P (nat x))"
apply (simp split add: split_nat)
@@ -1300,44 +1161,38 @@
then show ?thesis by (simp only: negative_eq_positive) auto
qed
qed
- then show ?thesis by (auto elim!: dvdE simp only: zdvd_triv_left int_mult)
+ then show ?thesis by (auto elim!: dvdE simp only: dvd_triv_left int_mult)
qed
lemma zdvd1_eq[simp]: "(x::int) dvd 1 = ( \<bar>x\<bar> = 1)"
proof
- assume d: "x dvd 1" hence "int (nat \<bar>x\<bar>) dvd int (nat 1)" by (simp add: zdvd_abs1)
+ assume d: "x dvd 1" hence "int (nat \<bar>x\<bar>) dvd int (nat 1)" by simp
hence "nat \<bar>x\<bar> dvd 1" by (simp add: zdvd_int)
hence "nat \<bar>x\<bar> = 1" by simp
thus "\<bar>x\<bar> = 1" by (cases "x < 0", auto)
next
assume "\<bar>x\<bar>=1" thus "x dvd 1"
- by(cases "x < 0",simp_all add: minus_equation_iff zdvd_iff_zmod_eq_0)
+ by(cases "x < 0",simp_all add: minus_equation_iff dvd_eq_mod_eq_0)
qed
lemma zdvd_mult_cancel1:
assumes mp:"m \<noteq>(0::int)" shows "(m * n dvd m) = (\<bar>n\<bar> = 1)"
proof
assume n1: "\<bar>n\<bar> = 1" thus "m * n dvd m"
- by (cases "n >0", auto simp add: zdvd_zminus2_iff minus_equation_iff)
+ by (cases "n >0", auto simp add: minus_dvd_iff minus_equation_iff)
next
assume H: "m * n dvd m" hence H2: "m * n dvd m * 1" by simp
from zdvd_mult_cancel[OF H2 mp] show "\<bar>n\<bar> = 1" by (simp only: zdvd1_eq)
qed
lemma int_dvd_iff: "(int m dvd z) = (m dvd nat (abs z))"
- unfolding zdvd_int by (cases "z \<ge> 0") (simp_all add: zdvd_zminus_iff)
+ unfolding zdvd_int by (cases "z \<ge> 0") simp_all
lemma dvd_int_iff: "(z dvd int m) = (nat (abs z) dvd m)"
- unfolding zdvd_int by (cases "z \<ge> 0") (simp_all add: zdvd_zminus2_iff)
+ unfolding zdvd_int by (cases "z \<ge> 0") simp_all
lemma nat_dvd_iff: "(nat z dvd m) = (if 0 \<le> z then (z dvd int m) else m = 0)"
by (auto simp add: dvd_int_iff)
-lemma zminus_dvd_iff [iff]: "(-z dvd w) = (z dvd (w::int))"
- by (rule minus_dvd_iff)
-
-lemma dvd_zminus_iff [iff]: "(z dvd -w) = (z dvd (w::int))"
- by (rule dvd_minus_iff)
-
lemma zdvd_imp_le: "[| z dvd n; 0 < n |] ==> z \<le> (n::int)"
apply (rule_tac z=n in int_cases)
apply (auto simp add: dvd_int_iff)
@@ -1367,10 +1222,13 @@
apply (auto simp add: IntDiv.divmod_rel_def of_nat_mult)
done
+lemma abs_div: "(y::int) dvd x \<Longrightarrow> abs (x div y) = abs x div abs y"
+by (unfold dvd_def, cases "y=0", auto simp add: abs_mult)
+
text{*Suggested by Matthias Daum*}
lemma int_power_div_base:
"\<lbrakk>0 < m; 0 < k\<rbrakk> \<Longrightarrow> k ^ m div k = (k::int) ^ (m - Suc 0)"
-apply (subgoal_tac "k ^ m = k ^ ((m - 1) + 1)")
+apply (subgoal_tac "k ^ m = k ^ ((m - Suc 0) + Suc 0)")
apply (erule ssubst)
apply (simp only: power_add)
apply simp_all
@@ -1387,8 +1245,8 @@
by (rule mod_diff_right_eq [symmetric])
lemmas zmod_simps =
- IntDiv.zmod_zadd_left_eq [symmetric]
- IntDiv.zmod_zadd_right_eq [symmetric]
+ mod_add_left_eq [symmetric]
+ mod_add_right_eq [symmetric]
IntDiv.zmod_zmult1_eq [symmetric]
mod_mult_left_eq [symmetric]
IntDiv.zpower_zmod
@@ -1463,14 +1321,14 @@
assume H: "x mod n = y mod n"
hence "x mod n - y mod n = 0" by simp
hence "(x mod n - y mod n) mod n = 0" by simp
- hence "(x - y) mod n = 0" by (simp add: zmod_zdiff1_eq[symmetric])
- thus "n dvd x - y" by (simp add: zdvd_iff_zmod_eq_0)
+ hence "(x - y) mod n = 0" by (simp add: mod_diff_eq[symmetric])
+ thus "n dvd x - y" by (simp add: dvd_eq_mod_eq_0)
next
assume H: "n dvd x - y"
then obtain k where k: "x-y = n*k" unfolding dvd_def by blast
hence "x = n*k + y" by simp
hence "x mod n = (n*k + y) mod n" by simp
- thus "x mod n = y mod n" by (simp add: zmod_zadd_left_eq)
+ thus "x mod n = y mod n" by (simp add: mod_add_left_eq)
qed
lemma nat_mod_eq_lemma: assumes xyn: "(x::nat) mod n = y mod n" and xy:"y \<le> x"
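Sketch, not part of the changeset: this IntDiv.thy cleanup retires the int-specific z* lemmas in favour of the generic semiring/ring facts cited above (mod_add_left_eq, dvd_eq_mod_eq_0, dvd_add, dvd_mult2, ...). The deleted statements remain one-line instances of those rules, e.g.

    (* restatements of two deleted lemmas, proved from the generic rules *)
    lemma "(a + b) mod (c::int) = ((a mod c) + b) mod c"
      by (rule mod_add_left_eq)
    lemma "(m dvd n) = (n mod m = (0::int))"
      by (rule dvd_eq_mod_eq_0)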
--- a/src/HOL/Integration.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Integration.thy Wed Mar 04 10:45:52 2009 +0100
@@ -134,7 +134,7 @@
apply (frule partition [THEN iffD1], safe)
apply (drule_tac x = "psize D" and P="%n. psize D \<le> n --> ?P n" in spec, safe)
apply (case_tac "psize D = 0")
-apply (drule_tac [2] n = "psize D - 1" in partition_lt, auto)
+apply (drule_tac [2] n = "psize D - Suc 0" in partition_lt, auto)
done
lemma partition_gt: "[|partition(a,b) D; n < (psize D)|] ==> D(n) < D(psize D)"
@@ -145,7 +145,7 @@
apply (rotate_tac 2)
apply (drule_tac x = "psize D" in spec)
apply (rule ccontr)
-apply (drule_tac n = "psize D - 1" in partition_lt)
+apply (drule_tac n = "psize D - Suc 0" in partition_lt)
apply auto
done
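Sketch, not part of the changeset: here and in int_power_div_base above, the literal 1 in nat subtraction is replaced by Suc 0, presumably to match the Suc-based form produced by the surrounding simp steps; the two spellings denote the same number:

    (* 1 = Suc 0 on nat, so the subtrahends are interchangeable *)
    lemma "(n::nat) - 1 = n - Suc 0"
      by (simp add: One_nat_def)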
--- a/src/HOL/IsaMakefile Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/IsaMakefile Wed Mar 04 10:45:52 2009 +0100
@@ -13,7 +13,6 @@
HOL-Library \
HOL-ex \
HOL-Auth \
- HOL-AxClasses \
HOL-Bali \
HOL-Decision_Procs \
HOL-Extraction \
@@ -79,38 +78,39 @@
$(OUT)/Pure: Pure
BASE_DEPENDENCIES = $(OUT)/Pure \
+ $(SRC)/Provers/blast.ML \
+ $(SRC)/Provers/clasimp.ML \
+ $(SRC)/Provers/classical.ML \
+ $(SRC)/Provers/hypsubst.ML \
+ $(SRC)/Provers/quantifier1.ML \
+ $(SRC)/Provers/splitter.ML \
+ $(SRC)/Tools/IsaPlanner/isand.ML \
+ $(SRC)/Tools/IsaPlanner/rw_inst.ML \
+ $(SRC)/Tools/IsaPlanner/rw_tools.ML \
+ $(SRC)/Tools/IsaPlanner/zipper.ML \
+ $(SRC)/Tools/atomize_elim.ML \
+ $(SRC)/Tools/code/code_funcgr.ML \
+ $(SRC)/Tools/code/code_haskell.ML \
+ $(SRC)/Tools/code/code_ml.ML \
+ $(SRC)/Tools/code/code_name.ML \
+ $(SRC)/Tools/code/code_printer.ML \
+ $(SRC)/Tools/code/code_target.ML \
+ $(SRC)/Tools/code/code_thingol.ML \
+ $(SRC)/Tools/code/code_wellsorted.ML \
+ $(SRC)/Tools/coherent.ML \
+ $(SRC)/Tools/eqsubst.ML \
+ $(SRC)/Tools/induct.ML \
+ $(SRC)/Tools/intuitionistic.ML \
+ $(SRC)/Tools/induct_tacs.ML \
+ $(SRC)/Tools/nbe.ML \
+ $(SRC)/Tools/project_rule.ML \
+ $(SRC)/Tools/random_word.ML \
+ $(SRC)/Tools/value.ML \
Code_Setup.thy \
HOL.thy \
Tools/hologic.ML \
Tools/recfun_codegen.ML \
Tools/simpdata.ML \
- $(SRC)/Tools/atomize_elim.ML \
- $(SRC)/Tools/code/code_funcgr.ML \
- $(SRC)/Tools/code/code_funcgr.ML \
- $(SRC)/Tools/code/code_name.ML \
- $(SRC)/Tools/code/code_printer.ML \
- $(SRC)/Tools/code/code_target.ML \
- $(SRC)/Tools/code/code_ml.ML \
- $(SRC)/Tools/code/code_haskell.ML \
- $(SRC)/Tools/code/code_thingol.ML \
- $(SRC)/Tools/induct.ML \
- $(SRC)/Tools/induct_tacs.ML \
- $(SRC)/Tools/IsaPlanner/isand.ML \
- $(SRC)/Tools/IsaPlanner/rw_inst.ML \
- $(SRC)/Tools/IsaPlanner/rw_tools.ML \
- $(SRC)/Tools/IsaPlanner/zipper.ML \
- $(SRC)/Tools/nbe.ML \
- $(SRC)/Tools/random_word.ML \
- $(SRC)/Tools/value.ML \
- $(SRC)/Provers/blast.ML \
- $(SRC)/Provers/clasimp.ML \
- $(SRC)/Provers/classical.ML \
- $(SRC)/Provers/coherent.ML \
- $(SRC)/Provers/eqsubst.ML \
- $(SRC)/Provers/hypsubst.ML \
- $(SRC)/Provers/project_rule.ML \
- $(SRC)/Provers/quantifier1.ML \
- $(SRC)/Provers/splitter.ML \
$(OUT)/HOL-Base: base.ML $(BASE_DEPENDENCIES)
@$(ISABELLE_TOOL) usedir -b -f base.ML -d false -g false $(OUT)/Pure HOL-Base
@@ -267,11 +267,11 @@
@$(ISABELLE_TOOL) usedir -b -f main.ML -g true $(OUT)/Pure HOL-Main
$(OUT)/HOL: ROOT.ML $(MAIN_DEPENDENCIES) \
+ Archimedean_Field.thy \
Complex_Main.thy \
Complex.thy \
Deriv.thy \
Fact.thy \
- FrechetDeriv.thy \
Integration.thy \
Lim.thy \
Ln.thy \
@@ -285,7 +285,6 @@
GCD.thy \
Parity.thy \
Lubs.thy \
- Polynomial.thy \
PReal.thy \
Rational.thy \
RComplete.thy \
@@ -314,8 +313,11 @@
Library/Euclidean_Space.thy Library/Glbs.thy Library/normarith.ML \
Library/Executable_Set.thy Library/Infinite_Set.thy \
Library/FuncSet.thy Library/Permutations.thy Library/Determinants.thy\
+ Library/Bit.thy \
Library/Finite_Cartesian_Product.thy \
+ Library/FrechetDeriv.thy \
Library/Fundamental_Theorem_Algebra.thy \
+ Library/Inner_Product.thy \
Library/Library.thy Library/List_Prefix.thy Library/State_Monad.thy \
Library/Nat_Int_Bij.thy Library/Multiset.thy Library/Permutation.thy \
Library/Primes.thy Library/Pocklington.thy Library/Quotient.thy \
@@ -336,6 +338,10 @@
Library/Boolean_Algebra.thy Library/Countable.thy \
Library/RBT.thy Library/Univ_Poly.thy \
Library/Random.thy Library/Quickcheck.thy \
+ Library/Poly_Deriv.thy \
+ Library/Polynomial.thy \
+ Library/Product_plus.thy \
+ Library/Product_Vector.thy \
Library/Enum.thy Library/Float.thy $(SRC)/Tools/float.ML $(SRC)/HOL/Tools/float_arith.ML \
Library/reify_data.ML Library/reflection.ML
@cd Library; $(ISABELLE_TOOL) usedir $(OUT)/HOL Library
@@ -790,15 +796,6 @@
@$(ISABELLE_TOOL) usedir $(OUT)/HOL IOA
-## HOL-AxClasses
-
-HOL-AxClasses: HOL $(LOG)/HOL-AxClasses.gz
-
-$(LOG)/HOL-AxClasses.gz: $(OUT)/HOL AxClasses/Group.thy \
- AxClasses/Product.thy AxClasses/ROOT.ML AxClasses/Semigroups.thy
- @$(ISABELLE_TOOL) usedir $(OUT)/HOL AxClasses
-
-
## HOL-Lattice
HOL-Lattice: HOL $(LOG)/HOL-Lattice.gz
@@ -814,34 +811,31 @@
HOL-ex: HOL $(LOG)/HOL-ex.gz
$(LOG)/HOL-ex.gz: $(OUT)/HOL Library/Commutative_Ring.thy \
- Library/Primes.thy \
- ex/Abstract_NAT.thy ex/Antiquote.thy ex/Arith_Examples.thy ex/BT.thy \
- ex/BinEx.thy ex/CTL.thy ex/Chinese.thy ex/Classical.thy \
- ex/Coherent.thy ex/Dense_Linear_Order_Ex.thy ex/Eval_Examples.thy \
- ex/Groebner_Examples.thy ex/Quickcheck_Generators.thy \
- ex/Codegenerator.thy ex/Codegenerator_Pretty.thy \
- ex/CodegenSML_Test.thy ex/Formal_Power_Series_Examples.thy \
- ex/Commutative_RingEx.thy ex/Efficient_Nat_examples.thy \
- ex/Hex_Bin_Examples.thy ex/Commutative_Ring_Complete.thy \
- ex/ExecutableContent.thy ex/Fundefs.thy ex/Guess.thy ex/Hebrew.thy \
- ex/Binary.thy ex/Higher_Order_Logic.thy ex/Hilbert_Classical.thy \
+ Library/Primes.thy ex/Abstract_NAT.thy ex/Antiquote.thy \
+ ex/ApproximationEx.thy ex/Arith_Examples.thy \
+ ex/Arithmetic_Series_Complex.thy ex/BT.thy ex/BinEx.thy \
+ ex/Binary.thy ex/CTL.thy ex/Chinese.thy ex/Classical.thy \
+ ex/CodegenSML_Test.thy ex/Codegenerator.thy \
+ ex/Codegenerator_Pretty.thy ex/Coherent.thy \
+ ex/Commutative_RingEx.thy ex/Commutative_Ring_Complete.thy \
+ ex/Dense_Linear_Order_Ex.thy ex/Efficient_Nat_examples.thy \
+ ex/Eval_Examples.thy ex/ExecutableContent.thy \
+ ex/Formal_Power_Series_Examples.thy ex/Fundefs.thy \
+ ex/Groebner_Examples.thy ex/Guess.thy ex/HarmonicSeries.thy \
+ ex/Hebrew.thy ex/Hex_Bin_Examples.thy ex/Higher_Order_Logic.thy \
+ ex/Hilbert_Classical.thy ex/ImperativeQuicksort.thy \
ex/Induction_Scheme.thy ex/InductiveInvariant.thy \
ex/InductiveInvariant_examples.thy ex/Intuitionistic.thy \
- ex/Lagrange.thy ex/LocaleTest2.thy ex/MT.thy \
- ex/MergeSort.thy ex/MonoidGroup.thy ex/Multiquote.thy ex/NatSum.thy \
+ ex/Lagrange.thy ex/LocaleTest2.thy ex/MT.thy ex/MergeSort.thy \
+ ex/Meson_Test.thy ex/MonoidGroup.thy ex/Multiquote.thy ex/NatSum.thy \
ex/Numeral.thy ex/PER.thy ex/PresburgerEx.thy ex/Primrec.thy \
- ex/Quickcheck_Examples.thy \
- ex/ReflectionEx.thy ex/ROOT.ML ex/Recdefs.thy ex/Records.thy \
+ ex/Quickcheck_Examples.thy ex/Quickcheck_Generators.thy ex/ROOT.ML \
+ ex/Recdefs.thy ex/Records.thy ex/ReflectionEx.thy \
ex/Refute_Examples.thy ex/SAT_Examples.thy ex/SVC_Oracle.thy \
- ex/Subarray.thy ex/Sublist.thy \
- ex/Sudoku.thy ex/Tarski.thy ex/Termination.thy ex/Term_Of_Syntax.thy \
- ex/Unification.thy ex/document/root.bib \
- ex/document/root.tex ex/Meson_Test.thy ex/set.thy \
- ex/svc_funcs.ML ex/svc_test.thy \
- ex/ImperativeQuicksort.thy \
- ex/Arithmetic_Series_Complex.thy ex/HarmonicSeries.thy \
- ex/Sqrt.thy ex/Sqrt_Script.thy \
- ex/ApproximationEx.thy
+ ex/Serbian.thy ex/Sqrt.thy ex/Sqrt_Script.thy ex/Subarray.thy \
+ ex/Sublist.thy ex/Sudoku.thy ex/Tarski.thy ex/Term_Of_Syntax.thy \
+ ex/Termination.thy ex/Unification.thy ex/document/root.bib \
+ ex/document/root.tex ex/set.thy ex/svc_funcs.ML ex/svc_test.thy
@$(ISABELLE_TOOL) usedir $(OUT)/HOL ex
@@ -1062,22 +1056,22 @@
## clean
clean:
- @rm -f $(OUT)/HOL-Plain $(OUT)/HOL-Main $(OUT)/HOL $(OUT)/HOL-Nominal $(OUT)/TLA \
- $(LOG)/HOL.gz $(LOG)/TLA.gz \
- $(LOG)/HOL-Isar_examples.gz $(LOG)/HOL-Induct.gz \
- $(LOG)/HOL-ex.gz $(LOG)/HOL-Subst.gz $(LOG)/HOL-IMP.gz \
- $(LOG)/HOL-IMPP.gz $(LOG)/HOL-Hoare.gz \
- $(LOG)/HOL-HoareParallel.gz \
- $(LOG)/HOL-Lex.gz $(LOG)/HOL-Algebra.gz \
- $(LOG)/HOL-Auth.gz $(LOG)/HOL-UNITY.gz \
- $(LOG)/HOL-Modelcheck.gz $(LOG)/HOL-Lambda.gz \
- $(LOG)/HOL-Bali.gz \
- $(LOG)/HOL-MicroJava.gz $(LOG)/HOL-NanoJava.gz \
- $(LOG)/HOL-Nominal-Examples.gz \
- $(LOG)/HOL-IOA.gz $(LOG)/HOL-AxClasses \
- $(LOG)/HOL-Lattice $(LOG)/HOL-Matrix \
- $(LOG)/HOL-HahnBanach.gz $(LOG)/HOL-SET-Protocol.gz \
- $(LOG)/TLA-Inc.gz $(LOG)/TLA-Buffer.gz $(LOG)/TLA-Memory.gz \
- $(LOG)/HOL-Library.gz $(LOG)/HOL-Unix.gz \
- $(OUT)/HOL-Word $(LOG)/HOL-Word.gz $(LOG)/HOL-Word-Examples.gz \
- $(OUT)/HOL-NSA $(LOG)/HOL-NSA.gz $(LOG)/HOL-NSA-Examples.gz
+ @rm -f $(OUT)/HOL-Plain $(OUT)/HOL-Main $(OUT)/HOL \
+ $(OUT)/HOL-Nominal $(OUT)/TLA $(LOG)/HOL.gz \
+ $(LOG)/TLA.gz $(LOG)/HOL-Isar_examples.gz \
+ $(LOG)/HOL-Induct.gz $(LOG)/HOL-ex.gz \
+ $(LOG)/HOL-Subst.gz $(LOG)/HOL-IMP.gz \
+ $(LOG)/HOL-IMPP.gz $(LOG)/HOL-Hoare.gz \
+ $(LOG)/HOL-HoareParallel.gz $(LOG)/HOL-Lex.gz \
+ $(LOG)/HOL-Algebra.gz $(LOG)/HOL-Auth.gz \
+ $(LOG)/HOL-UNITY.gz $(LOG)/HOL-Modelcheck.gz \
+ $(LOG)/HOL-Lambda.gz $(LOG)/HOL-Bali.gz \
+ $(LOG)/HOL-MicroJava.gz $(LOG)/HOL-NanoJava.gz \
+ $(LOG)/HOL-Nominal-Examples.gz $(LOG)/HOL-IOA.gz \
+ $(LOG)/HOL-Lattice $(LOG)/HOL-Matrix \
+ $(LOG)/HOL-HahnBanach.gz $(LOG)/HOL-SET-Protocol.gz \
+ $(LOG)/TLA-Inc.gz $(LOG)/TLA-Buffer.gz \
+ $(LOG)/TLA-Memory.gz $(LOG)/HOL-Library.gz \
+ $(LOG)/HOL-Unix.gz $(OUT)/HOL-Word $(LOG)/HOL-Word.gz \
+ $(LOG)/HOL-Word-Examples.gz $(OUT)/HOL-NSA \
+ $(LOG)/HOL-NSA.gz $(LOG)/HOL-NSA-Examples.gz
--- a/src/HOL/Library/Abstract_Rat.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Abstract_Rat.thy Wed Mar 04 10:45:52 2009 +0100
@@ -247,7 +247,7 @@
(of_int(n div d)::'a::{field, ring_char_0}) = of_int n / of_int d"
apply (frule of_int_div_aux [of d n, where ?'a = 'a])
apply simp
- apply (simp add: zdvd_iff_zmod_eq_0)
+ apply (simp add: dvd_eq_mod_eq_0)
done
--- a/src/HOL/Library/Boolean_Algebra.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Boolean_Algebra.thy Wed Mar 04 10:45:52 2009 +0100
@@ -223,7 +223,7 @@
lemma xor_left_self [simp]: "x \<oplus> (x \<oplus> y) = y"
by (simp only: xor_assoc [symmetric] xor_self xor_zero_left)
-lemma xor_compl_left: "\<sim> x \<oplus> y = \<sim> (x \<oplus> y)"
+lemma xor_compl_left [simp]: "\<sim> x \<oplus> y = \<sim> (x \<oplus> y)"
apply (simp only: xor_def de_Morgan_disj de_Morgan_conj double_compl)
apply (simp only: conj_disj_distribs)
apply (simp only: conj_cancel_right conj_cancel_left)
@@ -231,7 +231,7 @@
apply (simp only: disj_ac conj_ac)
done
-lemma xor_compl_right: "x \<oplus> \<sim> y = \<sim> (x \<oplus> y)"
+lemma xor_compl_right [simp]: "x \<oplus> \<sim> y = \<sim> (x \<oplus> y)"
apply (simp only: xor_def de_Morgan_disj de_Morgan_conj double_compl)
apply (simp only: conj_disj_distribs)
apply (simp only: conj_cancel_right conj_cancel_left)
@@ -239,11 +239,11 @@
apply (simp only: disj_ac conj_ac)
done
-lemma xor_cancel_right [simp]: "x \<oplus> \<sim> x = \<one>"
+lemma xor_cancel_right: "x \<oplus> \<sim> x = \<one>"
by (simp only: xor_compl_right xor_self compl_zero)
-lemma xor_cancel_left [simp]: "\<sim> x \<oplus> x = \<one>"
-by (subst xor_commute) (rule xor_cancel_right)
+lemma xor_cancel_left: "\<sim> x \<oplus> x = \<one>"
+by (simp only: xor_compl_left xor_self compl_zero)
lemma conj_xor_distrib: "x \<sqinter> (y \<oplus> z) = (x \<sqinter> y) \<oplus> (x \<sqinter> z)"
proof -
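Sketch, not part of the changeset: the [simp] status moves from the cancellation laws to xor_compl_left/right, so the simplifier now pulls \<sim> outward through \<oplus>. Stated in the same algebraic context as the lemmas above, and assuming double_compl is the usual \<sim> (\<sim> x) = x (an assumption, not shown in this hunk), the new orientation gives for instance:

    (* complements on both arguments of \<oplus> cancel out *)
    lemma "\<sim> x \<oplus> \<sim> y = x \<oplus> y"
      by (simp only: xor_compl_left xor_compl_right double_compl)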
--- a/src/HOL/Library/Char_nat.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Char_nat.thy Wed Mar 04 10:45:52 2009 +0100
@@ -132,7 +132,7 @@
lemma Char_char_of_nat:
"Char n m = char_of_nat (nat_of_nibble n * 16 + nat_of_nibble m)"
unfolding char_of_nat_def Let_def nibble_pair_of_nat_def
- by (auto simp add: div_add1_eq mod_add1_eq nat_of_nibble_div_16 nibble_of_nat_norm nibble_of_nat_of_nibble)
+ by (auto simp add: div_add1_eq mod_add_eq nat_of_nibble_div_16 nibble_of_nat_norm nibble_of_nat_of_nibble)
lemma char_of_nat_of_char:
"char_of_nat (nat_of_char c) = c"
@@ -165,7 +165,7 @@
show ?thesis
by (simp add: nat_of_char.simps char_of_nat_def nibble_of_pair
nat_of_nibble_of_nat mod_mult_distrib
- n aux3 mod_mult_self3 l_256 aux4 mod_add1_eq [of "256 * k"] l_div_256)
+ n aux3 mod_mult_self3 l_256 aux4 mod_add_eq [of "256 * k"] l_div_256)
qed
lemma nibble_pair_of_nat_char:
--- a/src/HOL/Library/Code_Char.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Code_Char.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: HOL/Library/Code_Char.thy
- ID: $Id$
Author: Florian Haftmann
*)
--- a/src/HOL/Library/Coinductive_List.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Coinductive_List.thy Wed Mar 04 10:45:52 2009 +0100
@@ -298,12 +298,12 @@
(CONS a M, CONS b N) \<in> EqLList r"
lemma EqLList_unfold:
- "EqLList r = dsum (diag {Datatype.Numb 0}) (dprod r (EqLList r))"
+ "EqLList r = dsum (Id_on {Datatype.Numb 0}) (dprod r (EqLList r))"
by (fast intro!: EqLList.intros [unfolded NIL_def CONS_def]
elim: EqLList.cases [unfolded NIL_def CONS_def])
lemma EqLList_implies_ntrunc_equality:
- "(M, N) \<in> EqLList (diag A) \<Longrightarrow> ntrunc k M = ntrunc k N"
+ "(M, N) \<in> EqLList (Id_on A) \<Longrightarrow> ntrunc k M = ntrunc k N"
apply (induct k arbitrary: M N rule: nat_less_induct)
apply (erule EqLList.cases)
apply (safe del: equalityI)
@@ -314,28 +314,28 @@
apply (simp_all add: CONS_def less_Suc_eq)
done
-lemma Domain_EqLList: "Domain (EqLList (diag A)) \<subseteq> LList A"
+lemma Domain_EqLList: "Domain (EqLList (Id_on A)) \<subseteq> LList A"
apply (rule subsetI)
apply (erule LList.coinduct)
apply (subst (asm) EqLList_unfold)
apply (auto simp add: NIL_def CONS_def)
done
-lemma EqLList_diag: "EqLList (diag A) = diag (LList A)"
+lemma EqLList_Id_on: "EqLList (Id_on A) = Id_on (LList A)"
(is "?lhs = ?rhs")
proof
show "?lhs \<subseteq> ?rhs"
apply (rule subsetI)
apply (rule_tac p = x in PairE)
apply clarify
- apply (rule diag_eqI)
+ apply (rule Id_on_eqI)
apply (rule EqLList_implies_ntrunc_equality [THEN ntrunc_equality],
assumption)
apply (erule DomainI [THEN Domain_EqLList [THEN subsetD]])
done
{
- fix M N assume "(M, N) \<in> diag (LList A)"
- then have "(M, N) \<in> EqLList (diag A)"
+ fix M N assume "(M, N) \<in> Id_on (LList A)"
+ then have "(M, N) \<in> EqLList (Id_on A)"
proof coinduct
case (EqLList M N)
then obtain L where L: "L \<in> LList A" and MN: "M = L" "N = L" by blast
@@ -344,7 +344,7 @@
case NIL with MN have ?EqNIL by simp
then show ?thesis ..
next
- case CONS with MN have ?EqCONS by (simp add: diagI)
+ case CONS with MN have ?EqCONS by (simp add: Id_onI)
then show ?thesis ..
qed
qed
@@ -352,8 +352,8 @@
then show "?rhs \<subseteq> ?lhs" by auto
qed
-lemma EqLList_diag_iff [iff]: "(p \<in> EqLList (diag A)) = (p \<in> diag (LList A))"
- by (simp only: EqLList_diag)
+lemma EqLList_Id_on_iff [iff]: "(p \<in> EqLList (Id_on A)) = (p \<in> Id_on (LList A))"
+ by (simp only: EqLList_Id_on)
text {*
@@ -367,11 +367,11 @@
and step: "\<And>M N. (M, N) \<in> r \<Longrightarrow>
M = NIL \<and> N = NIL \<or>
(\<exists>a b M' N'.
- M = CONS a M' \<and> N = CONS b N' \<and> (a, b) \<in> diag A \<and>
- ((M', N') \<in> r \<or> (M', N') \<in> EqLList (diag A)))"
+ M = CONS a M' \<and> N = CONS b N' \<and> (a, b) \<in> Id_on A \<and>
+ ((M', N') \<in> r \<or> (M', N') \<in> EqLList (Id_on A)))"
shows "M = N"
proof -
- from r have "(M, N) \<in> EqLList (diag A)"
+ from r have "(M, N) \<in> EqLList (Id_on A)"
proof coinduct
case EqLList
then show ?case by (rule step)
@@ -387,8 +387,8 @@
(f (CONS x l), g (CONS x l)) = (NIL, NIL) \<or>
(\<exists>M N a b.
(f (CONS x l), g (CONS x l)) = (CONS a M, CONS b N) \<and>
- (a, b) \<in> diag A \<and>
- (M, N) \<in> {(f u, g u) | u. u \<in> LList A} \<union> diag (LList A))"
+ (a, b) \<in> Id_on A \<and>
+ (M, N) \<in> {(f u, g u) | u. u \<in> LList A} \<union> Id_on (LList A))"
(is "\<And>x l. _ \<Longrightarrow> _ \<Longrightarrow> ?fun_CONS x l")
shows "f M = g M"
proof -
@@ -401,8 +401,8 @@
from L show ?case
proof (cases L)
case NIL
- with fun_NIL and MN have "(M, N) \<in> diag (LList A)" by auto
- then have "(M, N) \<in> EqLList (diag A)" ..
+ with fun_NIL and MN have "(M, N) \<in> Id_on (LList A)" by auto
+ then have "(M, N) \<in> EqLList (Id_on A)" ..
then show ?thesis by cases simp_all
next
case (CONS a K)
@@ -411,23 +411,23 @@
then show ?thesis
proof
assume ?NIL
- with MN CONS have "(M, N) \<in> diag (LList A)" by auto
- then have "(M, N) \<in> EqLList (diag A)" ..
+ with MN CONS have "(M, N) \<in> Id_on (LList A)" by auto
+ then have "(M, N) \<in> EqLList (Id_on A)" ..
then show ?thesis by cases simp_all
next
assume ?CONS
with CONS obtain a b M' N' where
fg: "(f L, g L) = (CONS a M', CONS b N')"
- and ab: "(a, b) \<in> diag A"
- and M'N': "(M', N') \<in> ?bisim \<union> diag (LList A)"
+ and ab: "(a, b) \<in> Id_on A"
+ and M'N': "(M', N') \<in> ?bisim \<union> Id_on (LList A)"
by blast
from M'N' show ?thesis
proof
assume "(M', N') \<in> ?bisim"
with MN fg ab show ?thesis by simp
next
- assume "(M', N') \<in> diag (LList A)"
- then have "(M', N') \<in> EqLList (diag A)" ..
+ assume "(M', N') \<in> Id_on (LList A)"
+ then have "(M', N') \<in> EqLList (Id_on A)" ..
with MN fg ab show ?thesis by simp
qed
qed
@@ -463,7 +463,7 @@
with h h' MN have "M = CONS (fst p) (h (snd p))"
and "N = CONS (fst p) (h' (snd p))"
by (simp_all split: prod.split)
- then have ?EqCONS by (auto iff: diag_iff)
+ then have ?EqCONS by (auto iff: Id_on_iff)
then show ?thesis ..
qed
qed
@@ -498,7 +498,7 @@
next
assume "?EqLCons (l1, l2)"
with MN have ?EqCONS
- by (force simp add: Rep_llist_LCons EqLList_diag intro: Rep_llist_UNIV)
+ by (force simp add: Rep_llist_LCons EqLList_Id_on intro: Rep_llist_UNIV)
then show ?thesis ..
qed
qed
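Sketch, not part of the changeset: this file is a mechanical rename of the identity relation diag to Id_on, including its derived facts (Id_onI, Id_on_eqI, Id_on_iff). Assuming the renamed constant keeps the old characterization, the basic facts used above read as follows:

    (* Id_on A relates each element of A to itself, and nothing else *)
    lemma "a \<in> A \<Longrightarrow> (a, a) \<in> Id_on A"
      by (rule Id_onI)
    lemma "(a, b) \<in> Id_on A \<Longrightarrow> a = b \<and> a \<in> A"
      by (auto simp add: Id_on_iff)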
--- a/src/HOL/Library/Determinants.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Determinants.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1048,7 +1048,7 @@
note th0 = this
let ?g = "\<lambda>x. if x = 0 then 0 else norm x *s f (inverse (norm x) *s x)"
{fix x:: "real ^'n" assume nx: "norm x = 1"
- have "?g x = f x" using nx by (simp add: norm_eq_0[symmetric])}
+ have "?g x = f x" using nx by auto}
hence thfg: "\<forall>x. norm x = 1 \<longrightarrow> ?g x = f x" by blast
have g0: "?g 0 = 0" by simp
{fix x y :: "real ^'n"
@@ -1057,15 +1057,15 @@
moreover
{assume "x = 0" "y \<noteq> 0"
then have "dist (?g x) (?g y) = dist x y"
- apply (simp add: dist_def norm_neg norm_mul norm_eq_0)
+ apply (simp add: dist_def norm_mul)
apply (rule f1[rule_format])
- by(simp add: norm_mul norm_eq_0 field_simps)}
+ by(simp add: norm_mul field_simps)}
moreover
{assume "x \<noteq> 0" "y = 0"
then have "dist (?g x) (?g y) = dist x y"
- apply (simp add: dist_def norm_neg norm_mul norm_eq_0)
+ apply (simp add: dist_def norm_mul)
apply (rule f1[rule_format])
- by(simp add: norm_mul norm_eq_0 field_simps)}
+ by(simp add: norm_mul field_simps)}
moreover
{assume z: "x \<noteq> 0" "y \<noteq> 0"
have th00: "x = norm x *s inverse (norm x) *s x" "y = norm y *s inverse (norm y) *s y" "norm x *s f (inverse (norm x) *s x) = norm x *s f (inverse (norm x) *s x)"
@@ -1077,7 +1077,7 @@
"norm (f (inverse (norm x) *s x) - f (inverse (norm y) *s y)) =
norm (inverse (norm x) *s x - inverse (norm y) *s y)"
using z
- by (auto simp add: norm_eq_0 vector_smult_assoc field_simps norm_mul intro: f1[rule_format] fd1[rule_format, unfolded dist_def])
+ by (auto simp add: vector_smult_assoc field_simps norm_mul intro: f1[rule_format] fd1[rule_format, unfolded dist_def])
from z th0[OF th00] have "dist (?g x) (?g y) = dist x y"
by (simp add: dist_def)}
ultimately have "dist (?g x) (?g y) = dist x y" by blast}
@@ -1148,4 +1148,4 @@
by (simp add: ring_simps)
qed
-end
\ No newline at end of file
+end
--- a/src/HOL/Library/Enum.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Enum.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: HOL/Library/Enum.thy
- ID: $Id$
Author: Florian Haftmann, TU Muenchen
*)
--- a/src/HOL/Library/Euclidean_Space.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Euclidean_Space.thy Wed Mar 04 10:45:52 2009 +0100
@@ -8,6 +8,7 @@
theory Euclidean_Space
imports "~~/src/HOL/Decision_Procs/Dense_Linear_Order" Complex_Main
Finite_Cartesian_Product Glbs Infinite_Set Numeral_Type
+ Inner_Product
uses ("normarith.ML")
begin
@@ -84,7 +85,13 @@
instance by (intro_classes)
end
-text{* Also the scalar-vector multiplication. FIXME: We should unify this with the scalar multiplication in @{text real_vector} *}
+instantiation "^" :: (scaleR, type) scaleR
+begin
+definition vector_scaleR_def: "scaleR = (\<lambda> r x. (\<chi> i. scaleR r (x$i)))"
+instance ..
+end
+
+text{* Also the scalar-vector multiplication. *}
definition vector_scalar_mult:: "'a::times \<Rightarrow> 'a ^'n \<Rightarrow> 'a ^ 'n" (infixr "*s" 75)
where "c *s x = (\<chi> i. c * (x$i))"
@@ -118,6 +125,7 @@
[@{thm vector_add_def}, @{thm vector_mult_def},
@{thm vector_minus_def}, @{thm vector_uminus_def},
@{thm vector_one_def}, @{thm vector_zero_def}, @{thm vec_def},
+ @{thm vector_scaleR_def},
@{thm Cart_lambda_beta'}, @{thm vector_scalar_mult_def}]
fun vector_arith_tac ths =
simp_tac ss1
@@ -166,9 +174,18 @@
shows "(- x)$i = - (x$i)"
using i by vector
+lemma vector_scaleR_component:
+ fixes x :: "'a::scaleR ^ 'n"
+ assumes i: "i \<in> {1 .. dimindex(UNIV :: 'n set)}"
+ shows "(scaleR r x)$i = scaleR r (x$i)"
+ using i by vector
+
lemma cond_component: "(if b then x else y)$i = (if b then x$i else y$i)" by vector
-lemmas vector_component = vec_component vector_add_component vector_mult_component vector_smult_component vector_minus_component vector_uminus_component cond_component
+lemmas vector_component =
+ vec_component vector_add_component vector_mult_component
+ vector_smult_component vector_minus_component vector_uminus_component
+ vector_scaleR_component cond_component
subsection {* Some frequently useful arithmetic lemmas over vectors. *}
@@ -199,6 +216,9 @@
apply (intro_classes)
by (vector Cart_eq)
+instance "^" :: (real_vector, type) real_vector
+ by default (vector scaleR_left_distrib scaleR_right_distrib)+
+
instance "^" :: (semigroup_mult,type) semigroup_mult
apply (intro_classes) by (vector mult_assoc)
@@ -242,6 +262,18 @@
instance "^" :: (ring,type) ring by (intro_classes)
instance "^" :: (semiring_1_cancel,type) semiring_1_cancel by (intro_classes)
instance "^" :: (comm_semiring_1,type) comm_semiring_1 by (intro_classes)
+
+instance "^" :: (ring_1,type) ring_1 ..
+
+instance "^" :: (real_algebra,type) real_algebra
+ apply intro_classes
+ apply (simp_all add: vector_scaleR_def ring_simps)
+ apply vector
+ apply vector
+ done
+
+instance "^" :: (real_algebra_1,type) real_algebra_1 ..
+
lemma of_nat_index:
"i\<in>{1 .. dimindex (UNIV :: 'n set)} \<Longrightarrow> (of_nat n :: 'a::semiring_1 ^'n)$i = of_nat n"
apply (induct n)
@@ -290,8 +322,7 @@
qed
instance "^" :: (comm_ring_1,type) comm_ring_1 by intro_classes
- (* FIXME!!! Why does the axclass package complain here !!*)
-(* instance "^" :: (ring_char_0,type) ring_char_0 by intro_classes *)
+instance "^" :: (ring_char_0,type) ring_char_0 by intro_classes
lemma vector_smult_assoc: "a *s (b *s x) = ((a::'a::semigroup_mult) * b) *s x"
by (vector mult_assoc)
@@ -314,6 +345,241 @@
apply (auto simp add: vec_def Cart_eq vec_component Cart_lambda_beta )
using dimindex_ge_1 apply auto done
+subsection {* Square root of sum of squares *}
+
+definition
+ "setL2 f A = sqrt (\<Sum>i\<in>A. (f i)\<twosuperior>)"
+
+lemma setL2_cong:
+ "\<lbrakk>A = B; \<And>x. x \<in> B \<Longrightarrow> f x = g x\<rbrakk> \<Longrightarrow> setL2 f A = setL2 g B"
+ unfolding setL2_def by simp
+
+lemma strong_setL2_cong:
+ "\<lbrakk>A = B; \<And>x. x \<in> B =simp=> f x = g x\<rbrakk> \<Longrightarrow> setL2 f A = setL2 g B"
+ unfolding setL2_def simp_implies_def by simp
+
+lemma setL2_infinite [simp]: "\<not> finite A \<Longrightarrow> setL2 f A = 0"
+ unfolding setL2_def by simp
+
+lemma setL2_empty [simp]: "setL2 f {} = 0"
+ unfolding setL2_def by simp
+
+lemma setL2_insert [simp]:
+ "\<lbrakk>finite F; a \<notin> F\<rbrakk> \<Longrightarrow>
+ setL2 f (insert a F) = sqrt ((f a)\<twosuperior> + (setL2 f F)\<twosuperior>)"
+ unfolding setL2_def by (simp add: setsum_nonneg)
+
+lemma setL2_nonneg [simp]: "0 \<le> setL2 f A"
+ unfolding setL2_def by (simp add: setsum_nonneg)
+
+lemma setL2_0': "\<forall>a\<in>A. f a = 0 \<Longrightarrow> setL2 f A = 0"
+ unfolding setL2_def by simp
+
+lemma setL2_mono:
+ assumes "\<And>i. i \<in> K \<Longrightarrow> f i \<le> g i"
+ assumes "\<And>i. i \<in> K \<Longrightarrow> 0 \<le> f i"
+ shows "setL2 f K \<le> setL2 g K"
+ unfolding setL2_def
+ by (simp add: setsum_nonneg setsum_mono power_mono prems)
+
+lemma setL2_right_distrib:
+ "0 \<le> r \<Longrightarrow> r * setL2 f A = setL2 (\<lambda>x. r * f x) A"
+ unfolding setL2_def
+ apply (simp add: power_mult_distrib)
+ apply (simp add: setsum_right_distrib [symmetric])
+ apply (simp add: real_sqrt_mult setsum_nonneg)
+ done
+
+lemma setL2_left_distrib:
+ "0 \<le> r \<Longrightarrow> setL2 f A * r = setL2 (\<lambda>x. f x * r) A"
+ unfolding setL2_def
+ apply (simp add: power_mult_distrib)
+ apply (simp add: setsum_left_distrib [symmetric])
+ apply (simp add: real_sqrt_mult setsum_nonneg)
+ done
+
+lemma setsum_nonneg_eq_0_iff:
+ fixes f :: "'a \<Rightarrow> 'b::pordered_ab_group_add"
+ shows "\<lbrakk>finite A; \<forall>x\<in>A. 0 \<le> f x\<rbrakk> \<Longrightarrow> setsum f A = 0 \<longleftrightarrow> (\<forall>x\<in>A. f x = 0)"
+ apply (induct set: finite, simp)
+ apply (simp add: add_nonneg_eq_0_iff setsum_nonneg)
+ done
+
+lemma setL2_eq_0_iff: "finite A \<Longrightarrow> setL2 f A = 0 \<longleftrightarrow> (\<forall>x\<in>A. f x = 0)"
+ unfolding setL2_def
+ by (simp add: setsum_nonneg setsum_nonneg_eq_0_iff)
+
+lemma setL2_triangle_ineq:
+ shows "setL2 (\<lambda>i. f i + g i) A \<le> setL2 f A + setL2 g A"
+proof (cases "finite A")
+ case False
+ thus ?thesis by simp
+next
+ case True
+ thus ?thesis
+ proof (induct set: finite)
+ case empty
+ show ?case by simp
+ next
+ case (insert x F)
+ hence "sqrt ((f x + g x)\<twosuperior> + (setL2 (\<lambda>i. f i + g i) F)\<twosuperior>) \<le>
+ sqrt ((f x + g x)\<twosuperior> + (setL2 f F + setL2 g F)\<twosuperior>)"
+ by (intro real_sqrt_le_mono add_left_mono power_mono insert
+ setL2_nonneg add_increasing zero_le_power2)
+ also have
+ "\<dots> \<le> sqrt ((f x)\<twosuperior> + (setL2 f F)\<twosuperior>) + sqrt ((g x)\<twosuperior> + (setL2 g F)\<twosuperior>)"
+ by (rule real_sqrt_sum_squares_triangle_ineq)
+ finally show ?case
+ using insert by simp
+ qed
+qed
+
+lemma sqrt_sum_squares_le_sum:
+ "\<lbrakk>0 \<le> x; 0 \<le> y\<rbrakk> \<Longrightarrow> sqrt (x\<twosuperior> + y\<twosuperior>) \<le> x + y"
+ apply (rule power2_le_imp_le)
+ apply (simp add: power2_sum)
+ apply (simp add: mult_nonneg_nonneg)
+ apply (simp add: add_nonneg_nonneg)
+ done
+
+lemma setL2_le_setsum [rule_format]:
+ "(\<forall>i\<in>A. 0 \<le> f i) \<longrightarrow> setL2 f A \<le> setsum f A"
+ apply (cases "finite A")
+ apply (induct set: finite)
+ apply simp
+ apply clarsimp
+ apply (erule order_trans [OF sqrt_sum_squares_le_sum])
+ apply simp
+ apply simp
+ apply simp
+ done
+
+lemma sqrt_sum_squares_le_sum_abs: "sqrt (x\<twosuperior> + y\<twosuperior>) \<le> \<bar>x\<bar> + \<bar>y\<bar>"
+ apply (rule power2_le_imp_le)
+ apply (simp add: power2_sum)
+ apply (simp add: mult_nonneg_nonneg)
+ apply (simp add: add_nonneg_nonneg)
+ done
+
+lemma setL2_le_setsum_abs: "setL2 f A \<le> (\<Sum>i\<in>A. \<bar>f i\<bar>)"
+ apply (cases "finite A")
+ apply (induct set: finite)
+ apply simp
+ apply simp
+ apply (rule order_trans [OF sqrt_sum_squares_le_sum_abs])
+ apply simp
+ apply simp
+ done
+
+lemma setL2_mult_ineq_lemma:
+ fixes a b c d :: real
+ shows "2 * (a * c) * (b * d) \<le> a\<twosuperior> * d\<twosuperior> + b\<twosuperior> * c\<twosuperior>"
+proof -
+ have "0 \<le> (a * d - b * c)\<twosuperior>" by simp
+ also have "\<dots> = a\<twosuperior> * d\<twosuperior> + b\<twosuperior> * c\<twosuperior> - 2 * (a * d) * (b * c)"
+ by (simp only: power2_diff power_mult_distrib)
+ also have "\<dots> = a\<twosuperior> * d\<twosuperior> + b\<twosuperior> * c\<twosuperior> - 2 * (a * c) * (b * d)"
+ by simp
+ finally show "2 * (a * c) * (b * d) \<le> a\<twosuperior> * d\<twosuperior> + b\<twosuperior> * c\<twosuperior>"
+ by simp
+qed
+
+lemma setL2_mult_ineq: "(\<Sum>i\<in>A. \<bar>f i\<bar> * \<bar>g i\<bar>) \<le> setL2 f A * setL2 g A"
+ apply (cases "finite A")
+ apply (induct set: finite)
+ apply simp
+ apply (rule power2_le_imp_le, simp)
+ apply (rule order_trans)
+ apply (rule power_mono)
+ apply (erule add_left_mono)
+ apply (simp add: add_nonneg_nonneg mult_nonneg_nonneg setsum_nonneg)
+ apply (simp add: power2_sum)
+ apply (simp add: power_mult_distrib)
+ apply (simp add: right_distrib left_distrib)
+ apply (rule ord_le_eq_trans)
+ apply (rule setL2_mult_ineq_lemma)
+ apply simp
+ apply (intro mult_nonneg_nonneg setL2_nonneg)
+ apply simp
+ done
+
+lemma member_le_setL2: "\<lbrakk>finite A; i \<in> A\<rbrakk> \<Longrightarrow> f i \<le> setL2 f A"
+ apply (rule_tac s="insert i (A - {i})" and t="A" in subst)
+ apply fast
+ apply (subst setL2_insert)
+ apply simp
+ apply simp
+ apply simp
+ done
+
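(* Sketch, not part of the changeset: setL2 packages the "square root of a sum of
   squares" pattern used for the vector norm below.  A worked instance of setL2_def:
     setL2 f A = sqrt (\<Sum>i\<in>A. (f i)\<twosuperior>)
     setL2 f {1, 2} = sqrt ((f 1)\<twosuperior> + (f 2)\<twosuperior>),   e.g.  sqrt (3\<twosuperior> + 4\<twosuperior>) = 5. *)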
+subsection {* Norms *}
+
+instantiation "^" :: (real_normed_vector, type) real_normed_vector
+begin
+
+definition vector_norm_def:
+ "norm (x::'a^'b) = setL2 (\<lambda>i. norm (x$i)) {1 .. dimindex (UNIV:: 'b set)}"
+
+definition vector_sgn_def:
+ "sgn (x::'a^'b) = scaleR (inverse (norm x)) x"
+
+instance proof
+ fix a :: real and x y :: "'a ^ 'b"
+ show "0 \<le> norm x"
+ unfolding vector_norm_def
+ by (rule setL2_nonneg)
+ show "norm x = 0 \<longleftrightarrow> x = 0"
+ unfolding vector_norm_def
+ by (simp add: setL2_eq_0_iff Cart_eq)
+ show "norm (x + y) \<le> norm x + norm y"
+ unfolding vector_norm_def
+ apply (rule order_trans [OF _ setL2_triangle_ineq])
+ apply (rule setL2_mono)
+ apply (simp add: vector_component norm_triangle_ineq)
+ apply simp
+ done
+ show "norm (scaleR a x) = \<bar>a\<bar> * norm x"
+ unfolding vector_norm_def
+ by (simp add: vector_component norm_scaleR setL2_right_distrib
+ cong: strong_setL2_cong)
+ show "sgn x = scaleR (inverse (norm x)) x"
+ by (rule vector_sgn_def)
+qed
+
+end
+
+subsection {* Inner products *}
+
+instantiation "^" :: (real_inner, type) real_inner
+begin
+
+definition vector_inner_def:
+ "inner x y = setsum (\<lambda>i. inner (x$i) (y$i)) {1 .. dimindex(UNIV::'b set)}"
+
+instance proof
+ fix r :: real and x y z :: "'a ^ 'b"
+ show "inner x y = inner y x"
+ unfolding vector_inner_def
+ by (simp add: inner_commute)
+ show "inner (x + y) z = inner x z + inner y z"
+ unfolding vector_inner_def
+ by (vector inner_left_distrib)
+ show "inner (scaleR r x) y = r * inner x y"
+ unfolding vector_inner_def
+ by (vector inner_scaleR_left)
+ show "0 \<le> inner x x"
+ unfolding vector_inner_def
+ by (simp add: setsum_nonneg)
+ show "inner x x = 0 \<longleftrightarrow> x = 0"
+ unfolding vector_inner_def
+ by (simp add: Cart_eq setsum_nonneg_eq_0_iff)
+ show "norm x = sqrt (inner x x)"
+ unfolding vector_inner_def vector_norm_def setL2_def
+ by (simp add: power2_norm_eq_inner)
+qed
+
+end
+
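(* Sketch, not part of the changeset: the inner product is likewise defined
   componentwise, inner x y = (\<Sum>i\<in>{1 .. dimindex(UNIV::'b set)}. inner (x$i) (y$i)).
   Assuming inner on real is plain multiplication (an assumption about
   Inner_Product.thy, not shown in this hunk), it agrees with the dot product on
   real ^ 'n, and the final obligation norm x = sqrt (inner x x) mirrors
   real_vector_norm_def (norm x = sqrt (x \<bullet> x)) proved further down. *)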
subsection{* Properties of the dot product. *}
lemma dot_sym: "(x::'a:: {comm_monoid_add, ab_semigroup_mult} ^ 'n) \<bullet> y = y \<bullet> x"
@@ -363,18 +629,7 @@
lemma dot_pos_lt: "(0 < x \<bullet> x) \<longleftrightarrow> (x::'a::{ordered_ring_strict,ring_no_zero_divisors} ^ 'n) \<noteq> 0" using dot_eq_0[of x] dot_pos_le[of x]
by (auto simp add: le_less)
-subsection {* Introduce norms, but defer many properties till we get square roots. *}
-text{* FIXME : This is ugly *}
-defs (overloaded)
- real_of_real_def [code inline, simp]: "real == id"
-
-instantiation "^" :: ("{times, comm_monoid_add}", type) norm begin
-definition real_vector_norm_def: "norm \<equiv> (\<lambda>x. sqrt (real (x \<bullet> x)))"
-instance ..
-end
-
-
-subsection{* The collapse of the general concepts to dimention one. *}
+subsection{* The collapse of the general concepts to dimension one. *}
lemma vector_one: "(x::'a ^1) = (\<chi> i. (x$1))"
by (vector dimindex_def)
@@ -385,11 +640,15 @@
apply (simp only: vector_one[symmetric])
done
+lemma norm_vector_1: "norm (x :: _^1) = norm (x$1)"
+ by (simp add: vector_norm_def dimindex_def)
+
lemma norm_real: "norm(x::real ^ 1) = abs(x$1)"
- by (simp add: real_vector_norm_def)
+ by (simp add: norm_vector_1)
text{* Metric *}
+text {* FIXME: generalize to arbitrary @{text real_normed_vector} types *}
definition dist:: "real ^ 'n \<Rightarrow> real ^ 'n \<Rightarrow> real" where
"dist x y = norm (x - y)"
@@ -501,27 +760,18 @@
text{* Hence derive more interesting properties of the norm. *}
lemma norm_0: "norm (0::real ^ 'n) = 0"
- by (simp add: real_vector_norm_def dot_eq_0)
-
-lemma norm_pos_le: "0 <= norm (x::real^'n)"
- by (simp add: real_vector_norm_def dot_pos_le)
-lemma norm_neg: " norm(-x) = norm (x:: real ^ 'n)"
- by (simp add: real_vector_norm_def dot_lneg dot_rneg)
-lemma norm_sub: "norm(x - y) = norm(y - (x::real ^ 'n))"
- by (metis norm_neg minus_diff_eq)
+ by (rule norm_zero)
+
lemma norm_mul: "norm(a *s x) = abs(a) * norm x"
- by (simp add: real_vector_norm_def dot_lmult dot_rmult mult_assoc[symmetric] real_sqrt_mult)
+ by (simp add: vector_norm_def vector_component setL2_right_distrib
+ abs_mult cong: strong_setL2_cong)
lemma norm_eq_0_dot: "(norm x = 0) \<longleftrightarrow> (x \<bullet> x = (0::real))"
+ by (simp add: vector_norm_def dot_def setL2_def power2_eq_square)
+lemma real_vector_norm_def: "norm x = sqrt (x \<bullet> x)"
+ by (simp add: vector_norm_def setL2_def dot_def power2_eq_square)
+lemma norm_pow_2: "norm x ^ 2 = x \<bullet> x"
by (simp add: real_vector_norm_def)
-lemma norm_eq_0: "norm x = 0 \<longleftrightarrow> x = (0::real ^ 'n)"
- by (simp add: real_vector_norm_def dot_eq_0)
-lemma norm_pos_lt: "0 < norm x \<longleftrightarrow> x \<noteq> (0::real ^ 'n)"
- by (metis less_le real_vector_norm_def norm_pos_le norm_eq_0)
-lemma norm_pow_2: "norm x ^ 2 = x \<bullet> x"
- by (simp add: real_vector_norm_def dot_pos_le)
-lemma norm_eq_0_imp: "norm x = 0 ==> x = (0::real ^'n)" by (metis norm_eq_0)
-lemma norm_le_0: "norm x <= 0 \<longleftrightarrow> x = (0::real ^'n)"
- by (metis norm_eq_0 norm_pos_le order_antisym)
+lemma norm_eq_0_imp: "norm x = 0 ==> x = (0::real ^'n)" by (metis norm_eq_zero)
lemma vector_mul_eq_0: "(a *s x = 0) \<longleftrightarrow> a = (0::'a::idom) \<or> x = 0"
by vector
lemma vector_mul_lcancel: "a *s x = a *s y \<longleftrightarrow> a = (0::real) \<or> x = y"
@@ -535,14 +785,14 @@
lemma norm_cauchy_schwarz: "x \<bullet> y <= norm x * norm y"
proof-
{assume "norm x = 0"
- hence ?thesis by (simp add: norm_eq_0 dot_lzero dot_rzero norm_0)}
+ hence ?thesis by (simp add: dot_lzero dot_rzero)}
moreover
{assume "norm y = 0"
- hence ?thesis by (simp add: norm_eq_0 dot_lzero dot_rzero norm_0)}
+ hence ?thesis by (simp add: dot_lzero dot_rzero)}
moreover
{assume h: "norm x \<noteq> 0" "norm y \<noteq> 0"
let ?z = "norm y *s x - norm x *s y"
- from h have p: "norm x * norm y > 0" by (metis norm_pos_le le_less zero_compare_simps)
+ from h have p: "norm x * norm y > 0" by (metis norm_ge_zero le_less zero_compare_simps)
from dot_pos_le[of ?z]
have "(norm x * norm y) * (x \<bullet> y) \<le> norm x ^2 * norm y ^2"
apply (simp add: dot_rsub dot_lsub dot_lmult dot_rmult ring_simps)
@@ -553,26 +803,16 @@
ultimately show ?thesis by metis
qed
-lemma norm_abs[simp]: "abs (norm x) = norm (x::real ^'n)"
- using norm_pos_le[of x] by (simp add: real_abs_def linorder_linear)
-
lemma norm_cauchy_schwarz_abs: "\<bar>x \<bullet> y\<bar> \<le> norm x * norm y"
using norm_cauchy_schwarz[of x y] norm_cauchy_schwarz[of x "-y"]
- by (simp add: real_abs_def dot_rneg norm_neg)
-lemma norm_triangle: "norm(x + y) <= norm x + norm (y::real ^'n)"
- unfolding real_vector_norm_def
- apply (rule real_le_lsqrt)
- apply (auto simp add: dot_pos_le real_vector_norm_def[symmetric] norm_pos_le norm_pow_2[symmetric] intro: add_nonneg_nonneg)[1]
- apply (auto simp add: dot_pos_le real_vector_norm_def[symmetric] norm_pos_le norm_pow_2[symmetric] intro: add_nonneg_nonneg)[1]
- apply (simp add: dot_ladd dot_radd dot_sym )
- by (simp add: norm_pow_2[symmetric] power2_eq_square ring_simps norm_cauchy_schwarz)
+ by (simp add: real_abs_def dot_rneg)
lemma norm_triangle_sub: "norm (x::real ^'n) <= norm(y) + norm(x - y)"
- using norm_triangle[of "y" "x - y"] by (simp add: ring_simps)
+ using norm_triangle_ineq[of "y" "x - y"] by (simp add: ring_simps)
lemma norm_triangle_le: "norm(x::real ^'n) + norm y <= e ==> norm(x + y) <= e"
- by (metis order_trans norm_triangle)
+ by (metis order_trans norm_triangle_ineq)
lemma norm_triangle_lt: "norm(x::real ^'n) + norm(y) < e ==> norm(x + y) < e"
- by (metis basic_trans_rules(21) norm_triangle)
+ by (metis basic_trans_rules(21) norm_triangle_ineq)
lemma setsum_delta:
assumes fS: "finite S"
@@ -597,19 +837,10 @@
qed
lemma component_le_norm: "i \<in> {1 .. dimindex(UNIV :: 'n set)} ==> \<bar>x$i\<bar> <= norm (x::real ^ 'n)"
-proof(simp add: real_vector_norm_def, rule real_le_rsqrt, clarsimp)
- assume i: "Suc 0 \<le> i" "i \<le> dimindex (UNIV :: 'n set)"
- let ?S = "{1 .. dimindex(UNIV :: 'n set)}"
- let ?f = "(\<lambda>k. if k = i then x$i ^2 else 0)"
- have fS: "finite ?S" by simp
- from i setsum_delta[OF fS, of i "\<lambda>k. x$i ^ 2"]
- have th: "x$i^2 = setsum ?f ?S" by simp
- let ?g = "\<lambda>k. x$k * x$k"
- {fix x assume x: "x \<in> ?S" have "?f x \<le> ?g x" by (simp add: power2_eq_square)}
- with setsum_mono[of ?S ?f ?g]
- have "setsum ?f ?S \<le> setsum ?g ?S" by blast
- then show "x$i ^2 \<le> x \<bullet> (x:: real ^ 'n)" unfolding dot_def th[symmetric] .
-qed
+ apply (simp add: vector_norm_def)
+ apply (rule member_le_setL2, simp_all)
+ done
+
lemma norm_bound_component_le: "norm(x::real ^ 'n) <= e
==> \<forall>i \<in> {1 .. dimindex(UNIV:: 'n set)}. \<bar>x$i\<bar> <= e"
by (metis component_le_norm order_trans)
@@ -619,24 +850,12 @@
by (metis component_le_norm basic_trans_rules(21))
lemma norm_le_l1: "norm (x:: real ^'n) <= setsum(\<lambda>i. \<bar>x$i\<bar>) {1..dimindex(UNIV::'n set)}"
-proof (simp add: real_vector_norm_def, rule real_le_lsqrt,simp add: dot_pos_le, simp add: setsum_mono, simp add: dot_def, induct "dimindex(UNIV::'n set)")
- case 0 thus ?case by simp
-next
- case (Suc n)
- have th: "2 * (\<bar>x$(Suc n)\<bar> * (\<Sum>i = Suc 0..n. \<bar>x$i\<bar>)) \<ge> 0"
- apply simp
- apply (rule mult_nonneg_nonneg)
- by (simp_all add: setsum_abs_ge_zero)
-
- from Suc
- show ?case using th by (simp add: power2_eq_square ring_simps)
-qed
+ by (simp add: vector_norm_def setL2_le_setsum)
lemma real_abs_norm: "\<bar> norm x\<bar> = norm (x :: real ^'n)"
- by (simp add: norm_pos_le)
+ by (rule abs_norm_cancel)
lemma real_abs_sub_norm: "\<bar>norm(x::real ^'n) - norm y\<bar> <= norm(x - y)"
- apply (simp add: abs_le_iff ring_simps)
- by (metis norm_triangle_sub norm_sub)
+ by (rule norm_triangle_ineq3)
lemma norm_le: "norm(x::real ^ 'n) <= norm(y) \<longleftrightarrow> x \<bullet> x <= y \<bullet> y"
by (simp add: real_vector_norm_def)
lemma norm_lt: "norm(x::real ^'n) < norm(y) \<longleftrightarrow> x \<bullet> x < y \<bullet> y"
@@ -652,13 +871,7 @@
by (simp add: real_vector_norm_def dot_pos_le )
lemma norm_eq_square: "norm(x) = a \<longleftrightarrow> 0 <= a \<and> x \<bullet> x = a^2"
-proof-
- have th: "\<And>x y::real. x^2 = y^2 \<longleftrightarrow> x = y \<or> x = -y" by algebra
- show ?thesis using norm_pos_le[of x]
- apply (simp add: dot_square_norm th)
- apply arith
- done
-qed
+ by (auto simp add: real_vector_norm_def)
lemma real_abs_le_square_iff: "\<bar>x\<bar> \<le> \<bar>y\<bar> \<longleftrightarrow> (x::real)^2 \<le> y^2"
proof-
@@ -668,14 +881,14 @@
qed
lemma norm_le_square: "norm(x) <= a \<longleftrightarrow> 0 <= a \<and> x \<bullet> x <= a^2"
- using norm_pos_le[of x]
apply (simp add: dot_square_norm real_abs_le_square_iff[symmetric])
+ using norm_ge_zero[of x]
apply arith
done
lemma norm_ge_square: "norm(x) >= a \<longleftrightarrow> a <= 0 \<or> x \<bullet> x >= a ^ 2"
- using norm_pos_le[of x]
apply (simp add: dot_square_norm real_abs_le_square_iff[symmetric])
+ using norm_ge_zero[of x]
apply arith
done
@@ -746,14 +959,14 @@
lemma pth_d: "x + (0::real ^'n) == x" by (atomize (full)) vector
lemma norm_imp_pos_and_ge: "norm (x::real ^ 'n) == n \<Longrightarrow> norm x \<ge> 0 \<and> n \<ge> norm x"
- by (atomize) (auto simp add: norm_pos_le)
+ by (atomize) (auto simp add: norm_ge_zero)
lemma real_eq_0_iff_le_ge_0: "(x::real) = 0 == x \<ge> 0 \<and> -x \<ge> 0" by arith
lemma norm_pths:
"(x::real ^'n) = y \<longleftrightarrow> norm (x - y) \<le> 0"
"x \<noteq> y \<longleftrightarrow> \<not> (norm (x - y) \<le> 0)"
- using norm_pos_le[of "x - y"] by (auto simp add: norm_0 norm_eq_0)
+ using norm_ge_zero[of "x - y"] by auto
use "normarith.ML"
@@ -797,11 +1010,6 @@
lemma dist_le_0: "dist x y <= 0 \<longleftrightarrow> x = y" by norm
-instantiation "^" :: (monoid_add,type) monoid_add
-begin
- instance by (intro_classes)
-end
-
lemma setsum_eq: "setsum f S = (\<chi> i. setsum (\<lambda>x. (f x)$i ) S)"
apply vector
apply auto
@@ -873,7 +1081,7 @@
assumes fS: "finite S"
shows "norm (setsum f S) <= setsum (\<lambda>x. norm(f x)) S"
proof(induct rule: finite_induct[OF fS])
- case 1 thus ?case by (simp add: norm_zero)
+ case 1 thus ?case by simp
next
case (2 x S)
from "2.hyps" have "norm (setsum f (insert x S)) \<le> norm (f x) + norm (setsum f S)" by (simp add: norm_triangle_ineq)
@@ -887,10 +1095,10 @@
assumes fS: "finite S"
shows "norm (setsum f S) <= setsum (\<lambda>x. norm(f x)) S"
proof(induct rule: finite_induct[OF fS])
- case 1 thus ?case by simp norm
+ case 1 thus ?case by simp
next
case (2 x S)
- from "2.hyps" have "norm (setsum f (insert x S)) \<le> norm (f x) + norm (setsum f S)" apply (simp add: norm_triangle_ineq) by norm
+ from "2.hyps" have "norm (setsum f (insert x S)) \<le> norm (f x) + norm (setsum f S)" by (simp add: norm_triangle_ineq)
also have "\<dots> \<le> norm (f x) + setsum (\<lambda>x. norm(f x)) S"
using "2.hyps" by simp
finally show ?case using "2.hyps" by simp
@@ -936,45 +1144,6 @@
using real_setsum_norm_le[OF fS K] setsum_constant[symmetric]
by simp
-instantiation "^" :: ("{scaleR, one, times}",type) scaleR
-begin
-
-definition vector_scaleR_def: "(scaleR :: real \<Rightarrow> 'a ^'b \<Rightarrow> 'a ^'b) \<equiv> (\<lambda> c x . (scaleR c 1) *s x)"
-instance ..
-end
-
-instantiation "^" :: ("ring_1",type) ring_1
-begin
-instance by intro_classes
-end
-
-instantiation "^" :: (real_algebra_1,type) real_vector
-begin
-
-instance
- apply intro_classes
- apply (simp_all add: vector_scaleR_def)
- apply (simp_all add: vector_sadd_rdistrib vector_add_ldistrib vector_smult_lid vector_smult_assoc scaleR_left_distrib mult_commute)
- done
-end
-
-instantiation "^" :: (real_algebra_1,type) real_algebra
-begin
-
-instance
- apply intro_classes
- apply (simp_all add: vector_scaleR_def ring_simps)
- apply vector
- apply vector
- done
-end
-
-instantiation "^" :: (real_algebra_1,type) real_algebra_1
-begin
-
-instance ..
-end
-
lemma setsum_vmul:
fixes f :: "'a \<Rightarrow> 'b::{real_normed_vector,semiring, mult_zero}"
assumes fS: "finite S"
@@ -1211,7 +1380,7 @@
by (auto simp add: setsum_component intro: abs_le_D1)
have Pne: "setsum (\<lambda>x. \<bar>f x $ i\<bar>) ?Pn \<le> e"
using i component_le_norm[OF i, of "setsum (\<lambda>x. - f x) ?Pn"] fPs[OF PnP]
- by (auto simp add: setsum_negf norm_neg setsum_component vector_component intro: abs_le_D1)
+ by (auto simp add: setsum_negf setsum_component vector_component intro: abs_le_D1)
have "setsum (\<lambda>x. \<bar>f x $ i\<bar>) P = setsum (\<lambda>x. \<bar>f x $ i\<bar>) ?Pp + setsum (\<lambda>x. \<bar>f x $ i\<bar>) ?Pn"
apply (subst thp)
apply (rule setsum_Un_nonzero)
@@ -1535,7 +1704,7 @@
unfolding norm_mul
apply (simp only: mult_commute)
apply (rule mult_mono)
- by (auto simp add: ring_simps norm_pos_le) }
+ by (auto simp add: ring_simps norm_ge_zero) }
then have th: "\<forall>i\<in> ?S. norm ((x$i) *s f (basis i :: real ^'m)) \<le> norm (f (basis i)) * norm x" by metis
from real_setsum_norm_le[OF fS, of "\<lambda>i. (x$i) *s (f (basis i))", OF th]
have "norm (f x) \<le> ?B * norm x" unfolding th0 setsum_left_distrib by metis}
@@ -1552,16 +1721,18 @@
let ?K = "\<bar>B\<bar> + 1"
have Kp: "?K > 0" by arith
{assume C: "B < 0"
- have "norm (1::real ^ 'n) > 0" by (simp add: norm_pos_lt)
+ have "norm (1::real ^ 'n) > 0" by (simp add: zero_less_norm_iff)
with C have "B * norm (1:: real ^ 'n) < 0"
by (simp add: zero_compare_simps)
- with B[rule_format, of 1] norm_pos_le[of "f 1"] have False by simp
+ with B[rule_format, of 1] norm_ge_zero[of "f 1"] have False by simp
}
then have Bp: "B \<ge> 0" by ferrack
{fix x::"real ^ 'n"
have "norm (f x) \<le> ?K * norm x"
- using B[rule_format, of x] norm_pos_le[of x] norm_pos_le[of "f x"] Bp
- by (auto simp add: ring_simps split add: abs_split)
+ using B[rule_format, of x] norm_ge_zero[of x] norm_ge_zero[of "f x"] Bp
+ apply (auto simp add: ring_simps split add: abs_split)
+ apply (erule order_trans, simp)
+ done
}
then show ?thesis using Kp by blast
qed
@@ -1641,9 +1812,9 @@
apply simp
apply (auto simp add: bilinear_rmul[OF bh] bilinear_lmul[OF bh] norm_mul ring_simps)
apply (rule mult_mono)
- apply (auto simp add: norm_pos_le zero_le_mult_iff component_le_norm)
+ apply (auto simp add: norm_ge_zero zero_le_mult_iff component_le_norm)
apply (rule mult_mono)
- apply (auto simp add: norm_pos_le zero_le_mult_iff component_le_norm)
+ apply (auto simp add: norm_ge_zero zero_le_mult_iff component_le_norm)
done}
then show ?thesis by metis
qed
@@ -1663,7 +1834,7 @@
have "B * norm x * norm y \<le> ?K * norm x * norm y"
apply -
apply (rule mult_right_mono, rule mult_right_mono)
- by (auto simp add: norm_pos_le)
+ by (auto simp add: norm_ge_zero)
then have "norm (h x y) \<le> ?K * norm x * norm y"
using B[rule_format, of x y] by simp}
with Kp show ?thesis by blast
@@ -2276,21 +2447,21 @@
moreover
{assume H: ?lhs
from H[rule_format, of "basis 1"]
- have bp: "b \<ge> 0" using norm_pos_le[of "f (basis 1)"] dimindex_ge_1[of "UNIV:: 'n set"]
- by (auto simp add: norm_basis)
+ have bp: "b \<ge> 0" using norm_ge_zero[of "f (basis 1)"] dimindex_ge_1[of "UNIV:: 'n set"]
+ by (auto simp add: norm_basis elim: order_trans [OF norm_ge_zero])
{fix x :: "real ^'n"
{assume "x = 0"
- then have "norm (f x) \<le> b * norm x" by (simp add: linear_0[OF lf] norm_0 bp)}
+ then have "norm (f x) \<le> b * norm x" by (simp add: linear_0[OF lf] bp)}
moreover
{assume x0: "x \<noteq> 0"
- hence n0: "norm x \<noteq> 0" by (metis norm_eq_0)
+ hence n0: "norm x \<noteq> 0" by (metis norm_eq_zero)
let ?c = "1/ norm x"
- have "norm (?c*s x) = 1" by (simp add: n0 norm_mul)
+ have "norm (?c*s x) = 1" using x0 by (simp add: n0 norm_mul)
with H have "norm (f(?c*s x)) \<le> b" by blast
hence "?c * norm (f x) \<le> b"
by (simp add: linear_cmul[OF lf] norm_mul)
hence "norm (f x) \<le> b * norm x"
- using n0 norm_pos_le[of x] by (auto simp add: field_simps)}
+ using n0 norm_ge_zero[of x] by (auto simp add: field_simps)}
ultimately have "norm (f x) \<le> b * norm x" by blast}
then have ?rhs by blast}
ultimately show ?thesis by blast
@@ -2322,12 +2493,12 @@
qed
lemma onorm_pos_le: assumes lf: "linear (f::real ^'n \<Rightarrow> real ^'m)" shows "0 <= onorm f"
- using order_trans[OF norm_pos_le onorm(1)[OF lf, of "basis 1"], unfolded norm_basis_1] by simp
+ using order_trans[OF norm_ge_zero onorm(1)[OF lf, of "basis 1"], unfolded norm_basis_1] by simp
lemma onorm_eq_0: assumes lf: "linear (f::real ^'n \<Rightarrow> real ^'m)"
shows "onorm f = 0 \<longleftrightarrow> (\<forall>x. f x = 0)"
using onorm[OF lf]
- apply (auto simp add: norm_0 onorm_pos_le norm_le_0)
+ apply (auto simp add: onorm_pos_le)
apply atomize
apply (erule allE[where x="0::real"])
using onorm_pos_le[OF lf]
@@ -2365,7 +2536,7 @@
lemma onorm_neg_lemma: assumes lf: "linear (f::real ^'n \<Rightarrow> real^'m)"
shows "onorm (\<lambda>x. - f x) \<le> onorm f"
using onorm[OF linear_compose_neg[OF lf]] onorm[OF lf]
- unfolding norm_neg by metis
+ unfolding norm_minus_cancel by metis
lemma onorm_neg: assumes lf: "linear (f::real ^'n \<Rightarrow> real^'m)"
shows "onorm (\<lambda>x. - f x) = onorm f"
@@ -2377,7 +2548,7 @@
shows "onorm (\<lambda>x. f x + g x) <= onorm f + onorm g"
apply(rule onorm(2)[OF linear_compose_add[OF lf lg], rule_format])
apply (rule order_trans)
- apply (rule norm_triangle)
+ apply (rule norm_triangle_ineq)
apply (simp add: distrib)
apply (rule add_mono)
apply (rule onorm(1)[OF lf])
@@ -2594,7 +2765,7 @@
by (simp add: dot_def setsum_add_split[OF th_0, of _ ?m] pastecart_def dimindex_finite_sum Cart_lambda_beta setsum_nonneg zero_le_square del: One_nat_def)
then show ?thesis
unfolding th0
- unfolding real_vector_norm_def real_sqrt_le_iff real_of_real_def id_def
+ unfolding real_vector_norm_def real_sqrt_le_iff id_def
by (simp add: dot_def dimindex_finite_sum Cart_lambda_beta)
qed
@@ -2626,7 +2797,7 @@
by (simp add: dot_def setsum_add_split[OF th_0, of _ ?m] pastecart_def dimindex_finite_sum Cart_lambda_beta setsum_nonneg zero_le_square setsum_reindex[OF finj, unfolded fS] del: One_nat_def)
then show ?thesis
unfolding th0
- unfolding real_vector_norm_def real_sqrt_le_iff real_of_real_def id_def
+ unfolding real_vector_norm_def real_sqrt_le_iff id_def
by (simp add: dot_def dimindex_finite_sum Cart_lambda_beta)
qed
@@ -2683,7 +2854,7 @@
qed
lemma norm_pastecart: "norm(pastecart x y) <= norm(x :: real ^ _) + norm(y)"
- unfolding real_vector_norm_def dot_pastecart real_sqrt_le_iff real_of_real_def id_def
+ unfolding real_vector_norm_def dot_pastecart real_sqrt_le_iff id_def
apply (rule power2_le_imp_le)
apply (simp add: real_sqrt_pow2[OF add_nonneg_nonneg[OF dot_pos_le[of x] dot_pos_le[of y]]])
apply (auto simp add: power2_eq_square ring_simps)
@@ -5007,7 +5178,7 @@
apply blast
by (rule abs_ge_zero)
from real_le_lsqrt[OF dot_pos_le th th1]
- show ?thesis unfolding real_vector_norm_def real_of_real_def id_def .
+ show ?thesis unfolding real_vector_norm_def id_def .
qed
(* Equality in Cauchy-Schwarz and triangle inequalities. *)
@@ -5015,10 +5186,10 @@
lemma norm_cauchy_schwarz_eq: "(x::real ^'n) \<bullet> y = norm x * norm y \<longleftrightarrow> norm x *s y = norm y *s x" (is "?lhs \<longleftrightarrow> ?rhs")
proof-
{assume h: "x = 0"
- hence ?thesis by (simp add: norm_0)}
+ hence ?thesis by simp}
moreover
{assume h: "y = 0"
- hence ?thesis by (simp add: norm_0)}
+ hence ?thesis by simp}
moreover
{assume x: "x \<noteq> 0" and y: "y \<noteq> 0"
from dot_eq_0[of "norm y *s x - norm x *s y"]
@@ -5032,7 +5203,7 @@
also have "\<dots> \<longleftrightarrow> (2 * norm x * norm y * (norm x * norm y - x \<bullet> y) = 0)" using x y
by (simp add: ring_simps dot_sym)
also have "\<dots> \<longleftrightarrow> ?lhs" using x y
- apply (simp add: norm_eq_0)
+ apply simp
by metis
finally have ?thesis by blast}
ultimately show ?thesis by blast
@@ -5043,14 +5214,14 @@
proof-
have th: "\<And>(x::real) a. a \<ge> 0 \<Longrightarrow> abs x = a \<longleftrightarrow> x = a \<or> x = - a" by arith
have "?rhs \<longleftrightarrow> norm x *s y = norm y *s x \<or> norm (- x) *s y = norm y *s (- x)"
- apply (simp add: norm_neg) by vector
+ apply simp by vector
also have "\<dots> \<longleftrightarrow>(x \<bullet> y = norm x * norm y \<or>
(-x) \<bullet> y = norm x * norm y)"
unfolding norm_cauchy_schwarz_eq[symmetric]
- unfolding norm_neg
+ unfolding norm_minus_cancel
norm_mul by blast
also have "\<dots> \<longleftrightarrow> ?lhs"
- unfolding th[OF mult_nonneg_nonneg, OF norm_pos_le[of x] norm_pos_le[of y]] dot_lneg
+ unfolding th[OF mult_nonneg_nonneg, OF norm_ge_zero[of x] norm_ge_zero[of y]] dot_lneg
by arith
finally show ?thesis ..
qed
@@ -5058,17 +5229,17 @@
lemma norm_triangle_eq: "norm(x + y) = norm x + norm y \<longleftrightarrow> norm x *s y = norm y *s x"
proof-
{assume x: "x =0 \<or> y =0"
- hence ?thesis by (cases "x=0", simp_all add: norm_0)}
+ hence ?thesis by (cases "x=0", simp_all)}
moreover
{assume x: "x \<noteq> 0" and y: "y \<noteq> 0"
hence "norm x \<noteq> 0" "norm y \<noteq> 0"
- by (simp_all add: norm_eq_0)
+ by simp_all
hence n: "norm x > 0" "norm y > 0"
- using norm_pos_le[of x] norm_pos_le[of y]
+ using norm_ge_zero[of x] norm_ge_zero[of y]
by arith+
have th: "\<And>(a::real) b c. a + b + c \<noteq> 0 ==> (a = b + c \<longleftrightarrow> a^2 = (b + c)^2)" by algebra
have "norm(x + y) = norm x + norm y \<longleftrightarrow> norm(x + y)^ 2 = (norm x + norm y) ^2"
- apply (rule th) using n norm_pos_le[of "x + y"]
+ apply (rule th) using n norm_ge_zero[of "x + y"]
by arith
also have "\<dots> \<longleftrightarrow> norm x *s y = norm y *s x"
unfolding norm_cauchy_schwarz_eq[symmetric]
@@ -5138,8 +5309,8 @@
lemma norm_cauchy_schwarz_equal: "abs(x \<bullet> y) = norm x * norm y \<longleftrightarrow> collinear {(0::real^'n),x,y}"
unfolding norm_cauchy_schwarz_abs_eq
-apply (cases "x=0", simp_all add: collinear_2 norm_0)
-apply (cases "y=0", simp_all add: collinear_2 norm_0 insert_commute)
+apply (cases "x=0", simp_all add: collinear_2)
+apply (cases "y=0", simp_all add: collinear_2 insert_commute)
unfolding collinear_lemma
apply simp
apply (subgoal_tac "norm x \<noteq> 0")
@@ -5164,8 +5335,8 @@
apply (simp add: ring_simps)
apply (case_tac "c <= 0", simp add: ring_simps)
apply (simp add: ring_simps)
-apply (simp add: norm_eq_0)
-apply (simp add: norm_eq_0)
+apply simp
+apply simp
done
-end
\ No newline at end of file
+end
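
For intuition behind the renamed norm facts used in the hunks above (norm_ge_zero, norm_eq_zero, norm_minus_cancel, norm_triangle_ineq): on the reals the norm is just the absolute value, so the same properties can be spot-checked numerically. A small Standard ML sketch, illustration only, with arbitrarily chosen values:

    (* reals standing in for vectors, abs standing in for norm *)
    val x = 3.0 and y = ~5.0;
    val nonneg   = abs x >= 0.0;                  (* norm_ge_zero *)
    val triangle = abs (x + y) <= abs x + abs y;  (* norm_triangle_ineq: 2.0 <= 8.0 *)
    val cancel   = abs (~x) - abs x;              (* norm_minus_cancel: 0.0 *)
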
--- a/src/HOL/Library/Float.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Float.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,7 +1,10 @@
-(* Title: HOL/Library/Float.thy
- * Author: Steven Obua 2008
- * Johannes Hölzl, TU Muenchen <hoelzl@in.tum.de> 2008 / 2009
- *)
+(* Title: HOL/Library/Float.thy
+ Author: Steven Obua 2008
+ Author: Johannes Hoelzl, TU Muenchen <hoelzl@in.tum.de> 2008 / 2009
+*)
+
+header {* Floating-Point Numbers *}
+
theory Float
imports Complex_Main
begin
@@ -792,7 +795,7 @@
have "x \<noteq> y"
proof (rule ccontr)
assume "\<not> x \<noteq> y" hence "x = y" by auto
- have "?X mod y = 0" unfolding `x = y` using zmod_zmult_self2 by auto
+ have "?X mod y = 0" unfolding `x = y` using mod_mult_self1_is_0 by auto
thus False using False by auto
qed
hence "x < y" using `x \<le> y` by auto
@@ -1090,7 +1093,7 @@
{ have "2^(prec - 1) * m \<le> 2^(prec - 1) * 2^?b" using `m < 2^?b`[THEN less_imp_le] by (rule mult_left_mono, auto)
also have "\<dots> = 2 ^ nat (int prec + bitlen m - 1)" unfolding pow_split zpower_zadd_distrib by auto
finally have "2^(prec - 1) * m div m \<le> 2 ^ nat (int prec + bitlen m - 1) div m" using `0 < m` by (rule zdiv_mono1)
- hence "2^(prec - 1) \<le> 2 ^ nat (int prec + bitlen m - 1) div m" unfolding zdiv_zmult_self1[OF `m \<noteq> 0`] .
+ hence "2^(prec - 1) \<le> 2 ^ nat (int prec + bitlen m - 1) div m" unfolding div_mult_self2_is_id[OF `m \<noteq> 0`] .
hence "2^(prec - 1) * inverse (2 ^ nat (int prec + bitlen m - 1)) \<le> ?d"
unfolding real_of_int_le_iff[of "2^(prec - 1)", symmetric] by auto }
from mult_left_mono[OF this[unfolded pow_split power_add inverse_mult_distrib real_mult_assoc[symmetric] right_inverse[OF pow_not0] real_mult_1], of "2^?e"]
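
The replacements in these Float.thy hunks lean on two elementary facts of the shape (x * y) mod y = 0 and (a * b) div b = a for b <> 0. A numeric spot-check in plain Standard ML, with arbitrary values, illustration only:

    val m0 = (12 * 5) mod 5;    (* 0  -- the mod_mult_self1_is_0 shape *)
    val d  = (12 * 5) div 5;    (* 12 -- the div_mult_self2_is_id shape *)
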
--- a/src/HOL/Library/Fundamental_Theorem_Algebra.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Fundamental_Theorem_Algebra.thy Wed Mar 04 10:45:52 2009 +0100
@@ -177,151 +177,6 @@
thus ?thesis by blast
qed
-
-subsection{* Some theorems about Sequences*}
-text{* Given a binary function @{text "f:: nat \<Rightarrow> 'a \<Rightarrow> 'a"}, its values are uniquely determined by a function g *}
-
-lemma num_Axiom: "EX! g. g 0 = e \<and> (\<forall>n. g (Suc n) = f n (g n))"
- unfolding Ex1_def
- apply (rule_tac x="nat_rec e f" in exI)
- apply (rule conjI)+
-apply (rule def_nat_rec_0, simp)
-apply (rule allI, rule def_nat_rec_Suc, simp)
-apply (rule allI, rule impI, rule ext)
-apply (erule conjE)
-apply (induct_tac x)
-apply (simp add: nat_rec_0)
-apply (erule_tac x="n" in allE)
-apply (simp)
-done
-
-text{* for any sequence, there is a mootonic subsequence *}
-lemma seq_monosub: "\<exists>f. subseq f \<and> monoseq (\<lambda> n. (s (f n)))"
-proof-
- {assume H: "\<forall>n. \<exists>p >n. \<forall> m\<ge>p. s m \<le> s p"
- let ?P = "\<lambda> p n. p > n \<and> (\<forall>m \<ge> p. s m \<le> s p)"
- from num_Axiom[of "SOME p. ?P p 0" "\<lambda>p n. SOME p. ?P p n"]
- obtain f where f: "f 0 = (SOME p. ?P p 0)" "\<forall>n. f (Suc n) = (SOME p. ?P p (f n))" by blast
- have "?P (f 0) 0" unfolding f(1) some_eq_ex[of "\<lambda>p. ?P p 0"]
- using H apply -
- apply (erule allE[where x=0], erule exE, rule_tac x="p" in exI)
- unfolding order_le_less by blast
- hence f0: "f 0 > 0" "\<forall>m \<ge> f 0. s m \<le> s (f 0)" by blast+
- {fix n
- have "?P (f (Suc n)) (f n)"
- unfolding f(2)[rule_format, of n] some_eq_ex[of "\<lambda>p. ?P p (f n)"]
- using H apply -
- apply (erule allE[where x="f n"], erule exE, rule_tac x="p" in exI)
- unfolding order_le_less by blast
- hence "f (Suc n) > f n" "\<forall>m \<ge> f (Suc n). s m \<le> s (f (Suc n))" by blast+}
- note fSuc = this
- {fix p q assume pq: "p \<ge> f q"
- have "s p \<le> s(f(q))" using f0(2)[rule_format, of p] pq fSuc
- by (cases q, simp_all) }
- note pqth = this
- {fix q
- have "f (Suc q) > f q" apply (induct q)
- using f0(1) fSuc(1)[of 0] apply simp by (rule fSuc(1))}
- note fss = this
- from fss have th1: "subseq f" unfolding subseq_Suc_iff ..
- {fix a b
- have "f a \<le> f (a + b)"
- proof(induct b)
- case 0 thus ?case by simp
- next
- case (Suc b)
- from fSuc(1)[of "a + b"] Suc.hyps show ?case by simp
- qed}
- note fmon0 = this
- have "monoseq (\<lambda>n. s (f n))"
- proof-
- {fix n
- have "s (f n) \<ge> s (f (Suc n))"
- proof(cases n)
- case 0
- assume n0: "n = 0"
- from fSuc(1)[of 0] have th0: "f 0 \<le> f (Suc 0)" by simp
- from f0(2)[rule_format, OF th0] show ?thesis using n0 by simp
- next
- case (Suc m)
- assume m: "n = Suc m"
- from fSuc(1)[of n] m have th0: "f (Suc m) \<le> f (Suc (Suc m))" by simp
- from m fSuc(2)[rule_format, OF th0] show ?thesis by simp
- qed}
- thus "monoseq (\<lambda>n. s (f n))" unfolding monoseq_Suc by blast
- qed
- with th1 have ?thesis by blast}
- moreover
- {fix N assume N: "\<forall>p >N. \<exists> m\<ge>p. s m > s p"
- {fix p assume p: "p \<ge> Suc N"
- hence pN: "p > N" by arith with N obtain m where m: "m \<ge> p" "s m > s p" by blast
- have "m \<noteq> p" using m(2) by auto
- with m have "\<exists>m>p. s p < s m" by - (rule exI[where x=m], auto)}
- note th0 = this
- let ?P = "\<lambda>m x. m > x \<and> s x < s m"
- from num_Axiom[of "SOME x. ?P x (Suc N)" "\<lambda>m x. SOME y. ?P y x"]
- obtain f where f: "f 0 = (SOME x. ?P x (Suc N))"
- "\<forall>n. f (Suc n) = (SOME m. ?P m (f n))" by blast
- have "?P (f 0) (Suc N)" unfolding f(1) some_eq_ex[of "\<lambda>p. ?P p (Suc N)"]
- using N apply -
- apply (erule allE[where x="Suc N"], clarsimp)
- apply (rule_tac x="m" in exI)
- apply auto
- apply (subgoal_tac "Suc N \<noteq> m")
- apply simp
- apply (rule ccontr, simp)
- done
- hence f0: "f 0 > Suc N" "s (Suc N) < s (f 0)" by blast+
- {fix n
- have "f n > N \<and> ?P (f (Suc n)) (f n)"
- unfolding f(2)[rule_format, of n] some_eq_ex[of "\<lambda>p. ?P p (f n)"]
- proof (induct n)
- case 0 thus ?case
- using f0 N apply auto
- apply (erule allE[where x="f 0"], clarsimp)
- apply (rule_tac x="m" in exI, simp)
- by (subgoal_tac "f 0 \<noteq> m", auto)
- next
- case (Suc n)
- from Suc.hyps have Nfn: "N < f n" by blast
- from Suc.hyps obtain m where m: "m > f n" "s (f n) < s m" by blast
- with Nfn have mN: "m > N" by arith
- note key = Suc.hyps[unfolded some_eq_ex[of "\<lambda>p. ?P p (f n)", symmetric] f(2)[rule_format, of n, symmetric]]
-
- from key have th0: "f (Suc n) > N" by simp
- from N[rule_format, OF th0]
- obtain m' where m': "m' \<ge> f (Suc n)" "s (f (Suc n)) < s m'" by blast
- have "m' \<noteq> f (Suc (n))" apply (rule ccontr) using m'(2) by auto
- hence "m' > f (Suc n)" using m'(1) by simp
- with key m'(2) show ?case by auto
- qed}
- note fSuc = this
- {fix n
- have "f n \<ge> Suc N \<and> f(Suc n) > f n \<and> s(f n) < s(f(Suc n))" using fSuc[of n] by auto
- hence "f n \<ge> Suc N" "f(Suc n) > f n" "s(f n) < s(f(Suc n))" by blast+}
- note thf = this
- have sqf: "subseq f" unfolding subseq_Suc_iff using thf by simp
- have "monoseq (\<lambda>n. s (f n))" unfolding monoseq_Suc using thf
- apply -
- apply (rule disjI1)
- apply auto
- apply (rule order_less_imp_le)
- apply blast
- done
- then have ?thesis using sqf by blast}
- ultimately show ?thesis unfolding linorder_not_less[symmetric] by blast
-qed
-
-lemma seq_suble: assumes sf: "subseq f" shows "n \<le> f n"
-proof(induct n)
- case 0 thus ?case by simp
-next
- case (Suc n)
- from sf[unfolded subseq_Suc_iff, rule_format, of n] Suc.hyps
- have "n < f (Suc n)" by arith
- thus ?case by arith
-qed
-
subsection {* Fundamental theorem of algebra *}
lemma unimodular_reduce_norm:
assumes md: "cmod z = 1"
@@ -407,7 +262,6 @@
ultimately show "\<exists>z. ?P z n" by blast
qed
-
text{* Bolzano-Weierstrass type property for closed disc in complex plane. *}
lemma metric_bound_lemma: "cmod (x - y) <= \<bar>Re x - Re y\<bar> + \<bar>Im x - Im y\<bar>"
@@ -946,90 +800,6 @@
ultimately show ?case by blast
qed simp
-subsection {* Order of polynomial roots *}
-
-definition
- order :: "'a::{idom,recpower} \<Rightarrow> 'a poly \<Rightarrow> nat"
-where
- [code del]:
- "order a p = (LEAST n. \<not> [:-a, 1:] ^ Suc n dvd p)"
-
-lemma degree_power_le: "degree (p ^ n) \<le> degree p * n"
-by (induct n, simp, auto intro: order_trans degree_mult_le)
-
-lemma coeff_linear_power:
- fixes a :: "'a::{comm_semiring_1,recpower}"
- shows "coeff ([:a, 1:] ^ n) n = 1"
-apply (induct n, simp_all)
-apply (subst coeff_eq_0)
-apply (auto intro: le_less_trans degree_power_le)
-done
-
-lemma degree_linear_power:
- fixes a :: "'a::{comm_semiring_1,recpower}"
- shows "degree ([:a, 1:] ^ n) = n"
-apply (rule order_antisym)
-apply (rule ord_le_eq_trans [OF degree_power_le], simp)
-apply (rule le_degree, simp add: coeff_linear_power)
-done
-
-lemma order_1: "[:-a, 1:] ^ order a p dvd p"
-apply (cases "p = 0", simp)
-apply (cases "order a p", simp)
-apply (subgoal_tac "nat < (LEAST n. \<not> [:-a, 1:] ^ Suc n dvd p)")
-apply (drule not_less_Least, simp)
-apply (fold order_def, simp)
-done
-
-lemma order_2: "p \<noteq> 0 \<Longrightarrow> \<not> [:-a, 1:] ^ Suc (order a p) dvd p"
-unfolding order_def
-apply (rule LeastI_ex)
-apply (rule_tac x="degree p" in exI)
-apply (rule notI)
-apply (drule (1) dvd_imp_degree_le)
-apply (simp only: degree_linear_power)
-done
-
-lemma order:
- "p \<noteq> 0 \<Longrightarrow> [:-a, 1:] ^ order a p dvd p \<and> \<not> [:-a, 1:] ^ Suc (order a p) dvd p"
-by (rule conjI [OF order_1 order_2])
-
-lemma order_degree:
- assumes p: "p \<noteq> 0"
- shows "order a p \<le> degree p"
-proof -
- have "order a p = degree ([:-a, 1:] ^ order a p)"
- by (simp only: degree_linear_power)
- also have "\<dots> \<le> degree p"
- using order_1 p by (rule dvd_imp_degree_le)
- finally show ?thesis .
-qed
-
-lemma order_root: "poly p a = 0 \<longleftrightarrow> p = 0 \<or> order a p \<noteq> 0"
-apply (cases "p = 0", simp_all)
-apply (rule iffI)
-apply (rule ccontr, simp)
-apply (frule order_2 [where a=a], simp)
-apply (simp add: poly_eq_0_iff_dvd)
-apply (simp add: poly_eq_0_iff_dvd)
-apply (simp only: order_def)
-apply (drule not_less_Least, simp)
-done
-
-lemma poly_zero:
- fixes p :: "'a::{idom,ring_char_0} poly"
- shows "poly p = poly 0 \<longleftrightarrow> p = 0"
-apply (cases "p = 0", simp_all)
-apply (drule poly_roots_finite)
-apply (auto simp add: infinite_UNIV_char_0)
-done
-
-lemma poly_eq_iff:
- fixes p q :: "'a::{idom,ring_char_0} poly"
- shows "poly p = poly q \<longleftrightarrow> p = q"
- using poly_zero [of "p - q"]
- by (simp add: expand_fun_eq)
-
subsection{* Nullstellenstatz, degrees and divisibility of polynomials *}
--- a/src/HOL/Library/Library.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Library.thy Wed Mar 04 10:45:52 2009 +0100
@@ -5,6 +5,7 @@
AssocList
BigO
Binomial
+ Bit
Boolean_Algebra
Char_ord
Code_Char_chr
@@ -22,9 +23,11 @@
Executable_Set
Float
Formal_Power_Series
+ FrechetDeriv
FuncSet
Fundamental_Theorem_Algebra
Infinite_Set
+ Inner_Product
ListVector
Mapping
Multiset
@@ -35,7 +38,10 @@
Option_ord
Permutation
Pocklington
+ Poly_Deriv
+ Polynomial
Primes
+ Product_Vector
Quickcheck
Quicksort
Quotient
--- a/src/HOL/Library/Numeral_Type.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Numeral_Type.thy Wed Mar 04 10:45:52 2009 +0100
@@ -42,36 +42,87 @@
end
*}
-lemma card_unit: "CARD(unit) = 1"
+lemma card_unit [simp]: "CARD(unit) = 1"
unfolding UNIV_unit by simp
-lemma card_bool: "CARD(bool) = 2"
+lemma card_bool [simp]: "CARD(bool) = 2"
unfolding UNIV_bool by simp
-lemma card_prod: "CARD('a::finite \<times> 'b::finite) = CARD('a) * CARD('b)"
+lemma card_prod [simp]: "CARD('a \<times> 'b) = CARD('a::finite) * CARD('b::finite)"
unfolding UNIV_Times_UNIV [symmetric] by (simp only: card_cartesian_product)
-lemma card_sum: "CARD('a::finite + 'b::finite) = CARD('a) + CARD('b)"
+lemma card_sum [simp]: "CARD('a + 'b) = CARD('a::finite) + CARD('b::finite)"
unfolding UNIV_Plus_UNIV [symmetric] by (simp only: finite card_Plus)
-lemma card_option: "CARD('a::finite option) = Suc CARD('a)"
+lemma card_option [simp]: "CARD('a option) = Suc CARD('a::finite)"
unfolding insert_None_conv_UNIV [symmetric]
apply (subgoal_tac "(None::'a option) \<notin> range Some")
- apply (simp add: finite card_image)
+ apply (simp add: card_image)
apply fast
done
-lemma card_set: "CARD('a::finite set) = 2 ^ CARD('a)"
+lemma card_set [simp]: "CARD('a set) = 2 ^ CARD('a::finite)"
unfolding Pow_UNIV [symmetric]
by (simp only: card_Pow finite numeral_2_eq_2)
+lemma card_nat [simp]: "CARD(nat) = 0"
+ by (simp add: infinite_UNIV_nat card_eq_0_iff)
+
+
+subsection {* Classes with at least 1 and 2 *}
+
+text {* Class finite already captures "at least 1" *}
+
+lemma zero_less_card_finite [simp]: "0 < CARD('a::finite)"
+ unfolding neq0_conv [symmetric] by simp
+
+lemma one_le_card_finite [simp]: "Suc 0 \<le> CARD('a::finite)"
+ by (simp add: less_Suc_eq_le [symmetric])
+
+text {* Class for cardinality "at least 2" *}
+
+class card2 = finite +
+ assumes two_le_card: "2 \<le> CARD('a)"
+
+lemma one_less_card: "Suc 0 < CARD('a::card2)"
+ using two_le_card [where 'a='a] by simp
+
+lemma one_less_int_card: "1 < int CARD('a::card2)"
+ using one_less_card [where 'a='a] by simp
+
subsection {* Numeral Types *}
typedef (open) num0 = "UNIV :: nat set" ..
typedef (open) num1 = "UNIV :: unit set" ..
-typedef (open) 'a bit0 = "UNIV :: (bool * 'a) set" ..
-typedef (open) 'a bit1 = "UNIV :: (bool * 'a) option set" ..
+
+typedef (open) 'a bit0 = "{0 ..< 2 * int CARD('a::finite)}"
+proof
+ show "0 \<in> {0 ..< 2 * int CARD('a)}"
+ by simp
+qed
+
+typedef (open) 'a bit1 = "{0 ..< 1 + 2 * int CARD('a::finite)}"
+proof
+ show "0 \<in> {0 ..< 1 + 2 * int CARD('a)}"
+ by simp
+qed
+
+lemma card_num0 [simp]: "CARD (num0) = 0"
+ unfolding type_definition.card [OF type_definition_num0]
+ by simp
+
+lemma card_num1 [simp]: "CARD(num1) = 1"
+ unfolding type_definition.card [OF type_definition_num1]
+ by (simp only: card_unit)
+
+lemma card_bit0 [simp]: "CARD('a bit0) = 2 * CARD('a::finite)"
+ unfolding type_definition.card [OF type_definition_bit0]
+ by simp
+
+lemma card_bit1 [simp]: "CARD('a bit1) = Suc (2 * CARD('a::finite))"
+ unfolding type_definition.card [OF type_definition_bit1]
+ by simp
instance num1 :: finite
proof
@@ -80,46 +131,263 @@
using finite by (rule finite_imageI)
qed
-instance bit0 :: (finite) finite
+instance bit0 :: (finite) card2
proof
show "finite (UNIV::'a bit0 set)"
unfolding type_definition.univ [OF type_definition_bit0]
- using finite by (rule finite_imageI)
+ by simp
+ show "2 \<le> CARD('a bit0)"
+ by simp
qed
-instance bit1 :: (finite) finite
+instance bit1 :: (finite) card2
proof
show "finite (UNIV::'a bit1 set)"
unfolding type_definition.univ [OF type_definition_bit1]
- using finite by (rule finite_imageI)
+ by simp
+ show "2 \<le> CARD('a bit1)"
+ by simp
qed
-lemma card_num1: "CARD(num1) = 1"
- unfolding type_definition.card [OF type_definition_num1]
- by (simp only: card_unit)
+
+subsection {* Locale for modular arithmetic subtypes *}
+
+locale mod_type =
+ fixes n :: int
+ and Rep :: "'a::{zero,one,plus,times,uminus,minus,power} \<Rightarrow> int"
+ and Abs :: "int \<Rightarrow> 'a::{zero,one,plus,times,uminus,minus,power}"
+ assumes type: "type_definition Rep Abs {0..<n}"
+ and size1: "1 < n"
+ and zero_def: "0 = Abs 0"
+ and one_def: "1 = Abs 1"
+ and add_def: "x + y = Abs ((Rep x + Rep y) mod n)"
+ and mult_def: "x * y = Abs ((Rep x * Rep y) mod n)"
+ and diff_def: "x - y = Abs ((Rep x - Rep y) mod n)"
+ and minus_def: "- x = Abs ((- Rep x) mod n)"
+ and power_def: "x ^ k = Abs (Rep x ^ k mod n)"
+begin
+
+lemma size0: "0 < n"
+by (cut_tac size1, simp)
+
+lemmas definitions =
+ zero_def one_def add_def mult_def minus_def diff_def power_def
+
+lemma Rep_less_n: "Rep x < n"
+by (rule type_definition.Rep [OF type, simplified, THEN conjunct2])
+
+lemma Rep_le_n: "Rep x \<le> n"
+by (rule Rep_less_n [THEN order_less_imp_le])
+
+lemma Rep_inject_sym: "x = y \<longleftrightarrow> Rep x = Rep y"
+by (rule type_definition.Rep_inject [OF type, symmetric])
+
+lemma Rep_inverse: "Abs (Rep x) = x"
+by (rule type_definition.Rep_inverse [OF type])
+
+lemma Abs_inverse: "m \<in> {0..<n} \<Longrightarrow> Rep (Abs m) = m"
+by (rule type_definition.Abs_inverse [OF type])
+
+lemma Rep_Abs_mod: "Rep (Abs (m mod n)) = m mod n"
+by (simp add: Abs_inverse IntDiv.pos_mod_conj [OF size0])
+
+lemma Rep_Abs_0: "Rep (Abs 0) = 0"
+by (simp add: Abs_inverse size0)
+
+lemma Rep_0: "Rep 0 = 0"
+by (simp add: zero_def Rep_Abs_0)
+
+lemma Rep_Abs_1: "Rep (Abs 1) = 1"
+by (simp add: Abs_inverse size1)
+
+lemma Rep_1: "Rep 1 = 1"
+by (simp add: one_def Rep_Abs_1)
-lemma card_bit0: "CARD('a::finite bit0) = 2 * CARD('a)"
- unfolding type_definition.card [OF type_definition_bit0]
- by (simp only: card_prod card_bool)
+lemma Rep_mod: "Rep x mod n = Rep x"
+apply (rule_tac x=x in type_definition.Abs_cases [OF type])
+apply (simp add: type_definition.Abs_inverse [OF type])
+apply (simp add: mod_pos_pos_trivial)
+done
+
+lemmas Rep_simps =
+ Rep_inject_sym Rep_inverse Rep_Abs_mod Rep_mod Rep_Abs_0 Rep_Abs_1
+
+lemma comm_ring_1: "OFCLASS('a, comm_ring_1_class)"
+apply (intro_classes, unfold definitions)
+apply (simp_all add: Rep_simps zmod_simps ring_simps)
+done
+
+lemma recpower: "OFCLASS('a, recpower_class)"
+apply (intro_classes, unfold definitions)
+apply (simp_all add: Rep_simps zmod_simps add_ac mult_assoc
+ mod_pos_pos_trivial size1)
+done
+
+end
+
+locale mod_ring = mod_type +
+ constrains n :: int
+ and Rep :: "'a::{number_ring,power} \<Rightarrow> int"
+ and Abs :: "int \<Rightarrow> 'a::{number_ring,power}"
+begin
-lemma card_bit1: "CARD('a::finite bit1) = Suc (2 * CARD('a))"
- unfolding type_definition.card [OF type_definition_bit1]
- by (simp only: card_prod card_option card_bool)
+lemma of_nat_eq: "of_nat k = Abs (int k mod n)"
+apply (induct k)
+apply (simp add: zero_def)
+apply (simp add: Rep_simps add_def one_def zmod_simps add_ac)
+done
+
+lemma of_int_eq: "of_int z = Abs (z mod n)"
+apply (cases z rule: int_diff_cases)
+apply (simp add: Rep_simps of_nat_eq diff_def zmod_simps)
+done
+
+lemma Rep_number_of:
+ "Rep (number_of w) = number_of w mod n"
+by (simp add: number_of_eq of_int_eq Rep_Abs_mod)
+
+lemma iszero_number_of:
+ "iszero (number_of w::'a) \<longleftrightarrow> number_of w mod n = 0"
+by (simp add: Rep_simps number_of_eq of_int_eq iszero_def zero_def)
+
+lemma cases:
+ assumes 1: "\<And>z. \<lbrakk>(x::'a) = of_int z; 0 \<le> z; z < n\<rbrakk> \<Longrightarrow> P"
+ shows "P"
+apply (cases x rule: type_definition.Abs_cases [OF type])
+apply (rule_tac z="y" in 1)
+apply (simp_all add: of_int_eq mod_pos_pos_trivial)
+done
+
+lemma induct:
+ "(\<And>z. \<lbrakk>0 \<le> z; z < n\<rbrakk> \<Longrightarrow> P (of_int z)) \<Longrightarrow> P (x::'a)"
+by (cases x rule: cases) simp
+
+end
+
+
+subsection {* Number ring instances *}
-lemma card_num0: "CARD (num0) = 0"
- by (simp add: infinite_UNIV_nat card_eq_0_iff type_definition.card [OF type_definition_num0])
+text {*
+ Unfortunately a number ring instance is not possible for
+ @{typ num1}, since 0 and 1 are not distinct.
+*}
+
+instantiation num1 :: "{comm_ring,comm_monoid_mult,number,recpower}"
+begin
+
+lemma num1_eq_iff: "(x::num1) = (y::num1) \<longleftrightarrow> True"
+ by (induct x, induct y) simp
+
+instance proof
+qed (simp_all add: num1_eq_iff)
+
+end
+
+instantiation
+ bit0 and bit1 :: (finite) "{zero,one,plus,times,uminus,minus,power}"
+begin
+
+definition Abs_bit0' :: "int \<Rightarrow> 'a bit0" where
+ "Abs_bit0' x = Abs_bit0 (x mod int CARD('a bit0))"
+
+definition Abs_bit1' :: "int \<Rightarrow> 'a bit1" where
+ "Abs_bit1' x = Abs_bit1 (x mod int CARD('a bit1))"
+
+definition "0 = Abs_bit0 0"
+definition "1 = Abs_bit0 1"
+definition "x + y = Abs_bit0' (Rep_bit0 x + Rep_bit0 y)"
+definition "x * y = Abs_bit0' (Rep_bit0 x * Rep_bit0 y)"
+definition "x - y = Abs_bit0' (Rep_bit0 x - Rep_bit0 y)"
+definition "- x = Abs_bit0' (- Rep_bit0 x)"
+definition "x ^ k = Abs_bit0' (Rep_bit0 x ^ k)"
+
+definition "0 = Abs_bit1 0"
+definition "1 = Abs_bit1 1"
+definition "x + y = Abs_bit1' (Rep_bit1 x + Rep_bit1 y)"
+definition "x * y = Abs_bit1' (Rep_bit1 x * Rep_bit1 y)"
+definition "x - y = Abs_bit1' (Rep_bit1 x - Rep_bit1 y)"
+definition "- x = Abs_bit1' (- Rep_bit1 x)"
+definition "x ^ k = Abs_bit1' (Rep_bit1 x ^ k)"
+
+instance ..
+
+end
-lemmas card_univ_simps [simp] =
- card_unit
- card_bool
- card_prod
- card_sum
- card_option
- card_set
- card_num1
- card_bit0
- card_bit1
- card_num0
+interpretation bit0!:
+ mod_type "int CARD('a::finite bit0)"
+ "Rep_bit0 :: 'a::finite bit0 \<Rightarrow> int"
+ "Abs_bit0 :: int \<Rightarrow> 'a::finite bit0"
+apply (rule mod_type.intro)
+apply (simp add: int_mult type_definition_bit0)
+apply (rule one_less_int_card)
+apply (rule zero_bit0_def)
+apply (rule one_bit0_def)
+apply (rule plus_bit0_def [unfolded Abs_bit0'_def])
+apply (rule times_bit0_def [unfolded Abs_bit0'_def])
+apply (rule minus_bit0_def [unfolded Abs_bit0'_def])
+apply (rule uminus_bit0_def [unfolded Abs_bit0'_def])
+apply (rule power_bit0_def [unfolded Abs_bit0'_def])
+done
+
+interpretation bit1!:
+ mod_type "int CARD('a::finite bit1)"
+ "Rep_bit1 :: 'a::finite bit1 \<Rightarrow> int"
+ "Abs_bit1 :: int \<Rightarrow> 'a::finite bit1"
+apply (rule mod_type.intro)
+apply (simp add: int_mult type_definition_bit1)
+apply (rule one_less_int_card)
+apply (rule zero_bit1_def)
+apply (rule one_bit1_def)
+apply (rule plus_bit1_def [unfolded Abs_bit1'_def])
+apply (rule times_bit1_def [unfolded Abs_bit1'_def])
+apply (rule minus_bit1_def [unfolded Abs_bit1'_def])
+apply (rule uminus_bit1_def [unfolded Abs_bit1'_def])
+apply (rule power_bit1_def [unfolded Abs_bit1'_def])
+done
+
+instance bit0 :: (finite) "{comm_ring_1,recpower}"
+ by (rule bit0.comm_ring_1 bit0.recpower)+
+
+instance bit1 :: (finite) "{comm_ring_1,recpower}"
+ by (rule bit1.comm_ring_1 bit1.recpower)+
+
+instantiation bit0 and bit1 :: (finite) number_ring
+begin
+
+definition "(number_of w :: _ bit0) = of_int w"
+
+definition "(number_of w :: _ bit1) = of_int w"
+
+instance proof
+qed (rule number_of_bit0_def number_of_bit1_def)+
+
+end
+
+interpretation bit0!:
+ mod_ring "int CARD('a::finite bit0)"
+ "Rep_bit0 :: 'a::finite bit0 \<Rightarrow> int"
+ "Abs_bit0 :: int \<Rightarrow> 'a::finite bit0"
+ ..
+
+interpretation bit1!:
+ mod_ring "int CARD('a::finite bit1)"
+ "Rep_bit1 :: 'a::finite bit1 \<Rightarrow> int"
+ "Abs_bit1 :: int \<Rightarrow> 'a::finite bit1"
+ ..
+
+text {* Set up cases, induction, and arithmetic *}
+
+lemmas bit0_cases [case_names of_int, cases type: bit0] = bit0.cases
+lemmas bit1_cases [case_names of_int, cases type: bit1] = bit1.cases
+
+lemmas bit0_induct [case_names of_int, induct type: bit0] = bit0.induct
+lemmas bit1_induct [case_names of_int, induct type: bit1] = bit1.induct
+
+lemmas bit0_iszero_number_of [simp] = bit0.iszero_number_of
+lemmas bit1_iszero_number_of [simp] = bit1.iszero_number_of
+
+declare power_Suc [where ?'a="'a::finite bit0", standard, simp]
+declare power_Suc [where ?'a="'a::finite bit1", standard, simp]
subsection {* Syntax *}
@@ -184,42 +452,10 @@
in [("bit0", bit_tr' 0), ("bit1", bit_tr' 1)] end;
*}
-
-subsection {* Classes with at least 1 and 2 *}
-
-text {* Class finite already captures "at least 1" *}
-
-lemma zero_less_card_finite [simp]:
- "0 < CARD('a::finite)"
-proof (cases "CARD('a::finite) = 0")
- case False thus ?thesis by (simp del: card_0_eq)
-next
- case True
- thus ?thesis by (simp add: finite)
-qed
-
-lemma one_le_card_finite [simp]:
- "Suc 0 <= CARD('a::finite)"
- by (simp add: less_Suc_eq_le [symmetric] zero_less_card_finite)
-
-
-text {* Class for cardinality "at least 2" *}
-
-class card2 = finite +
- assumes two_le_card: "2 <= CARD('a)"
-
-lemma one_less_card: "Suc 0 < CARD('a::card2)"
- using two_le_card [where 'a='a] by simp
-
-instance bit0 :: (finite) card2
- by intro_classes (simp add: one_le_card_finite)
-
-instance bit1 :: (finite) card2
- by intro_classes (simp add: one_le_card_finite)
-
subsection {* Examples *}
lemma "CARD(0) = 0" by simp
lemma "CARD(17) = 17" by simp
+lemma "8 * 11 ^ 3 - 6 = (2::5)" by simp
end
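
With bit0 and bit1 now carved out of integer intervals and their arithmetic defined modulo the cardinality (the mod_type locale), the new example lemma above evaluates by ordinary integer arithmetic mod 5. The same computation in plain Standard ML, illustration only:

    val card5 = 5;
    val v = (8 * 11 * 11 * 11 - 6) mod card5;   (* 10642 mod 5 = 2 *)
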
--- a/src/HOL/Library/Order_Relation.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Order_Relation.thy Wed Mar 04 10:45:52 2009 +0100
@@ -10,7 +10,7 @@
subsection{* Orders on a set *}
-definition "preorder_on A r \<equiv> refl A r \<and> trans r"
+definition "preorder_on A r \<equiv> refl_on A r \<and> trans r"
definition "partial_order_on A r \<equiv> preorder_on A r \<and> antisym r"
@@ -57,7 +57,7 @@
subsection{* Orders on the field *}
-abbreviation "Refl r \<equiv> refl (Field r) r"
+abbreviation "Refl r \<equiv> refl_on (Field r) r"
abbreviation "Preorder r \<equiv> preorder_on (Field r) r"
@@ -73,7 +73,7 @@
lemma subset_Image_Image_iff:
"\<lbrakk> Preorder r; A \<subseteq> Field r; B \<subseteq> Field r\<rbrakk> \<Longrightarrow>
r `` A \<subseteq> r `` B \<longleftrightarrow> (\<forall>a\<in>A.\<exists>b\<in>B. (b,a):r)"
-apply(auto simp add: subset_eq preorder_on_def refl_def Image_def)
+apply(auto simp add: subset_eq preorder_on_def refl_on_def Image_def)
apply metis
by(metis trans_def)
@@ -83,7 +83,7 @@
lemma Refl_antisym_eq_Image1_Image1_iff:
"\<lbrakk>Refl r; antisym r; a:Field r; b:Field r\<rbrakk> \<Longrightarrow> r `` {a} = r `` {b} \<longleftrightarrow> a=b"
-by(simp add: expand_set_eq antisym_def refl_def) metis
+by(simp add: expand_set_eq antisym_def refl_on_def) metis
lemma Partial_order_eq_Image1_Image1_iff:
"\<lbrakk>Partial_order r; a:Field r; b:Field r\<rbrakk> \<Longrightarrow> r `` {a} = r `` {b} \<longleftrightarrow> a=b"
--- a/src/HOL/Library/Permutations.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Permutations.thy Wed Mar 04 10:45:52 2009 +0100
@@ -6,7 +6,7 @@
header {* Permutations, both general and specifically on finite sets.*}
theory Permutations
-imports Main Finite_Cartesian_Product Parity
+imports Main Finite_Cartesian_Product Parity Fact
begin
(* Why should I import Main just to solve the Typerep problem! *)
@@ -683,13 +683,13 @@
(* ------------------------------------------------------------------------- *)
lemma permutes_natset_le:
- assumes p: "p permutes (S:: nat set)" and le: "\<forall>i \<in> S. p i <= i" shows "p = id"
+ assumes p: "p permutes (S::'a::wellorder set)" and le: "\<forall>i \<in> S. p i <= i" shows "p = id"
proof-
{fix n
have "p n = n"
using p le
- proof(induct n arbitrary: S rule: nat_less_induct)
- fix n S assume H: "\<forall> m< n. \<forall>S. p permutes S \<longrightarrow> (\<forall>i\<in>S. p i \<le> i) \<longrightarrow> p m = m"
+ proof(induct n arbitrary: S rule: less_induct)
+ fix n S assume H: "\<And>m S. \<lbrakk>m < n; p permutes S; \<forall>i\<in>S. p i \<le> i\<rbrakk> \<Longrightarrow> p m = m"
"p permutes S" "\<forall>i \<in>S. p i \<le> i"
{assume "n \<notin> S"
with H(2) have "p n = n" unfolding permutes_def by metis}
@@ -699,7 +699,7 @@
moreover{assume h: "p n < n"
from H h have "p (p n) = p n" by metis
with permutes_inj[OF H(2)] have "p n = n" unfolding inj_on_def by blast
- with h have False by arith}
+ with h have False by simp}
ultimately have "p n = n" by blast }
ultimately show "p n = n" by blast
qed}
@@ -707,7 +707,7 @@
qed
lemma permutes_natset_ge:
- assumes p: "p permutes (S:: nat set)" and le: "\<forall>i \<in> S. p i \<ge> i" shows "p = id"
+ assumes p: "p permutes (S::'a::wellorder set)" and le: "\<forall>i \<in> S. p i \<ge> i" shows "p = id"
proof-
{fix i assume i: "i \<in> S"
from i permutes_in_image[OF permutes_inv[OF p]] have "inv p i \<in> S" by simp
@@ -757,13 +757,13 @@
done
term setsum
-lemma setsum_permutations_inverse: "setsum f {p. p permutes {m..n}} = setsum (\<lambda>p. f(inv p)) {p. p permutes {m..n}}" (is "?lhs = ?rhs")
+lemma setsum_permutations_inverse: "setsum f {p. p permutes S} = setsum (\<lambda>p. f(inv p)) {p. p permutes S}" (is "?lhs = ?rhs")
proof-
- let ?S = "{p . p permutes {m .. n}}"
+ let ?S = "{p . p permutes S}"
have th0: "inj_on inv ?S"
proof(auto simp add: inj_on_def)
fix q r
- assume q: "q permutes {m .. n}" and r: "r permutes {m .. n}" and qr: "inv q = inv r"
+ assume q: "q permutes S" and r: "r permutes S" and qr: "inv q = inv r"
hence "inv (inv q) = inv (inv r)" by simp
with permutes_inv_inv[OF q] permutes_inv_inv[OF r]
show "q = r" by metis
@@ -774,17 +774,17 @@
qed
lemma setum_permutations_compose_left:
- assumes q: "q permutes {m..n}"
- shows "setsum f {p. p permutes {m..n}} =
- setsum (\<lambda>p. f(q o p)) {p. p permutes {m..n}}" (is "?lhs = ?rhs")
+ assumes q: "q permutes S"
+ shows "setsum f {p. p permutes S} =
+ setsum (\<lambda>p. f(q o p)) {p. p permutes S}" (is "?lhs = ?rhs")
proof-
- let ?S = "{p. p permutes {m..n}}"
+ let ?S = "{p. p permutes S}"
have th0: "?rhs = setsum (f o (op o q)) ?S" by (simp add: o_def)
have th1: "inj_on (op o q) ?S"
apply (auto simp add: inj_on_def)
proof-
fix p r
- assume "p permutes {m..n}" and r:"r permutes {m..n}" and rp: "q \<circ> p = q \<circ> r"
+ assume "p permutes S" and r:"r permutes S" and rp: "q \<circ> p = q \<circ> r"
hence "inv q o q o p = inv q o q o r" by (simp add: o_assoc[symmetric])
with permutes_inj[OF q, unfolded inj_iff]
@@ -796,17 +796,17 @@
qed
lemma sum_permutations_compose_right:
- assumes q: "q permutes {m..n}"
- shows "setsum f {p. p permutes {m..n}} =
- setsum (\<lambda>p. f(p o q)) {p. p permutes {m..n}}" (is "?lhs = ?rhs")
+ assumes q: "q permutes S"
+ shows "setsum f {p. p permutes S} =
+ setsum (\<lambda>p. f(p o q)) {p. p permutes S}" (is "?lhs = ?rhs")
proof-
- let ?S = "{p. p permutes {m..n}}"
+ let ?S = "{p. p permutes S}"
have th0: "?rhs = setsum (f o (\<lambda>p. p o q)) ?S" by (simp add: o_def)
have th1: "inj_on (\<lambda>p. p o q) ?S"
apply (auto simp add: inj_on_def)
proof-
fix p r
- assume "p permutes {m..n}" and r:"r permutes {m..n}" and rp: "p o q = r o q"
+ assume "p permutes S" and r:"r permutes S" and rp: "p o q = r o q"
hence "p o (q o inv q) = r o (q o inv q)" by (simp add: o_assoc)
with permutes_surj[OF q, unfolded surj_iff]
--- a/src/HOL/Library/Pocklington.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Pocklington.thy Wed Mar 04 10:45:52 2009 +0100
@@ -142,10 +142,10 @@
shows "[x * y = x' * y'] (mod n)"
proof-
have "(x * y) mod n = (x mod n) * (y mod n) mod n"
- by (simp add: mod_mult1_eq'[of x y n] mod_mult1_eq[of "x mod n" y n])
+ by (simp add: mod_mult_left_eq[of x y n] mod_mult_right_eq[of "x mod n" y n])
also have "\<dots> = (x' mod n) * (y' mod n) mod n" using xx'[unfolded modeq_def] yy'[unfolded modeq_def] by simp
also have "\<dots> = (x' * y') mod n"
- by (simp add: mod_mult1_eq'[of x' y' n] mod_mult1_eq[of "x' mod n" y' n])
+ by (simp add: mod_mult_left_eq[of x' y' n] mod_mult_right_eq[of "x' mod n" y' n])
finally show ?thesis unfolding modeq_def .
qed
@@ -296,7 +296,7 @@
from cong_solve[OF an] obtain x where x: "[a*x = b] (mod n)" by blast
let ?x = "x mod n"
from x have th: "[a * ?x = b] (mod n)"
- by (simp add: modeq_def mod_mult1_eq[of a x n])
+ by (simp add: modeq_def mod_mult_right_eq[of a x n])
from mod_less_divisor[ of n x] nz th have Px: "?P ?x" by simp
{fix y assume Py: "y < n" "[a * y = b] (mod n)"
from Py(2) th have "[a * y = a*?x] (mod n)" by (simp add: modeq_def)
@@ -753,10 +753,10 @@
next
case (Suc n)
have "(x mod m)^(Suc n) mod m = ((x mod m) * (((x mod m) ^ n) mod m)) mod m"
- by (simp add: mod_mult1_eq[symmetric])
+ by (simp add: mod_mult_right_eq[symmetric])
also have "\<dots> = ((x mod m) * (x^n mod m)) mod m" using Suc.hyps by simp
also have "\<dots> = x^(Suc n) mod m"
- by (simp add: mod_mult1_eq'[symmetric] mod_mult1_eq[symmetric])
+ by (simp add: mod_mult_left_eq[symmetric] mod_mult_right_eq[symmetric])
finally show ?case .
qed
@@ -873,7 +873,7 @@
from lh[unfolded nat_mod]
obtain q1 q2 where q12:"a ^ d + n * q1 = 1 + n * q2" by blast
hence "a ^ d + n * q1 - n * q2 = 1" by simp
- with dvd_diff [OF dvd_add [OF divides_rexp[OF p(2), of d'] dvd_mult2[OF p(1), of q1]] dvd_mult2[OF p(1), of q2]] d' have "p dvd 1" by simp
+ with nat_dvd_diff [OF dvd_add [OF divides_rexp[OF p(2), of d'] dvd_mult2[OF p(1), of q1]] dvd_mult2[OF p(1), of q2]] d' have "p dvd 1" by simp
with p(3) have False by simp
hence ?rhs ..}
ultimately have ?rhs by blast}
@@ -891,9 +891,9 @@
hence "[(a^?o)^?q * (a^?r) = 1] (mod n)"
by (simp add: modeq_def power_mult[symmetric] power_add[symmetric])
hence th: "[a^?r = 1] (mod n)"
- using eqo mod_mult1_eq'[of "(a^?o)^?q" "a^?r" n]
+ using eqo mod_mult_left_eq[of "(a^?o)^?q" "a^?r" n]
apply (simp add: modeq_def del: One_nat_def)
- by (simp add: mod_mult1_eq'[symmetric])
+ by (simp add: mod_mult_left_eq[symmetric])
{assume r: "?r = 0" hence ?rhs by (simp add: dvd_eq_mod_eq_0)}
moreover
{assume r: "?r \<noteq> 0"
--- a/src/HOL/Library/Primes.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Primes.thy Wed Mar 04 10:45:52 2009 +0100
@@ -45,12 +45,14 @@
by (rule prime_dvd_square) (simp_all add: power2_eq_square)
-lemma exp_eq_1:"(x::nat)^n = 1 \<longleftrightarrow> x = 1 \<or> n = 0" by (induct n, auto)
+lemma exp_eq_1:"(x::nat)^n = 1 \<longleftrightarrow> x = 1 \<or> n = 0"
+by (induct n, auto)
+
lemma exp_mono_lt: "(x::nat) ^ (Suc n) < y ^ (Suc n) \<longleftrightarrow> x < y"
- using power_less_imp_less_base[of x "Suc n" y] power_strict_mono[of x y "Suc n"]
- by auto
+by(metis linorder_not_less not_less0 power_le_imp_le_base power_less_imp_less_base)
+
lemma exp_mono_le: "(x::nat) ^ (Suc n) \<le> y ^ (Suc n) \<longleftrightarrow> x \<le> y"
- by (simp only: linorder_not_less[symmetric] exp_mono_lt)
+by (simp only: linorder_not_less[symmetric] exp_mono_lt)
lemma exp_mono_eq: "(x::nat) ^ Suc n = y ^ Suc n \<longleftrightarrow> x = y"
using power_inject_base[of x n y] by auto
@@ -307,8 +309,8 @@
{fix e assume H: "e dvd a^n" "e dvd b^n"
from bezout_gcd_pow[of a n b] obtain x y
where xy: "a ^ n * x - b ^ n * y = ?gn \<or> b ^ n * x - a ^ n * y = ?gn" by blast
- from dvd_diff [OF dvd_mult2[OF H(1), of x] dvd_mult2[OF H(2), of y]]
- dvd_diff [OF dvd_mult2[OF H(2), of x] dvd_mult2[OF H(1), of y]] xy
+ from nat_dvd_diff [OF dvd_mult2[OF H(1), of x] dvd_mult2[OF H(2), of y]]
+ nat_dvd_diff [OF dvd_mult2[OF H(2), of x] dvd_mult2[OF H(1), of y]] xy
have "e dvd ?gn" by (cases "a ^ n * x - b ^ n * y = gcd a b ^ n", simp_all)}
hence th: "\<forall>e. e dvd a^n \<and> e dvd b^n \<longrightarrow> e dvd ?gn" by blast
from divides_exp[OF gcd_dvd1[of a b], of n] divides_exp[OF gcd_dvd2[of a b], of n] th
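
nat_dvd_diff, as used above, amounts to: a common divisor of two naturals also divides their (truncated) difference. One concrete instance, checked in plain Standard ML, illustration only:

    val divides_diff = (21 - 6) mod 3 = 0;   (* true: 3 dvd 21, 3 dvd 6, and 15 = 3 * 5 *)
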
--- a/src/HOL/Library/Word.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Word.thy Wed Mar 04 10:45:52 2009 +0100
@@ -575,7 +575,7 @@
have "?lhs = (1 + 2 * bv_to_nat w) mod 2"
by (simp add: add_commute)
also have "... = 1"
- by (subst mod_add1_eq) simp
+ by (subst mod_add_eq) simp
finally have eq1: "?lhs = 1" .
have "?rhs = 0" by simp
with orig and eq1
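
The mod_add_eq step above boils down to (1 + 2 * k) mod 2 = 1, since the even summand drops out modulo 2. One instance in plain Standard ML, with k chosen arbitrarily, illustration only:

    val k = 17;
    val bit = (1 + 2 * k) mod 2;   (* 1 *)
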
--- a/src/HOL/Library/Zorn.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/Zorn.thy Wed Mar 04 10:45:52 2009 +0100
@@ -297,7 +297,7 @@
fix a B assume aB: "B:C" "a:B"
with 1 obtain x where "x:Field r" "B = r^-1 `` {x}" by auto
thus "(a,u) : r" using uA aB `Preorder r`
- by (auto simp add: preorder_on_def refl_def) (metis transD)
+ by (auto simp add: preorder_on_def refl_on_def) (metis transD)
qed
thus "EX u:Field r. ?P u" using `u:Field r` by blast
qed
@@ -322,7 +322,7 @@
(infix "initial'_segment'_of" 55) where
"r initial_segment_of s == (r,s):init_seg_of"
-lemma refl_init_seg_of[simp]: "r initial_segment_of r"
+lemma refl_on_init_seg_of[simp]: "r initial_segment_of r"
by(simp add:init_seg_of_def)
lemma trans_init_seg_of:
@@ -411,7 +411,7 @@
by(simp add:Chain_def I_def) blast
have FI: "Field I = ?WO" by(auto simp add:I_def init_seg_of_def Field_def)
hence 0: "Partial_order I"
- by(auto simp: partial_order_on_def preorder_on_def antisym_def antisym_init_seg_of refl_def trans_def I_def elim!: trans_init_seg_of)
+ by(auto simp: partial_order_on_def preorder_on_def antisym_def antisym_init_seg_of refl_on_def trans_def I_def elim!: trans_init_seg_of)
-- {*I-chains have upper bounds in ?WO wrt I: their Union*}
{ fix R assume "R \<in> Chain I"
hence Ris: "R \<in> Chain init_seg_of" using mono_Chain[OF I_init] by blast
@@ -420,7 +420,7 @@
have "\<forall>r\<in>R. Refl r" "\<forall>r\<in>R. trans r" "\<forall>r\<in>R. antisym r" "\<forall>r\<in>R. Total r"
"\<forall>r\<in>R. wf(r-Id)"
using Chain_wo[OF `R \<in> Chain I`] by(simp_all add:order_on_defs)
- have "Refl (\<Union>R)" using `\<forall>r\<in>R. Refl r` by(auto simp:refl_def)
+ have "Refl (\<Union>R)" using `\<forall>r\<in>R. Refl r` by(auto simp:refl_on_def)
moreover have "trans (\<Union>R)"
by(rule chain_subset_trans_Union[OF subch `\<forall>r\<in>R. trans r`])
moreover have "antisym(\<Union>R)"
@@ -452,7 +452,7 @@
proof
assume "m={}"
moreover have "Well_order {(x,x)}"
- by(simp add:order_on_defs refl_def trans_def antisym_def total_on_def Field_def Domain_def Range_def)
+ by(simp add:order_on_defs refl_on_def trans_def antisym_def total_on_def Field_def Domain_def Range_def)
ultimately show False using max
by (auto simp:I_def init_seg_of_def simp del:Field_insert)
qed
@@ -467,7 +467,7 @@
have "Refl m" "trans m" "antisym m" "Total m" "wf(m-Id)"
using `Well_order m` by(simp_all add:order_on_defs)
--{*We show that the extension is a well-order*}
- have "Refl ?m" using `Refl m` Fm by(auto simp:refl_def)
+ have "Refl ?m" using `Refl m` Fm by(auto simp:refl_on_def)
moreover have "trans ?m" using `trans m` `x \<notin> Field m`
unfolding trans_def Field_def Domain_def Range_def by blast
moreover have "antisym ?m" using `antisym m` `x \<notin> Field m`
@@ -500,10 +500,10 @@
using well_ordering[where 'a = "'a"] by blast
let ?r = "{(x,y). x:A & y:A & (x,y):r}"
have 1: "Field ?r = A" using wo univ
- by(fastsimp simp: Field_def Domain_def Range_def order_on_defs refl_def)
+ by(fastsimp simp: Field_def Domain_def Range_def order_on_defs refl_on_def)
have "Refl r" "trans r" "antisym r" "Total r" "wf(r-Id)"
using `Well_order r` by(simp_all add:order_on_defs)
- have "Refl ?r" using `Refl r` by(auto simp:refl_def 1 univ)
+ have "Refl ?r" using `Refl r` by(auto simp:refl_on_def 1 univ)
moreover have "trans ?r" using `trans r`
unfolding trans_def by blast
moreover have "antisym ?r" using `antisym r`
--- a/src/HOL/Library/reflection.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Library/reflection.ML Wed Mar 04 10:45:52 2009 +0100
@@ -88,17 +88,12 @@
fun dest_listT (Type ("List.list", [T])) = T;
-fun partition P [] = ([],[])
- | partition P (x::xs) =
- let val (yes,no) = partition P xs
- in if P x then (x::yes,no) else (yes, x::no) end
-
fun rearrange congs =
let
fun P (_, th) =
let val @{term "Trueprop"}$(Const ("op =",_) $l$_) = concl_of th
in can dest_Var l end
- val (yes,no) = partition P congs
+ val (yes,no) = List.partition P congs
in no @ yes end
fun genreif ctxt raw_eqs t =
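
List.partition from the SML Basis Library, now used in rearrange, splits a list into the elements satisfying a predicate and the rest, preserving order, which is exactly what the removed local helper computed. A quick illustrative use in any SML REPL:

    val (evens, odds) = List.partition (fn n => n mod 2 = 0) [1, 2, 3, 4, 5];
    (* evens = [2, 4], odds = [1, 3, 5] *)
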
--- a/src/HOL/List.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/List.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1461,6 +1461,12 @@
declare take_Cons [simp del] and drop_Cons [simp del]
+lemma take_1_Cons [simp]: "take 1 (x # xs) = [x]"
+ unfolding One_nat_def by simp
+
+lemma drop_1_Cons [simp]: "drop 1 (x # xs) = xs"
+ unfolding One_nat_def by simp
+
lemma take_Suc: "xs ~= [] ==> take (Suc n) xs = hd xs # take n (tl xs)"
by(clarsimp simp add:neq_Nil_conv)
@@ -1592,13 +1598,13 @@
by (simp add: butlast_conv_take min_max.inf_absorb1 min_max.inf_absorb2)
lemma butlast_drop: "butlast (drop n xs) = drop n (butlast xs)"
-by (simp add: butlast_conv_take drop_take)
+by (simp add: butlast_conv_take drop_take add_ac)
lemma take_butlast: "n < length xs ==> take n (butlast xs) = take n xs"
by (simp add: butlast_conv_take min_max.inf_absorb1)
lemma drop_butlast: "drop n (butlast xs) = butlast (drop n xs)"
-by (simp add: butlast_conv_take drop_take)
+by (simp add: butlast_conv_take drop_take add_ac)
lemma hd_drop_conv_nth: "\<lbrakk> xs \<noteq> []; n < length xs \<rbrakk> \<Longrightarrow> hd(drop n xs) = xs!n"
by(simp add: hd_conv_nth)
@@ -1639,7 +1645,7 @@
done
lemma take_hd_drop:
- "n < length xs \<Longrightarrow> take n xs @ [hd (drop n xs)] = take (n+1) xs"
+ "n < length xs \<Longrightarrow> take n xs @ [hd (drop n xs)] = take (Suc n) xs"
apply(induct xs arbitrary: n)
apply simp
apply(simp add:drop_Cons split:nat.split)
@@ -3220,7 +3226,7 @@
lemma lenlex_conv:
"lenlex r = {(xs,ys). length xs < length ys |
length xs = length ys \<and> (xs, ys) : lex r}"
-by (simp add: lenlex_def diag_def lex_prod_def inv_image_def)
+by (simp add: lenlex_def Id_on_def lex_prod_def inv_image_def)
lemma Nil_notin_lex [iff]: "([], ys) \<notin> lex r"
by (simp add: lex_conv)
@@ -3386,8 +3392,8 @@
apply (erule listrel.induct, auto)
done
-lemma listrel_refl: "refl A r \<Longrightarrow> refl (lists A) (listrel r)"
-apply (simp add: refl_def listrel_subset Ball_def)
+lemma listrel_refl_on: "refl_on A r \<Longrightarrow> refl_on (lists A) (listrel r)"
+apply (simp add: refl_on_def listrel_subset Ball_def)
apply (rule allI)
apply (induct_tac x)
apply (auto intro: listrel.intros)
@@ -3408,7 +3414,7 @@
done
theorem equiv_listrel: "equiv A r \<Longrightarrow> equiv (lists A) (listrel r)"
-by (simp add: equiv_def listrel_refl listrel_sym listrel_trans)
+by (simp add: equiv_def listrel_refl_on listrel_sym listrel_trans)
lemma listrel_Nil [simp]: "listrel r `` {[]} = {[]}"
by (blast intro: listrel.intros)
@@ -3564,52 +3570,51 @@
open Basic_Code_Thingol;
-fun implode_list (nil', cons') t =
- let
- fun dest_cons (IConst (c, _) `$ t1 `$ t2) =
- if c = cons'
- then SOME (t1, t2)
- else NONE
- | dest_cons _ = NONE;
- val (ts, t') = Code_Thingol.unfoldr dest_cons t;
- in case t'
- of IConst (c, _) => if c = nil' then SOME ts else NONE
+fun implode_list naming t = case pairself
+ (Code_Thingol.lookup_const naming) (@{const_name Nil}, @{const_name Cons})
+ of (SOME nil', SOME cons') => let
+ fun dest_cons (IConst (c, _) `$ t1 `$ t2) =
+ if c = cons'
+ then SOME (t1, t2)
+ else NONE
+ | dest_cons _ = NONE;
+ val (ts, t') = Code_Thingol.unfoldr dest_cons t;
+ in case t'
+ of IConst (c, _) => if c = nil' then SOME ts else NONE
+ | _ => NONE
+ end
| _ => NONE
- end;
-
-fun decode_char nibbles' (IConst (c1, _), IConst (c2, _)) =
- let
- fun idx c = find_index (curry (op =) c) nibbles';
- fun decode ~1 _ = NONE
- | decode _ ~1 = NONE
- | decode n m = SOME (chr (n * 16 + m));
- in decode (idx c1) (idx c2) end
- | decode_char _ _ = NONE;
-
-fun implode_string (char', nibbles') mk_char mk_string ts =
- let
- fun implode_char (IConst (c, _) `$ t1 `$ t2) =
- if c = char' then decode_char nibbles' (t1, t2) else NONE
- | implode_char _ = NONE;
- val ts' = map implode_char ts;
- in if forall is_some ts'
- then (SOME o Code_Printer.str o mk_string o implode o map_filter I) ts'
- else NONE
- end;
-
-fun list_names naming = pairself (the o Code_Thingol.lookup_const naming)
- (@{const_name Nil}, @{const_name Cons});
-fun char_name naming = (the o Code_Thingol.lookup_const naming)
- @{const_name Char}
-fun nibble_names naming = map (the o Code_Thingol.lookup_const naming)
- [@{const_name Nibble0}, @{const_name Nibble1},
+
+fun decode_char naming (IConst (c1, _), IConst (c2, _)) = (case map_filter
+ (Code_Thingol.lookup_const naming)[@{const_name Nibble0}, @{const_name Nibble1},
@{const_name Nibble2}, @{const_name Nibble3},
@{const_name Nibble4}, @{const_name Nibble5},
@{const_name Nibble6}, @{const_name Nibble7},
@{const_name Nibble8}, @{const_name Nibble9},
@{const_name NibbleA}, @{const_name NibbleB},
@{const_name NibbleC}, @{const_name NibbleD},
- @{const_name NibbleE}, @{const_name NibbleF}];
+ @{const_name NibbleE}, @{const_name NibbleF}]
+ of nibbles' as [_, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _] => let
+ fun idx c = find_index (curry (op =) c) nibbles';
+ fun decode ~1 _ = NONE
+ | decode _ ~1 = NONE
+ | decode n m = SOME (chr (n * 16 + m));
+ in decode (idx c1) (idx c2) end
+ | _ => NONE)
+ | decode_char _ _ = NONE
+
+fun implode_string naming mk_char mk_string ts = case
+ Code_Thingol.lookup_const naming @{const_name Char}
+ of SOME char' => let
+ fun implode_char (IConst (c, _) `$ t1 `$ t2) =
+ if c = char' then decode_char naming (t1, t2) else NONE
+ | implode_char _ = NONE;
+ val ts' = map implode_char ts;
+ in if forall is_some ts'
+ then (SOME o Code_Printer.str o mk_string o implode o map_filter I) ts'
+ else NONE
+ end
+ | _ => NONE;
fun default_list (target_fxy, target_cons) pr fxy t1 t2 =
Code_Printer.brackify_infix (target_fxy, Code_Printer.R) fxy [
@@ -3622,7 +3627,7 @@
let
val mk_list = Code_Printer.literal_list literals;
fun pretty pr naming thm vars fxy [(t1, _), (t2, _)] =
- case Option.map (cons t1) (implode_list (list_names naming) t2)
+ case Option.map (cons t1) (implode_list naming t2)
of SOME ts => mk_list (map (pr vars Code_Printer.NOBR) ts)
| NONE => default_list (Code_Printer.infix_cons literals) (pr vars) fxy t1 t2;
in (2, pretty) end;
@@ -3633,8 +3638,8 @@
val mk_char = Code_Printer.literal_char literals;
val mk_string = Code_Printer.literal_string literals;
fun pretty pr naming thm vars fxy [(t1, _), (t2, _)] =
- case Option.map (cons t1) (implode_list (list_names naming) t2)
- of SOME ts => (case implode_string (char_name naming, nibble_names naming) mk_char mk_string ts
+ case Option.map (cons t1) (implode_list naming t2)
+ of SOME ts => (case implode_string naming mk_char mk_string ts
of SOME p => p
| NONE => mk_list (map (pr vars Code_Printer.NOBR) ts))
| NONE => default_list (Code_Printer.infix_cons literals) (pr vars) fxy t1 t2;
@@ -3644,7 +3649,7 @@
let
val mk_char = Code_Printer.literal_char literals;
fun pretty _ naming thm _ _ [(t1, _), (t2, _)] =
- case decode_char (nibble_names naming) (t1, t2)
+ case decode_char naming (t1, t2)
of SOME c => (Code_Printer.str o mk_char) c
| NONE => Code_Printer.nerror thm "Illegal character expression";
in (2, pretty) end;
@@ -3654,8 +3659,8 @@
val mk_char = Code_Printer.literal_char literals;
val mk_string = Code_Printer.literal_string literals;
fun pretty _ naming thm _ _ [(t, _)] =
- case implode_list (list_names naming) t
- of SOME ts => (case implode_string (char_name naming, nibble_names naming) mk_char mk_string ts
+ case implode_list naming t
+ of SOME ts => (case implode_string naming mk_char mk_string ts
of SOME p => p
| NONE => Code_Printer.nerror thm "Illegal message expression")
| NONE => Code_Printer.nerror thm "Illegal message expression";
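
decode_char, in both its old and its reworked form, assembles a character from two nibble indices as chr (n * 16 + m). For example, in plain Standard ML, illustration only:

    val c = chr (4 * 16 + 1);   (* #"A", since 0x41 = 65 *)
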
--- a/src/HOL/MacLaurin.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/MacLaurin.thy Wed Mar 04 10:45:52 2009 +0100
@@ -81,7 +81,7 @@
prefer 2 apply simp
apply (frule less_iff_Suc_add [THEN iffD1], clarify)
apply (simp del: setsum_op_ivl_Suc)
- apply (insert sumr_offset4 [of 1])
+ apply (insert sumr_offset4 [of "Suc 0"])
apply (simp del: setsum_op_ivl_Suc fact_Suc realpow_Suc)
apply (rule lemma_DERIV_subst)
apply (rule DERIV_add)
@@ -124,7 +124,7 @@
have g2: "g 0 = 0 & g h = 0"
apply (simp add: m f_h g_def del: setsum_op_ivl_Suc)
- apply (cut_tac n = m and k = 1 in sumr_offset2)
+ apply (cut_tac n = m and k = "Suc 0" in sumr_offset2)
apply (simp add: eq_diff_eq' diff_0 del: setsum_op_ivl_Suc)
done
@@ -144,7 +144,7 @@
apply (simp add: m difg_def)
apply (frule less_iff_Suc_add [THEN iffD1], clarify)
apply (simp del: setsum_op_ivl_Suc)
- apply (insert sumr_offset4 [of 1])
+ apply (insert sumr_offset4 [of "Suc 0"])
apply (simp del: setsum_op_ivl_Suc fact_Suc realpow_Suc)
done
@@ -552,6 +552,10 @@
"[|x = y; abs u \<le> (v::real) |] ==> \<bar>(x + u) - y\<bar> \<le> v"
by auto
+text {* TODO: move to Parity.thy *}
+lemma nat_odd_1 [simp]: "odd (1::nat)"
+ unfolding even_nat_def by simp
+
lemma Maclaurin_sin_bound:
"abs(sin x - (\<Sum>m=0..<n. (if even m then 0 else (-1 ^ ((m - Suc 0) div 2)) / real (fact m)) *
x ^ m)) \<le> inverse(real (fact n)) * \<bar>x\<bar> ^ n"
--- a/src/HOL/MetisExamples/Tarski.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/MetisExamples/Tarski.thy Wed Mar 04 10:45:52 2009 +0100
@@ -61,7 +61,7 @@
"Top po == greatest (%x. True) po"
PartialOrder :: "('a potype) set"
- "PartialOrder == {P. refl (pset P) (order P) & antisym (order P) &
+ "PartialOrder == {P. refl_on (pset P) (order P) & antisym (order P) &
trans (order P)}"
CompleteLattice :: "('a potype) set"
@@ -126,7 +126,7 @@
subsection {* Partial Order *}
-lemma (in PO) PO_imp_refl: "refl A r"
+lemma (in PO) PO_imp_refl_on: "refl_on A r"
apply (insert cl_po)
apply (simp add: PartialOrder_def A_def r_def)
done
@@ -143,7 +143,7 @@
lemma (in PO) reflE: "x \<in> A ==> (x, x) \<in> r"
apply (insert cl_po)
-apply (simp add: PartialOrder_def refl_def A_def r_def)
+apply (simp add: PartialOrder_def refl_on_def A_def r_def)
done
lemma (in PO) antisymE: "[| (a, b) \<in> r; (b, a) \<in> r |] ==> a = b"
@@ -166,7 +166,7 @@
apply (simp (no_asm) add: PartialOrder_def)
apply auto
-- {* refl *}
-apply (simp add: refl_def induced_def)
+apply (simp add: refl_on_def induced_def)
apply (blast intro: reflE)
-- {* antisym *}
apply (simp add: antisym_def induced_def)
@@ -203,7 +203,7 @@
lemma (in PO) dualPO: "dual cl \<in> PartialOrder"
apply (insert cl_po)
-apply (simp add: PartialOrder_def dual_def refl_converse
+apply (simp add: PartialOrder_def dual_def refl_on_converse
trans_converse antisym_converse)
done
@@ -230,12 +230,12 @@
lemmas CL_imp_PO = CL_subset_PO [THEN subsetD]
-declare PO.PO_imp_refl [OF PO.intro [OF CL_imp_PO], simp]
+declare PO.PO_imp_refl_on [OF PO.intro [OF CL_imp_PO], simp]
declare PO.PO_imp_sym [OF PO.intro [OF CL_imp_PO], simp]
declare PO.PO_imp_trans [OF PO.intro [OF CL_imp_PO], simp]
-lemma (in CL) CO_refl: "refl A r"
-by (rule PO_imp_refl)
+lemma (in CL) CO_refl_on: "refl_on A r"
+by (rule PO_imp_refl_on)
lemma (in CL) CO_antisym: "antisym r"
by (rule PO_imp_sym)
@@ -501,10 +501,10 @@
apply (rule conjI)
ML_command{*AtpWrapper.problem_name:="Tarski__CLF_flubH_le_lubH_simpler"*}
(*??no longer terminates, with combinators
-apply (metis CO_refl lubH_le_flubH monotone_def monotone_f reflD1 reflD2)
+apply (metis CO_refl_on lubH_le_flubH monotone_def monotone_f reflD1 reflD2)
*)
-apply (metis CO_refl lubH_le_flubH monotoneE [OF monotone_f] reflD1 reflD2)
-apply (metis CO_refl lubH_le_flubH reflD2)
+apply (metis CO_refl_on lubH_le_flubH monotoneE [OF monotone_f] refl_onD1 refl_onD2)
+apply (metis CO_refl_on lubH_le_flubH refl_onD2)
done
declare CLF.f_in_funcset[rule del] funcset_mem[rule del]
CL.lub_in_lattice[rule del] PO.monotoneE[rule del]
@@ -542,12 +542,12 @@
by (metis 5 3)
have 7: "(lub H cl, lub H cl) \<in> r"
by (metis 6 4)
-have 8: "\<And>X1. lub H cl \<in> X1 \<or> \<not> refl X1 r"
- by (metis 7 reflD2)
-have 9: "\<not> refl A r"
+have 8: "\<And>X1. lub H cl \<in> X1 \<or> \<not> refl_on X1 r"
+ by (metis 7 refl_onD2)
+have 9: "\<not> refl_on A r"
by (metis 8 2)
show "False"
- by (metis CO_refl 9);
+ by (metis CO_refl_on 9);
next --{*apparently the way to insert a second structured proof*}
show "H = {x. (x, f x) \<in> r \<and> x \<in> A} \<Longrightarrow>
f (lub {x. (x, f x) \<in> r \<and> x \<in> A} cl) = lub {x. (x, f x) \<in> r \<and> x \<in> A} cl"
@@ -589,13 +589,13 @@
apply (simp add: fix_def)
apply (rule conjI)
ML_command{*AtpWrapper.problem_name:="Tarski__CLF_lubH_is_fixp_simpler"*}
-apply (metis CO_refl lubH_le_flubH reflD1)
+apply (metis CO_refl_on lubH_le_flubH refl_onD1)
apply (metis antisymE flubH_le_lubH lubH_le_flubH)
done
lemma (in CLF) fix_in_H:
"[| H = {x. (x, f x) \<in> r & x \<in> A}; x \<in> P |] ==> x \<in> H"
-by (simp add: P_def fix_imp_eq [of _ f A] reflE CO_refl
+by (simp add: P_def fix_imp_eq [of _ f A] reflE CO_refl_on
fix_subset [of f A, THEN subsetD])
@@ -678,16 +678,16 @@
ML{*AtpWrapper.problem_name:="Tarski__rel_imp_elem"*}
- declare (in CLF) CO_refl[simp] refl_def [simp]
+ declare (in CLF) CO_refl_on[simp] refl_on_def [simp]
lemma (in CLF) rel_imp_elem: "(x, y) \<in> r ==> x \<in> A"
-by (metis CO_refl reflD1)
- declare (in CLF) CO_refl[simp del] refl_def [simp del]
+by (metis CO_refl_on refl_onD1)
+ declare (in CLF) CO_refl_on[simp del] refl_on_def [simp del]
ML{*AtpWrapper.problem_name:="Tarski__interval_subset"*}
declare (in CLF) rel_imp_elem[intro]
declare interval_def [simp]
lemma (in CLF) interval_subset: "[| a \<in> A; b \<in> A |] ==> interval r a b \<subseteq> A"
-by (metis CO_refl interval_imp_mem reflD reflD2 rel_imp_elem subset_eq)
+by (metis CO_refl_on interval_imp_mem refl_onD refl_onD2 rel_imp_elem subset_eq)
declare (in CLF) rel_imp_elem[rule del]
declare interval_def [simp del]
--- a/src/HOL/NSA/NSA.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/NSA/NSA.thy Wed Mar 04 10:45:52 2009 +0100
@@ -157,7 +157,7 @@
by transfer (rule norm_divide)
lemma hypreal_hnorm_def [simp]:
- "\<And>r::hypreal. hnorm r \<equiv> \<bar>r\<bar>"
+ "\<And>r::hypreal. hnorm r = \<bar>r\<bar>"
by transfer (rule real_norm_def)
lemma hnorm_add_less:
--- a/src/HOL/NSA/StarDef.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/NSA/StarDef.thy Wed Mar 04 10:45:52 2009 +0100
@@ -64,7 +64,7 @@
lemma equiv_starrel: "equiv UNIV starrel"
proof (rule equiv.intro)
- show "reflexive starrel" by (simp add: refl_def)
+ show "refl starrel" by (simp add: refl_on_def)
show "sym starrel" by (simp add: sym_def eq_commute)
show "trans starrel" by (auto intro: transI elim!: ultra)
qed
--- a/src/HOL/Nat.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Nat.thy Wed Mar 04 10:45:52 2009 +0100
@@ -196,8 +196,8 @@
instance proof
fix n m q :: nat
- show "0 \<noteq> (1::nat)" by simp
- show "1 * n = n" by simp
+ show "0 \<noteq> (1::nat)" unfolding One_nat_def by simp
+ show "1 * n = n" unfolding One_nat_def by simp
show "n * m = m * n" by (induct n) simp_all
show "(n * m) * q = n * (m * q)" by (induct n) (simp_all add: add_mult_distrib)
show "(n + m) * q = n * q + m * q" by (rule add_mult_distrib)
@@ -280,6 +280,9 @@
lemma diff_add_0: "n - (n + m) = (0::nat)"
by (induct n) simp_all
+lemma diff_Suc_1 [simp]: "Suc n - 1 = n"
+ unfolding One_nat_def by simp
+
text {* Difference distributes over multiplication *}
lemma diff_mult_distrib: "((m::nat) - n) * k = (m * k) - (n * k)"
@@ -307,18 +310,24 @@
lemmas nat_distrib =
add_mult_distrib add_mult_distrib2 diff_mult_distrib diff_mult_distrib2
-lemma mult_eq_1_iff [simp]: "(m * n = Suc 0) = (m = 1 & n = 1)"
+lemma mult_eq_1_iff [simp]: "(m * n = Suc 0) = (m = Suc 0 & n = Suc 0)"
apply (induct m)
apply simp
apply (induct n)
apply auto
done
-lemma one_eq_mult_iff [simp,noatp]: "(Suc 0 = m * n) = (m = 1 & n = 1)"
+lemma one_eq_mult_iff [simp,noatp]: "(Suc 0 = m * n) = (m = Suc 0 & n = Suc 0)"
apply (rule trans)
apply (rule_tac [2] mult_eq_1_iff, fastsimp)
done
+lemma nat_mult_eq_1_iff [simp]: "m * n = (1::nat) \<longleftrightarrow> m = 1 \<and> n = 1"
+ unfolding One_nat_def by (rule mult_eq_1_iff)
+
+lemma nat_1_eq_mult_iff [simp]: "(1::nat) = m * n \<longleftrightarrow> m = 1 \<and> n = 1"
+ unfolding One_nat_def by (rule one_eq_mult_iff)
+
lemma mult_cancel1 [simp]: "(k * m = k * n) = (m = n | (k = (0::nat)))"
proof -
have "k \<noteq> 0 \<Longrightarrow> k * m = k * n \<Longrightarrow> m = n"
@@ -465,11 +474,11 @@
lemma less_Suc_eq: "(m < Suc n) = (m < n | m = n)"
unfolding less_Suc_eq_le le_less ..
-lemma less_one [iff, noatp]: "(n < (1::nat)) = (n = 0)"
+lemma less_Suc0 [iff]: "(n < Suc 0) = (n = 0)"
by (simp add: less_Suc_eq)
-lemma less_Suc0 [iff]: "(n < Suc 0) = (n = 0)"
- by (simp add: less_Suc_eq)
+lemma less_one [iff, noatp]: "(n < (1::nat)) = (n = 0)"
+ unfolding One_nat_def by (rule less_Suc0)
lemma Suc_mono: "m < n ==> Suc m < Suc n"
by simp
@@ -692,6 +701,9 @@
lemma Suc_pred [simp]: "n>0 ==> Suc (n - Suc 0) = n"
by (simp add: diff_Suc split: nat.split)
+lemma Suc_diff_1 [simp]: "0 < n ==> Suc (n - 1) = n"
+unfolding One_nat_def by (rule Suc_pred)
+
lemma nat_add_left_cancel_le [simp]: "(k + m \<le> k + n) = (m\<le>(n::nat))"
by (induct k) simp_all
@@ -735,6 +747,11 @@
show "i < j ==> 0 < k ==> k * i < k * j" by (simp add: mult_less_mono2)
qed
+instance nat :: no_zero_divisors
+proof
+ fix a::nat and b::nat show "a ~= 0 \<Longrightarrow> b ~= 0 \<Longrightarrow> a * b ~= 0" by auto
+qed
+
lemma nat_mult_1: "(1::nat) * n = n"
by simp
@@ -795,6 +812,7 @@
done
lemma ex_least_nat_less: "\<not>P(0) \<Longrightarrow> P(n::nat) \<Longrightarrow> \<exists>k<n. (\<forall>i\<le>k. \<not>P i) & P(k+1)"
+ unfolding One_nat_def
apply (cases n)
apply blast
apply (frule (1) ex_least_nat_le)
@@ -1084,7 +1102,7 @@
apply simp_all
done
-lemma one_le_mult_iff [simp]: "(Suc 0 \<le> m * n) = (1 \<le> m & 1 \<le> n)"
+lemma one_le_mult_iff [simp]: "(Suc 0 \<le> m * n) = (Suc 0 \<le> m & Suc 0 \<le> n)"
apply (induct m)
apply simp
apply (case_tac n)
@@ -1159,7 +1177,7 @@
| of_nat_Suc: "of_nat (Suc m) = 1 + of_nat m"
lemma of_nat_1 [simp]: "of_nat 1 = 1"
- by simp
+ unfolding One_nat_def by simp
lemma of_nat_add [simp]: "of_nat (m + n) = of_nat m + of_nat n"
by (induct m) (simp_all add: add_ac)
@@ -1271,7 +1289,7 @@
end
lemma of_nat_id [simp]: "of_nat n = n"
- by (induct n) auto
+ by (induct n) (auto simp add: One_nat_def)
lemma of_nat_eq_id [simp]: "of_nat = id"
by (auto simp add: expand_fun_eq)
@@ -1376,7 +1394,7 @@
apply(induct_tac k)
apply simp
apply(erule_tac x="m+n" in meta_allE)
-apply(erule_tac x="m+n+1" in meta_allE)
+apply(erule_tac x="Suc(m+n)" in meta_allE)
apply simp
done
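
A recurring pattern in the Nat.thy changes above is to bridge the literal 1 and Suc 0 explicitly by unfolding One_nat_def (1 = Suc 0), as in diff_Suc_1, less_one and nat_mult_eq_1_iff. The following is only a sketch of that idiom, not part of the changeset, relying solely on One_nat_def and the mult_eq_1_iff lemma shown above:

(* sketch only: unfold the literal 1 to Suc 0, then finish with the
   Suc-0 formulation mult_eq_1_iff *)
lemma "m * n = (1::nat) \<Longrightarrow> n = 1"
  unfolding One_nat_def by (simp add: mult_eq_1_iff)
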
--- a/src/HOL/NatBin.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/NatBin.thy Wed Mar 04 10:45:52 2009 +0100
@@ -159,6 +159,21 @@
unfolding nat_number_of_def number_of_is_id numeral_simps
by (simp add: nat_add_distrib)
+lemma nat_number_of_add_1 [simp]:
+ "number_of v + (1::nat) =
+ (if v < Int.Pls then 1 else number_of (Int.succ v))"
+ unfolding nat_number_of_def number_of_is_id numeral_simps
+ by (simp add: nat_add_distrib)
+
+lemma nat_1_add_number_of [simp]:
+ "(1::nat) + number_of v =
+ (if v < Int.Pls then 1 else number_of (Int.succ v))"
+ unfolding nat_number_of_def number_of_is_id numeral_simps
+ by (simp add: nat_add_distrib)
+
+lemma nat_1_add_1 [simp]: "1 + 1 = (2::nat)"
+ by (rule int_int_eq [THEN iffD1]) simp
+
subsubsection{*Subtraction *}
@@ -178,6 +193,12 @@
unfolding nat_number_of_def number_of_is_id numeral_simps neg_def
by auto
+lemma nat_number_of_diff_1 [simp]:
+ "number_of v - (1::nat) =
+ (if v \<le> Int.Pls then 0 else number_of (Int.pred v))"
+ unfolding nat_number_of_def number_of_is_id numeral_simps
+ by auto
+
subsubsection{*Multiplication *}
@@ -362,9 +383,14 @@
unfolding numeral_2_eq_2 by (erule (2) power_eq_imp_eq_base, simp)
lemma power_minus1_even[simp]: "(- 1) ^ (2*n) = (1::'a::{comm_ring_1,recpower})"
-apply (induct "n")
-apply (auto simp add: power_Suc power_add)
-done
+proof (induct n)
+ case 0 show ?case by simp
+next
+ case (Suc n) then show ?case by (simp add: power_Suc power_add)
+qed
+
+lemma power_minus1_odd: "(- 1) ^ Suc(2*n) = -(1::'a::{comm_ring_1,recpower})"
+ by (simp add: power_Suc)
lemma power_even_eq: "(a::'a::recpower) ^ (2*n) = (a^n)^2"
by (subst mult_commute) (simp add: power_mult)
@@ -437,19 +463,13 @@
(* These two can be useful when m = number_of... *)
lemma add_eq_if: "(m::nat) + n = (if m=0 then n else Suc ((m - 1) + n))"
-apply (case_tac "m")
-apply (simp_all add: numerals)
-done
+ unfolding One_nat_def by (cases m) simp_all
lemma mult_eq_if: "(m::nat) * n = (if m=0 then 0 else n + ((m - 1) * n))"
-apply (case_tac "m")
-apply (simp_all add: numerals)
-done
+ unfolding One_nat_def by (cases m) simp_all
lemma power_eq_if: "(p ^ m :: nat) = (if m=0 then 1 else p * (p ^ (m - 1)))"
-apply (case_tac "m")
-apply (simp_all add: numerals)
-done
+ unfolding One_nat_def by (cases m) simp_all
subsection{*Comparisons involving (0::nat) *}
--- a/src/HOL/Nominal/Examples/Fsub.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Nominal/Examples/Fsub.thy Wed Mar 04 10:45:52 2009 +0100
@@ -7,13 +7,18 @@
text{* Authors: Christian Urban,
Benjamin Pierce,
Dimitrios Vytiniotis
- Stephanie Weirich and
+ Stephanie Weirich
Steve Zdancewic
+ Julien Narboux
+ Stefan Berghofer
- with great help from Stefan Berghofer and Markus Wenzel. *}
+ with great help from Markus Wenzel. *}
section {* Types for Names, Nominal Datatype Declaration for Types and Terms *}
+no_syntax
+ "_Map" :: "maplets => 'a ~=> 'b" ("(1[_])")
+
text {* The main point of this solution is to use names everywhere (be they bound,
binding or free). In System \FSUB{} there are two kinds of names corresponding to
type-variables and to term-variables. These two kinds of names are represented in
@@ -31,30 +36,35 @@
nominal_datatype ty =
Tvar "tyvrs"
| Top
- | Arrow "ty" "ty" ("_ \<rightarrow> _" [100,100] 100)
+ | Arrow "ty" "ty" (infixr "\<rightarrow>" 200)
| Forall "\<guillemotleft>tyvrs\<guillemotright>ty" "ty"
nominal_datatype trm =
Var "vrs"
- | Lam "\<guillemotleft>vrs\<guillemotright>trm" "ty"
- | Tabs "\<guillemotleft>tyvrs\<guillemotright>trm" "ty"
- | App "trm" "trm"
- | Tapp "trm" "ty"
+ | Abs "\<guillemotleft>vrs\<guillemotright>trm" "ty"
+ | TAbs "\<guillemotleft>tyvrs\<guillemotright>trm" "ty"
+ | App "trm" "trm" (infixl "\<cdot>" 200)
+ | TApp "trm" "ty" (infixl "\<cdot>\<^sub>\<tau>" 200)
text {* To be polite to the eye, some more familiar notation is introduced.
Because of the change in the order of arguments, one needs to use
translation rules, instead of syntax annotations at the term-constructors
as given above for @{term "Arrow"}. *}
-syntax
- Forall_syn :: "tyvrs \<Rightarrow> ty \<Rightarrow> ty \<Rightarrow> ty" ("\<forall>[_<:_]._" [100,100,100] 100)
- Lam_syn :: "vrs \<Rightarrow> ty \<Rightarrow> trm \<Rightarrow> trm" ("Lam [_:_]._" [100,100,100] 100)
- Tabs_syn :: "tyvrs \<Rightarrow> ty \<Rightarrow> trm \<Rightarrow> trm" ("Tabs [_<:_]._" [100,100,100] 100)
+abbreviation
+ Forall_syn :: "tyvrs \<Rightarrow> ty \<Rightarrow> ty \<Rightarrow> ty" ("(3\<forall>_<:_./ _)" [0, 0, 10] 10)
+where
+ "\<forall>X<:T\<^isub>1. T\<^isub>2 \<equiv> ty.Forall X T\<^isub>2 T\<^isub>1"
-translations
- "\<forall>[X<:T\<^isub>1].T\<^isub>2" \<rightleftharpoons> "ty.Forall X T\<^isub>2 T\<^isub>1"
- "Lam [x:T].t" \<rightleftharpoons> "trm.Lam x t T"
- "Tabs [X<:T].t" \<rightleftharpoons> "trm.Tabs X t T"
+abbreviation
+ Abs_syn :: "vrs \<Rightarrow> ty \<Rightarrow> trm \<Rightarrow> trm" ("(3\<lambda>_:_./ _)" [0, 0, 10] 10)
+where
+ "\<lambda>x:T. t \<equiv> trm.Abs x t T"
+
+abbreviation
+ TAbs_syn :: "tyvrs \<Rightarrow> ty \<Rightarrow> trm \<Rightarrow> trm" ("(3\<lambda>_<:_./ _)" [0, 0, 10] 10)
+where
+ "\<lambda>X<:T. t \<equiv> trm.TAbs X t T"
text {* Again there are numerous facts that are proved automatically for @{typ "ty"}
and @{typ "trm"}: for example that the set of free variables, i.e.~the @{text "support"},
@@ -64,13 +74,17 @@
and @{typ "trm"}s are equal: *}
lemma alpha_illustration:
- shows "\<forall>[X<:T].(Tvar X) = \<forall>[Y<:T].(Tvar Y)"
- and "Lam [x:T].(Var x) = Lam [y:T].(Var y)"
+ shows "(\<forall>X<:T. Tvar X) = (\<forall>Y<:T. Tvar Y)"
+ and "(\<lambda>x:T. Var x) = (\<lambda>y:T. Var y)"
by (simp_all add: ty.inject trm.inject alpha calc_atm fresh_atm)
section {* SubTyping Contexts *}
-types ty_context = "(tyvrs\<times>ty) list"
+nominal_datatype binding =
+ VarB vrs ty
+ | TVarB tyvrs ty
+
+types env = "binding list"
text {* Typing contexts are represented as lists that ``grow" on the left; we
thereby deviate from the convention in the POPLmark-paper. The lists contain
@@ -79,66 +93,139 @@
text {* In order to state validity-conditions for typing-contexts, the notion of
a @{text "domain"} of a typing-context is handy. *}
+nominal_primrec
+ "tyvrs_of" :: "binding \<Rightarrow> tyvrs set"
+where
+ "tyvrs_of (VarB x y) = {}"
+| "tyvrs_of (TVarB x y) = {x}"
+by auto
+
+nominal_primrec
+ "vrs_of" :: "binding \<Rightarrow> vrs set"
+where
+ "vrs_of (VarB x y) = {x}"
+| "vrs_of (TVarB x y) = {}"
+by auto
+
consts
- "domain" :: "ty_context \<Rightarrow> tyvrs set"
+ "ty_domain" :: "env \<Rightarrow> tyvrs set"
primrec
- "domain [] = {}"
- "domain (X#\<Gamma>) = {fst X}\<union>(domain \<Gamma>)"
+ "ty_domain [] = {}"
+ "ty_domain (X#\<Gamma>) = (tyvrs_of X)\<union>(ty_domain \<Gamma>)"
+
+consts
+ "trm_domain" :: "env \<Rightarrow> vrs set"
+primrec
+ "trm_domain [] = {}"
+ "trm_domain (X#\<Gamma>) = (vrs_of X)\<union>(trm_domain \<Gamma>)"
-lemma domain_eqvt[eqvt]:
+lemma vrs_of_eqvt[eqvt]:
+ fixes pi ::"tyvrs prm"
+ and pi'::"vrs prm"
+ shows "pi \<bullet>(tyvrs_of x) = tyvrs_of (pi\<bullet>x)"
+ and "pi'\<bullet>(tyvrs_of x) = tyvrs_of (pi'\<bullet>x)"
+ and "pi \<bullet>(vrs_of x) = vrs_of (pi\<bullet>x)"
+ and "pi'\<bullet>(vrs_of x) = vrs_of (pi'\<bullet>x)"
+by (nominal_induct x rule: binding.strong_induct) (simp_all add: tyvrs_of.simps eqvts)
+
+lemma domains_eqvt[eqvt]:
fixes pi::"tyvrs prm"
and pi'::"vrs prm"
- shows "pi\<bullet>(domain \<Gamma>) = domain (pi\<bullet>\<Gamma>)"
- and "pi'\<bullet>(domain \<Gamma>) = domain (pi'\<bullet>\<Gamma>)"
- by (induct \<Gamma>) (simp_all add: eqvts)
+ shows "pi \<bullet>(ty_domain \<Gamma>) = ty_domain (pi\<bullet>\<Gamma>)"
+ and "pi'\<bullet>(ty_domain \<Gamma>) = ty_domain (pi'\<bullet>\<Gamma>)"
+ and "pi \<bullet>(trm_domain \<Gamma>) = trm_domain (pi\<bullet>\<Gamma>)"
+ and "pi'\<bullet>(trm_domain \<Gamma>) = trm_domain (pi'\<bullet>\<Gamma>)"
+by (induct \<Gamma>) (simp_all add: eqvts)
+
+lemma finite_vrs:
+ shows "finite (tyvrs_of x)"
+ and "finite (vrs_of x)"
+by (nominal_induct rule:binding.strong_induct, auto)
+
+lemma finite_domains:
+ shows "finite (ty_domain \<Gamma>)"
+ and "finite (trm_domain \<Gamma>)"
+by (induct \<Gamma>, auto simp add: finite_vrs)
+
+lemma ty_domain_supp:
+ shows "(supp (ty_domain \<Gamma>)) = (ty_domain \<Gamma>)"
+ and "(supp (trm_domain \<Gamma>)) = (trm_domain \<Gamma>)"
+by (simp only: at_fin_set_supp at_tyvrs_inst at_vrs_inst finite_domains)+
-lemma finite_domain:
- shows "finite (domain \<Gamma>)"
+lemma ty_domain_inclusion:
+ assumes a: "(TVarB X T)\<in>set \<Gamma>"
+ shows "X\<in>(ty_domain \<Gamma>)"
+using a by (induct \<Gamma>, auto)
+
+lemma ty_binding_existence:
+ assumes "X \<in> (tyvrs_of a)"
+ shows "\<exists>T.(TVarB X T=a)"
+ using assms
+by (nominal_induct a rule: binding.strong_induct, auto)
+
+lemma ty_domain_existence:
+ assumes a: "X\<in>(ty_domain \<Gamma>)"
+ shows "\<exists>T.(TVarB X T)\<in>set \<Gamma>"
+ using a
+ apply (induct \<Gamma>, auto)
+ apply (subgoal_tac "\<exists>T.(TVarB X T=a)")
+ apply (auto)
+ apply (auto simp add: ty_binding_existence)
+done
+
+lemma domains_append:
+ shows "ty_domain (\<Gamma>@\<Delta>) = ((ty_domain \<Gamma>) \<union> (ty_domain \<Delta>))"
+ and "trm_domain (\<Gamma>@\<Delta>) = ((trm_domain \<Gamma>) \<union> (trm_domain \<Delta>))"
by (induct \<Gamma>, auto)
-lemma domain_supp:
- shows "(supp (domain \<Gamma>)) = (domain \<Gamma>)"
- by (simp only: at_fin_set_supp at_tyvrs_inst finite_domain)
-
-lemma domain_inclusion:
- assumes a: "(X,T)\<in>set \<Gamma>"
- shows "X\<in>(domain \<Gamma>)"
- using a by (induct \<Gamma>, auto)
+lemma ty_vrs_prm_simp:
+ fixes pi::"vrs prm"
+ and S::"ty"
+ shows "pi\<bullet>S = S"
+by (induct S rule: ty.induct) (auto simp add: calc_atm)
-lemma domain_existence:
- assumes a: "X\<in>(domain \<Gamma>)"
- shows "\<exists>T.(X,T)\<in>set \<Gamma>"
- using a by (induct \<Gamma>, auto)
+lemma fresh_ty_domain_cons:
+ fixes X::"tyvrs"
+ shows "X\<sharp>(ty_domain (Y#\<Gamma>)) = (X\<sharp>(tyvrs_of Y) \<and> X\<sharp>(ty_domain \<Gamma>))"
+ apply (nominal_induct rule:binding.strong_induct)
+ apply (auto)
+ apply (simp add: fresh_def supp_def eqvts)
+ apply (simp add: fresh_fin_insert [OF pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst] finite_domains)
+ apply (simp add: fresh_def supp_def eqvts)
+ apply (simp add: fresh_fin_insert [OF pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst] finite_domains)+
+ done
-lemma domain_append:
- shows "domain (\<Gamma>@\<Delta>) = ((domain \<Gamma>) \<union> (domain \<Delta>))"
- by (induct \<Gamma>, auto)
-
-lemma fresh_domain_cons:
- fixes X::"tyvrs"
- shows "X\<sharp>(domain (Y#\<Gamma>)) = (X\<sharp>(fst Y) \<and> X\<sharp>(domain \<Gamma>))"
- by (simp add: fresh_fin_insert pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst finite_domain)
+lemma tyvrs_fresh:
+ fixes X::"tyvrs"
+ assumes "X \<sharp> a"
+ shows "X \<sharp> tyvrs_of a"
+ and "X \<sharp> vrs_of a"
+ using assms
+ apply (nominal_induct a rule:binding.strong_induct)
+ apply (auto)
+ apply (fresh_guess)+
+done
lemma fresh_domain:
fixes X::"tyvrs"
assumes a: "X\<sharp>\<Gamma>"
- shows "X\<sharp>(domain \<Gamma>)"
+ shows "X\<sharp>(ty_domain \<Gamma>)"
using a
apply(induct \<Gamma>)
apply(simp add: fresh_set_empty)
-apply(simp only: fresh_domain_cons)
-apply(auto simp add: fresh_prod fresh_list_cons)
+apply(simp only: fresh_ty_domain_cons)
+apply(auto simp add: fresh_prod fresh_list_cons tyvrs_fresh)
done
-text {* Not all lists of type @{typ "ty_context"} are well-formed. One condition
- requires that in @{term "(X,S)#\<Gamma>"} all free variables of @{term "S"} must be
- in the @{term "domain"} of @{term "\<Gamma>"}, that is @{term "S"} must be @{text "closed"}
+text {* Not all lists of type @{typ "env"} are well-formed. One condition
+ requires that in @{term "TVarB X S#\<Gamma>"} all free variables of @{term "S"} must be
+ in the @{term "ty_domain"} of @{term "\<Gamma>"}, that is @{term "S"} must be @{text "closed"}
in @{term "\<Gamma>"}. The set of free variables of @{term "S"} is the
@{text "support"} of @{term "S"}. *}
constdefs
- "closed_in" :: "ty \<Rightarrow> ty_context \<Rightarrow> bool" ("_ closed'_in _" [100,100] 100)
- "S closed_in \<Gamma> \<equiv> (supp S)\<subseteq>(domain \<Gamma>)"
+ "closed_in" :: "ty \<Rightarrow> env \<Rightarrow> bool" ("_ closed'_in _" [100,100] 100)
+ "S closed_in \<Gamma> \<equiv> (supp S)\<subseteq>(ty_domain \<Gamma>)"
lemma closed_in_eqvt[eqvt]:
fixes pi::"tyvrs prm"
@@ -150,80 +237,148 @@
then show "(pi\<bullet>S) closed_in (pi\<bullet>\<Gamma>)" by (simp add: closed_in_def eqvts)
qed
-lemma ty_vrs_prm_simp:
+lemma tyvrs_vrs_prm_simp:
fixes pi::"vrs prm"
- and S::"ty"
- shows "pi\<bullet>S = S"
-by (induct S rule: ty.induct) (auto simp add: calc_atm)
+ shows "tyvrs_of (pi\<bullet>a) = tyvrs_of a"
+ apply (nominal_induct rule:binding.strong_induct)
+ apply (simp_all add: eqvts)
+ apply (simp add: dj_perm_forget[OF dj_tyvrs_vrs])
+ done
-lemma ty_context_vrs_prm_simp:
+lemma ty_vrs_fresh[fresh]:
+ fixes x::"vrs"
+ and T::"ty"
+ shows "x \<sharp> T"
+by (simp add: fresh_def supp_def ty_vrs_prm_simp)
+
+lemma ty_domain_vrs_prm_simp:
fixes pi::"vrs prm"
- and \<Gamma>::"ty_context"
- shows "pi\<bullet>\<Gamma> = \<Gamma>"
-by (induct \<Gamma>)
- (auto simp add: calc_atm ty_vrs_prm_simp)
+ and \<Gamma>::"env"
+ shows "(ty_domain (pi\<bullet>\<Gamma>)) = (ty_domain \<Gamma>)"
+ apply(induct \<Gamma>)
+ apply (simp add: eqvts)
+ apply(simp add: tyvrs_vrs_prm_simp)
+done
lemma closed_in_eqvt'[eqvt]:
fixes pi::"vrs prm"
assumes a: "S closed_in \<Gamma>"
shows "(pi\<bullet>S) closed_in (pi\<bullet>\<Gamma>)"
using a
-by (simp add: ty_vrs_prm_simp ty_context_vrs_prm_simp)
+by (simp add: closed_in_def ty_domain_vrs_prm_simp ty_vrs_prm_simp)
+
+lemma fresh_vrs_of:
+ fixes x::"vrs"
+ shows "x\<sharp>vrs_of b = x\<sharp>b"
+ by (nominal_induct b rule: binding.strong_induct)
+ (simp_all add: fresh_singleton [OF pt_vrs_inst at_vrs_inst] fresh_set_empty ty_vrs_fresh fresh_atm)
+
+lemma fresh_trm_domain:
+ fixes x::"vrs"
+ shows "x\<sharp> trm_domain \<Gamma> = x\<sharp>\<Gamma>"
+ by (induct \<Gamma>)
+ (simp_all add: fresh_set_empty fresh_list_cons
+ fresh_fin_union [OF pt_vrs_inst at_vrs_inst fs_vrs_inst]
+ finite_domains finite_vrs fresh_vrs_of fresh_list_nil)
+
+lemma closed_in_fresh: "(X::tyvrs) \<sharp> ty_domain \<Gamma> \<Longrightarrow> T closed_in \<Gamma> \<Longrightarrow> X \<sharp> T"
+ by (auto simp add: closed_in_def fresh_def ty_domain_supp)
text {* Now validity of a context is a straightforward inductive definition. *}
-inductive
- valid_rel :: "ty_context \<Rightarrow> bool" ("\<turnstile> _ ok" [100] 100)
+inductive
+ valid_rel :: "env \<Rightarrow> bool" ("\<turnstile> _ ok" [100] 100)
where
- valid_nil[simp]: "\<turnstile> [] ok"
-| valid_cons[simp]: "\<lbrakk>\<turnstile> \<Gamma> ok; X\<sharp>(domain \<Gamma>); T closed_in \<Gamma>\<rbrakk> \<Longrightarrow> \<turnstile> ((X,T)#\<Gamma>) ok"
+ valid_nil[simp]: "\<turnstile> [] ok"
+| valid_consT[simp]: "\<lbrakk>\<turnstile> \<Gamma> ok; X\<sharp>(ty_domain \<Gamma>); T closed_in \<Gamma>\<rbrakk> \<Longrightarrow> \<turnstile> (TVarB X T#\<Gamma>) ok"
+| valid_cons [simp]: "\<lbrakk>\<turnstile> \<Gamma> ok; x\<sharp>(trm_domain \<Gamma>); T closed_in \<Gamma>\<rbrakk> \<Longrightarrow> \<turnstile> (VarB x T#\<Gamma>) ok"
equivariance valid_rel
-lemma validE:
- assumes a: "\<turnstile> ((X,T)#\<Gamma>) ok"
- shows "\<turnstile> \<Gamma> ok \<and> X\<sharp>(domain \<Gamma>) \<and> T closed_in \<Gamma>"
-using a by (cases, auto)
+declare binding.inject [simp add]
+declare trm.inject [simp add]
+
+inductive_cases validE[elim]: "\<turnstile> (TVarB X T#\<Gamma>) ok" "\<turnstile> (VarB x T#\<Gamma>) ok" "\<turnstile> (b#\<Gamma>) ok"
+
+declare binding.inject [simp del]
+declare trm.inject [simp del]
lemma validE_append:
assumes a: "\<turnstile> (\<Delta>@\<Gamma>) ok"
shows "\<turnstile> \<Gamma> ok"
- using a by (induct \<Delta>, auto dest: validE)
+ using a
+proof (induct \<Delta>)
+ case (Cons a \<Gamma>')
+ then show ?case
+ by (nominal_induct a rule:binding.strong_induct)
+ (auto elim: validE)
+qed (auto)
lemma replace_type:
- assumes a: "\<turnstile> (\<Delta>@(X,T)#\<Gamma>) ok"
+ assumes a: "\<turnstile> (\<Delta>@(TVarB X T)#\<Gamma>) ok"
and b: "S closed_in \<Gamma>"
- shows "\<turnstile> (\<Delta>@(X,S)#\<Gamma>) ok"
+ shows "\<turnstile> (\<Delta>@(TVarB X S)#\<Gamma>) ok"
using a b
-apply(induct \<Delta>)
-apply(auto dest!: validE intro!: valid_cons simp add: domain_append closed_in_def)
-done
+proof(induct \<Delta>)
+ case Nil
+ then show ?case by (auto elim: validE intro: valid_cons simp add: domains_append closed_in_def)
+next
+ case (Cons a \<Gamma>')
+ then show ?case
+ by (nominal_induct a rule:binding.strong_induct)
+ (auto elim: validE intro!: valid_cons simp add: domains_append closed_in_def)
+qed
text {* Well-formed contexts have a unique type-binding for a type-variable. *}
lemma uniqueness_of_ctxt:
- fixes \<Gamma>::"ty_context"
+ fixes \<Gamma>::"env"
assumes a: "\<turnstile> \<Gamma> ok"
- and b: "(X,T)\<in>set \<Gamma>"
- and c: "(X,S)\<in>set \<Gamma>"
+ and b: "(TVarB X T)\<in>set \<Gamma>"
+ and c: "(TVarB X S)\<in>set \<Gamma>"
shows "T=S"
using a b c
proof (induct)
- case valid_nil thus "T=S" by simp
-next
- case valid_cons
+ case (valid_consT \<Gamma> X' T')
moreover
- { fix \<Gamma>::"ty_context"
- assume a: "X\<sharp>(domain \<Gamma>)"
- have "\<not>(\<exists>T.(X,T)\<in>(set \<Gamma>))" using a
- proof (induct \<Gamma>)
- case (Cons Y \<Gamma>)
- thus "\<not> (\<exists>T.(X,T)\<in>set(Y#\<Gamma>))"
- by (simp only: fresh_domain_cons, auto simp add: fresh_atm)
+ { fix \<Gamma>'::"env"
+ assume a: "X'\<sharp>(ty_domain \<Gamma>')"
+ have "\<not>(\<exists>T.(TVarB X' T)\<in>(set \<Gamma>'))" using a
+ proof (induct \<Gamma>')
+ case (Cons Y \<Gamma>')
+ thus "\<not> (\<exists>T.(TVarB X' T)\<in>set(Y#\<Gamma>'))"
+ by (simp add: fresh_ty_domain_cons
+ fresh_fin_union[OF pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst]
+ finite_vrs finite_domains,
+ auto simp add: fresh_atm fresh_singleton [OF pt_tyvrs_inst at_tyvrs_inst])
qed (simp)
}
- ultimately show "T=S" by auto
-qed
+ ultimately show "T=S" by (auto simp add: binding.inject)
+qed (auto)
+
+lemma uniqueness_of_ctxt':
+ fixes \<Gamma>::"env"
+ assumes a: "\<turnstile> \<Gamma> ok"
+ and b: "(VarB x T)\<in>set \<Gamma>"
+ and c: "(VarB x S)\<in>set \<Gamma>"
+ shows "T=S"
+using a b c
+proof (induct)
+ case (valid_cons \<Gamma> x' T')
+ moreover
+ { fix \<Gamma>'::"env"
+ assume a: "x'\<sharp>(trm_domain \<Gamma>')"
+ have "\<not>(\<exists>T.(VarB x' T)\<in>(set \<Gamma>'))" using a
+ proof (induct \<Gamma>')
+ case (Cons y \<Gamma>')
+ thus "\<not> (\<exists>T.(VarB x' T)\<in>set(y#\<Gamma>'))"
+ by (simp add: fresh_fin_union[OF pt_vrs_inst at_vrs_inst fs_vrs_inst]
+ finite_vrs finite_domains,
+ auto simp add: fresh_atm fresh_singleton [OF pt_vrs_inst at_vrs_inst])
+ qed (simp)
+ }
+ ultimately show "T=S" by (auto simp add: binding.inject)
+qed (auto)
section {* Size and Capture-Avoiding Substitution for Types *}
@@ -233,7 +388,7 @@
"size_ty (Tvar X) = 1"
| "size_ty (Top) = 1"
| "size_ty (T1 \<rightarrow> T2) = (size_ty T1) + (size_ty T2) + 1"
-| "X\<sharp>T1 \<Longrightarrow> size_ty (\<forall>[X<:T1].T2) = (size_ty T1) + (size_ty T2) + 1"
+| "X \<sharp> T1 \<Longrightarrow> size_ty (\<forall>X<:T1. T2) = (size_ty T1) + (size_ty T2) + 1"
apply (finite_guess)+
apply (rule TrueI)+
apply (simp add: fresh_nat)
@@ -241,24 +396,195 @@
done
nominal_primrec
- subst_ty :: "ty \<Rightarrow> tyvrs \<Rightarrow> ty \<Rightarrow> ty" ("_[_:=_]\<^isub>t\<^isub>y" [100,100,100] 100)
+ subst_ty :: "ty \<Rightarrow> tyvrs \<Rightarrow> ty \<Rightarrow> ty" ("_[_ \<mapsto> _]\<^sub>\<tau>" [300, 0, 0] 300)
where
- "(Tvar X)[Y:=T]\<^isub>t\<^isub>y= (if X=Y then T else (Tvar X))"
-| "(Top)[Y:=T]\<^isub>t\<^isub>y = Top"
-| "(T\<^isub>1 \<rightarrow> T\<^isub>2)[Y:=T]\<^isub>t\<^isub>y = (T\<^isub>1[Y:=T]\<^isub>t\<^isub>y) \<rightarrow> (T\<^isub>2[Y:=T]\<^isub>t\<^isub>y)"
-| "\<lbrakk>X\<sharp>(Y,T); X\<sharp>T\<^isub>1\<rbrakk> \<Longrightarrow> (\<forall>[X<:T\<^isub>1].T\<^isub>2)[Y:=T]\<^isub>t\<^isub>y = (\<forall>[X<:(T\<^isub>1[Y:=T]\<^isub>t\<^isub>y)].(T\<^isub>2[Y:=T]\<^isub>t\<^isub>y))"
+ "(Tvar X)[Y \<mapsto> T]\<^sub>\<tau> = (if X=Y then T else Tvar X)"
+| "(Top)[Y \<mapsto> T]\<^sub>\<tau> = Top"
+| "(T\<^isub>1 \<rightarrow> T\<^isub>2)[Y \<mapsto> T]\<^sub>\<tau> = T\<^isub>1[Y \<mapsto> T]\<^sub>\<tau> \<rightarrow> T\<^isub>2[Y \<mapsto> T]\<^sub>\<tau>"
+| "\<lbrakk>X\<sharp>(Y,T); X\<sharp>T\<^isub>1\<rbrakk> \<Longrightarrow> (\<forall>X<:T\<^isub>1. T\<^isub>2)[Y \<mapsto> T]\<^sub>\<tau> = (\<forall>X<:T\<^isub>1[Y \<mapsto> T]\<^sub>\<tau>. T\<^isub>2[Y \<mapsto> T]\<^sub>\<tau>)"
apply (finite_guess)+
apply (rule TrueI)+
apply (simp add: abs_fresh)
apply (fresh_guess)+
done
+lemma subst_eqvt[eqvt]:
+ fixes pi::"tyvrs prm"
+ and T::"ty"
+ shows "pi\<bullet>(T[X \<mapsto> T']\<^sub>\<tau>) = (pi\<bullet>T)[(pi\<bullet>X) \<mapsto> (pi\<bullet>T')]\<^sub>\<tau>"
+ by (nominal_induct T avoiding: X T' rule: ty.strong_induct)
+ (perm_simp add: fresh_bij)+
+
+lemma subst_eqvt'[eqvt]:
+ fixes pi::"vrs prm"
+ and T::"ty"
+ shows "pi\<bullet>(T[X \<mapsto> T']\<^sub>\<tau>) = (pi\<bullet>T)[(pi\<bullet>X) \<mapsto> (pi\<bullet>T')]\<^sub>\<tau>"
+ by (nominal_induct T avoiding: X T' rule: ty.strong_induct)
+ (perm_simp add: fresh_left)+
+
+lemma type_subst_fresh[fresh]:
+ fixes X::"tyvrs"
+ assumes "X \<sharp> T" and "X \<sharp> P"
+ shows "X \<sharp> T[Y \<mapsto> P]\<^sub>\<tau>"
+using assms
+by (nominal_induct T avoiding: X Y P rule:ty.strong_induct)
+ (auto simp add: abs_fresh)
+
+lemma fresh_type_subst_fresh[fresh]:
+ assumes "X\<sharp>T'"
+ shows "X\<sharp>T[X \<mapsto> T']\<^sub>\<tau>"
+using assms
+by (nominal_induct T avoiding: X T' rule: ty.strong_induct)
+ (auto simp add: fresh_atm abs_fresh fresh_nat)
+
+lemma type_subst_identity: "X \<sharp> T \<Longrightarrow> T[X \<mapsto> U]\<^sub>\<tau> = T"
+ by (nominal_induct T avoiding: X U rule: ty.strong_induct)
+ (simp_all add: fresh_atm abs_fresh)
+
+lemma type_substitution_lemma:
+ "X \<noteq> Y \<Longrightarrow> X \<sharp> L \<Longrightarrow> M[X \<mapsto> N]\<^sub>\<tau>[Y \<mapsto> L]\<^sub>\<tau> = M[Y \<mapsto> L]\<^sub>\<tau>[X \<mapsto> N[Y \<mapsto> L]\<^sub>\<tau>]\<^sub>\<tau>"
+ by (nominal_induct M avoiding: X Y N L rule: ty.strong_induct)
+ (auto simp add: type_subst_fresh type_subst_identity)
+
+lemma type_subst_rename:
+ "Y \<sharp> T \<Longrightarrow> ([(Y, X)] \<bullet> T)[Y \<mapsto> U]\<^sub>\<tau> = T[X \<mapsto> U]\<^sub>\<tau>"
+ by (nominal_induct T avoiding: X Y U rule: ty.strong_induct)
+ (simp_all add: fresh_atm calc_atm abs_fresh fresh_aux)
+
+nominal_primrec
+ subst_tyb :: "binding \<Rightarrow> tyvrs \<Rightarrow> ty \<Rightarrow> binding" ("_[_ \<mapsto> _]\<^sub>b" [100,100,100] 100)
+where
+ "(TVarB X U)[Y \<mapsto> T]\<^sub>b = TVarB X (U[Y \<mapsto> T]\<^sub>\<tau>)"
+| "(VarB X U)[Y \<mapsto> T]\<^sub>b = VarB X (U[Y \<mapsto> T]\<^sub>\<tau>)"
+by auto
+
+lemma binding_subst_fresh[fresh]:
+ fixes X::"tyvrs"
+ assumes "X \<sharp> a"
+ and "X \<sharp> P"
+ shows "X \<sharp> a[Y \<mapsto> P]\<^sub>b"
+using assms
+by (nominal_induct a rule:binding.strong_induct)
+ (auto simp add: freshs)
+
+lemma binding_subst_identity: "X \<sharp> B \<Longrightarrow> B[X \<mapsto> U]\<^sub>b = B"
+ by (induct B rule: binding.induct)
+ (simp_all add: fresh_atm type_subst_identity)
+
consts
- subst_tyc :: "ty_context \<Rightarrow> tyvrs \<Rightarrow> ty \<Rightarrow> ty_context" ("_[_:=_]\<^isub>t\<^isub>y\<^isub>c" [100,100,100] 100)
+ subst_tyc :: "env \<Rightarrow> tyvrs \<Rightarrow> ty \<Rightarrow> env" ("_[_ \<mapsto> _]\<^sub>e" [100,100,100] 100)
+
primrec
-"([])[Y:=T]\<^isub>t\<^isub>y\<^isub>c= []"
-"(XT#\<Gamma>)[Y:=T]\<^isub>t\<^isub>y\<^isub>c = (fst XT,(snd XT)[Y:=T]\<^isub>t\<^isub>y)#(\<Gamma>[Y:=T]\<^isub>t\<^isub>y\<^isub>c)"
-
+"([])[Y \<mapsto> T]\<^sub>e= []"
+"(B#\<Gamma>)[Y \<mapsto> T]\<^sub>e = (B[Y \<mapsto> T]\<^sub>b)#(\<Gamma>[Y \<mapsto> T]\<^sub>e)"
+
+lemma ctxt_subst_fresh'[fresh]:
+ fixes X::"tyvrs"
+ assumes "X \<sharp> \<Gamma>"
+ and "X \<sharp> P"
+ shows "X \<sharp> \<Gamma>[Y \<mapsto> P]\<^sub>e"
+using assms
+by (induct \<Gamma>)
+ (auto simp add: fresh_list_cons freshs)
+
+lemma ctxt_subst_mem_TVarB: "TVarB X T \<in> set \<Gamma> \<Longrightarrow> TVarB X (T[Y \<mapsto> U]\<^sub>\<tau>) \<in> set (\<Gamma>[Y \<mapsto> U]\<^sub>e)"
+ by (induct \<Gamma>) auto
+
+lemma ctxt_subst_mem_VarB: "VarB x T \<in> set \<Gamma> \<Longrightarrow> VarB x (T[Y \<mapsto> U]\<^sub>\<tau>) \<in> set (\<Gamma>[Y \<mapsto> U]\<^sub>e)"
+ by (induct \<Gamma>) auto
+
+lemma ctxt_subst_identity: "X \<sharp> \<Gamma> \<Longrightarrow> \<Gamma>[X \<mapsto> U]\<^sub>e = \<Gamma>"
+ by (induct \<Gamma>) (simp_all add: fresh_list_cons binding_subst_identity)
+
+lemma ctxt_subst_append: "(\<Delta> @ \<Gamma>)[X \<mapsto> T]\<^sub>e = \<Delta>[X \<mapsto> T]\<^sub>e @ \<Gamma>[X \<mapsto> T]\<^sub>e"
+ by (induct \<Delta>) simp_all
+
+nominal_primrec
+ subst_trm :: "trm \<Rightarrow> vrs \<Rightarrow> trm \<Rightarrow> trm" ("_[_ \<mapsto> _]" [300, 0, 0] 300)
+where
+ "(Var x)[y \<mapsto> t'] = (if x=y then t' else (Var x))"
+| "(t1 \<cdot> t2)[y \<mapsto> t'] = t1[y \<mapsto> t'] \<cdot> t2[y \<mapsto> t']"
+| "(t \<cdot>\<^sub>\<tau> T)[y \<mapsto> t'] = t[y \<mapsto> t'] \<cdot>\<^sub>\<tau> T"
+| "X\<sharp>(T,t') \<Longrightarrow> (\<lambda>X<:T. t)[y \<mapsto> t'] = (\<lambda>X<:T. t[y \<mapsto> t'])"
+| "x\<sharp>(y,t') \<Longrightarrow> (\<lambda>x:T. t)[y \<mapsto> t'] = (\<lambda>x:T. t[y \<mapsto> t'])"
+apply(finite_guess)+
+apply(rule TrueI)+
+apply(simp add: abs_fresh)+
+apply(fresh_guess add: ty_vrs_fresh abs_fresh)+
+done
+
+lemma subst_trm_fresh_tyvar:
+ "(X::tyvrs) \<sharp> t \<Longrightarrow> X \<sharp> u \<Longrightarrow> X \<sharp> t[x \<mapsto> u]"
+ by (nominal_induct t avoiding: x u rule: trm.strong_induct)
+ (auto simp add: trm.fresh abs_fresh)
+
+lemma subst_trm_fresh_var: "x \<sharp> u \<Longrightarrow> x \<sharp> t[x \<mapsto> u]"
+ by (nominal_induct t avoiding: x u rule: trm.strong_induct)
+ (simp_all add: abs_fresh fresh_atm ty_vrs_fresh)
+
+lemma subst_trm_eqvt[eqvt]:
+ fixes pi::"tyvrs prm"
+ and t::"trm"
+ shows "pi\<bullet>(t[x \<mapsto> u]) = (pi\<bullet>t)[(pi\<bullet>x) \<mapsto> (pi\<bullet>u)]"
+ by (nominal_induct t avoiding: x u rule: trm.strong_induct)
+ (perm_simp add: fresh_left)+
+
+lemma subst_trm_eqvt'[eqvt]:
+ fixes pi::"vrs prm"
+ and t::"trm"
+ shows "pi\<bullet>(t[x \<mapsto> u]) = (pi\<bullet>t)[(pi\<bullet>x) \<mapsto> (pi\<bullet>u)]"
+ by (nominal_induct t avoiding: x u rule: trm.strong_induct)
+ (perm_simp add: fresh_left)+
+
+lemma subst_trm_rename:
+ "y \<sharp> t \<Longrightarrow> ([(y, x)] \<bullet> t)[y \<mapsto> u] = t[x \<mapsto> u]"
+ by (nominal_induct t avoiding: x y u rule: trm.strong_induct)
+ (simp_all add: fresh_atm calc_atm abs_fresh fresh_aux ty_vrs_fresh perm_fresh_fresh)
+
+nominal_primrec (freshness_context: "T2::ty")
+ subst_trm_ty :: "trm \<Rightarrow> tyvrs \<Rightarrow> ty \<Rightarrow> trm" ("_[_ \<mapsto>\<^sub>\<tau> _]" [300, 0, 0] 300)
+where
+ "(Var x)[Y \<mapsto>\<^sub>\<tau> T2] = Var x"
+| "(t1 \<cdot> t2)[Y \<mapsto>\<^sub>\<tau> T2] = t1[Y \<mapsto>\<^sub>\<tau> T2] \<cdot> t2[Y \<mapsto>\<^sub>\<tau> T2]"
+| "(t1 \<cdot>\<^sub>\<tau> T)[Y \<mapsto>\<^sub>\<tau> T2] = t1[Y \<mapsto>\<^sub>\<tau> T2] \<cdot>\<^sub>\<tau> T[Y \<mapsto> T2]\<^sub>\<tau>"
+| "X\<sharp>(Y,T,T2) \<Longrightarrow> (\<lambda>X<:T. t)[Y \<mapsto>\<^sub>\<tau> T2] = (\<lambda>X<:T[Y \<mapsto> T2]\<^sub>\<tau>. t[Y \<mapsto>\<^sub>\<tau> T2])"
+| "(\<lambda>x:T. t)[Y \<mapsto>\<^sub>\<tau> T2] = (\<lambda>x:T[Y \<mapsto> T2]\<^sub>\<tau>. t[Y \<mapsto>\<^sub>\<tau> T2])"
+apply(finite_guess)+
+apply(rule TrueI)+
+apply(simp add: abs_fresh ty_vrs_fresh)+
+apply(simp add: type_subst_fresh)
+apply(fresh_guess add: ty_vrs_fresh abs_fresh)+
+done
+
+lemma subst_trm_ty_fresh:
+ "(X::tyvrs) \<sharp> t \<Longrightarrow> X \<sharp> T \<Longrightarrow> X \<sharp> t[Y \<mapsto>\<^sub>\<tau> T]"
+ by (nominal_induct t avoiding: Y T rule: trm.strong_induct)
+ (auto simp add: abs_fresh type_subst_fresh)
+
+lemma subst_trm_ty_fresh':
+ "X \<sharp> T \<Longrightarrow> X \<sharp> t[X \<mapsto>\<^sub>\<tau> T]"
+ by (nominal_induct t avoiding: X T rule: trm.strong_induct)
+ (simp_all add: abs_fresh fresh_type_subst_fresh fresh_atm)
+
+lemma subst_trm_ty_eqvt[eqvt]:
+ fixes pi::"tyvrs prm"
+ and t::"trm"
+ shows "pi\<bullet>(t[X \<mapsto>\<^sub>\<tau> T]) = (pi\<bullet>t)[(pi\<bullet>X) \<mapsto>\<^sub>\<tau> (pi\<bullet>T)]"
+ by (nominal_induct t avoiding: X T rule: trm.strong_induct)
+ (perm_simp add: fresh_bij subst_eqvt)+
+
+lemma subst_trm_ty_eqvt'[eqvt]:
+ fixes pi::"vrs prm"
+ and t::"trm"
+ shows "pi\<bullet>(t[X \<mapsto>\<^sub>\<tau> T]) = (pi\<bullet>t)[(pi\<bullet>X) \<mapsto>\<^sub>\<tau> (pi\<bullet>T)]"
+ by (nominal_induct t avoiding: X T rule: trm.strong_induct)
+ (perm_simp add: fresh_left subst_eqvt')+
+
+lemma subst_trm_ty_rename:
+ "Y \<sharp> t \<Longrightarrow> ([(Y, X)] \<bullet> t)[Y \<mapsto>\<^sub>\<tau> U] = t[X \<mapsto>\<^sub>\<tau> U]"
+ by (nominal_induct t avoiding: X Y U rule: trm.strong_induct)
+ (simp_all add: fresh_atm calc_atm abs_fresh fresh_aux type_subst_rename)
+
section {* Subtyping-Relation *}
text {* The definition for the subtyping-relation follows quite closely what is written
@@ -269,13 +595,13 @@
$\alpha$-equivalence classes.) *}
inductive
- subtype_of :: "ty_context \<Rightarrow> ty \<Rightarrow> ty \<Rightarrow> bool" ("_\<turnstile>_<:_" [100,100,100] 100)
+ subtype_of :: "env \<Rightarrow> ty \<Rightarrow> ty \<Rightarrow> bool" ("_\<turnstile>_<:_" [100,100,100] 100)
where
- S_Top[intro]: "\<lbrakk>\<turnstile> \<Gamma> ok; S closed_in \<Gamma>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> S <: Top"
-| S_Var[intro]: "\<lbrakk>(X,S) \<in> set \<Gamma>; \<Gamma> \<turnstile> S <: T\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (Tvar X) <: T"
-| S_Refl[intro]: "\<lbrakk>\<turnstile> \<Gamma> ok; X \<in> domain \<Gamma>\<rbrakk>\<Longrightarrow> \<Gamma> \<turnstile> Tvar X <: Tvar X"
-| S_Arrow[intro]: "\<lbrakk>\<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1; \<Gamma> \<turnstile> S\<^isub>2 <: T\<^isub>2\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (S\<^isub>1 \<rightarrow> S\<^isub>2) <: (T\<^isub>1 \<rightarrow> T\<^isub>2)"
-| S_Forall[intro]: "\<lbrakk>\<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1; X\<sharp>\<Gamma>; ((X,T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: \<forall>[X<:T\<^isub>1].T\<^isub>2"
+ SA_Top[intro]: "\<lbrakk>\<turnstile> \<Gamma> ok; S closed_in \<Gamma>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> S <: Top"
+| SA_refl_TVar[intro]: "\<lbrakk>\<turnstile> \<Gamma> ok; X \<in> ty_domain \<Gamma>\<rbrakk>\<Longrightarrow> \<Gamma> \<turnstile> Tvar X <: Tvar X"
+| SA_trans_TVar[intro]: "\<lbrakk>(TVarB X S) \<in> set \<Gamma>; \<Gamma> \<turnstile> S <: T\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (Tvar X) <: T"
+| SA_arrow[intro]: "\<lbrakk>\<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1; \<Gamma> \<turnstile> S\<^isub>2 <: T\<^isub>2\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (S\<^isub>1 \<rightarrow> S\<^isub>2) <: (T\<^isub>1 \<rightarrow> T\<^isub>2)"
+| SA_all[intro]: "\<lbrakk>\<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1; ((TVarB X T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: (\<forall>X<:T\<^isub>1. T\<^isub>2)"
lemma subtype_implies_ok:
fixes X::"tyvrs"
@@ -288,15 +614,15 @@
shows "S closed_in \<Gamma> \<and> T closed_in \<Gamma>"
using a
proof (induct)
- case (S_Top \<Gamma> S)
+ case (SA_Top \<Gamma> S)
have "Top closed_in \<Gamma>" by (simp add: closed_in_def ty.supp)
moreover
have "S closed_in \<Gamma>" by fact
ultimately show "S closed_in \<Gamma> \<and> Top closed_in \<Gamma>" by simp
next
- case (S_Var X S \<Gamma> T)
- have "(X,S)\<in>set \<Gamma>" by fact
- hence "X \<in> domain \<Gamma>" by (rule domain_inclusion)
+ case (SA_trans_TVar X S \<Gamma> T)
+ have "(TVarB X S)\<in>set \<Gamma>" by fact
+ hence "X \<in> ty_domain \<Gamma>" by (rule ty_domain_inclusion)
hence "(Tvar X) closed_in \<Gamma>" by (simp add: closed_in_def ty.supp supp_atm)
moreover
have "S closed_in \<Gamma> \<and> T closed_in \<Gamma>" by fact
@@ -311,20 +637,33 @@
shows "X\<sharp>S \<and> X\<sharp>T"
proof -
from a1 have "\<turnstile> \<Gamma> ok" by (rule subtype_implies_ok)
- with a2 have "X\<sharp>domain(\<Gamma>)" by (simp add: fresh_domain)
+ with a2 have "X\<sharp>ty_domain(\<Gamma>)" by (simp add: fresh_domain)
moreover
from a1 have "S closed_in \<Gamma> \<and> T closed_in \<Gamma>" by (rule subtype_implies_closed)
- hence "supp S \<subseteq> ((supp (domain \<Gamma>))::tyvrs set)"
- and "supp T \<subseteq> ((supp (domain \<Gamma>))::tyvrs set)" by (simp_all add: domain_supp closed_in_def)
+ hence "supp S \<subseteq> ((supp (ty_domain \<Gamma>))::tyvrs set)"
+ and "supp T \<subseteq> ((supp (ty_domain \<Gamma>))::tyvrs set)" by (simp_all add: ty_domain_supp closed_in_def)
ultimately show "X\<sharp>S \<and> X\<sharp>T" by (force simp add: supp_prod fresh_def)
qed
+lemma valid_ty_domain_fresh:
+ fixes X::"tyvrs"
+ assumes valid: "\<turnstile> \<Gamma> ok"
+ shows "X\<sharp>(ty_domain \<Gamma>) = X\<sharp>\<Gamma>"
+ using valid
+ apply induct
+ apply (simp add: fresh_list_nil fresh_set_empty)
+ apply (simp_all add: binding.fresh fresh_list_cons
+ fresh_fin_insert [OF pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst] finite_domains fresh_atm)
+ apply (auto simp add: closed_in_fresh)
+ done
+
equivariance subtype_of
-nominal_inductive subtype_of
- by (simp_all add: abs_fresh subtype_implies_fresh)
-
-thm subtype_of.strong_induct
+nominal_inductive subtype_of
+ apply (simp_all add: abs_fresh)
+ apply (fastsimp simp add: valid_ty_domain_fresh dest: subtype_implies_ok)
+ apply (force simp add: closed_in_fresh dest: subtype_implies_closed subtype_implies_ok)+
+ done
section {* Reflexivity of Subtyping *}
@@ -338,17 +677,17 @@
have ih_T\<^isub>1: "\<And>\<Gamma>. \<lbrakk>\<turnstile> \<Gamma> ok; T\<^isub>1 closed_in \<Gamma>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> T\<^isub>1 <: T\<^isub>1" by fact
have ih_T\<^isub>2: "\<And>\<Gamma>. \<lbrakk>\<turnstile> \<Gamma> ok; T\<^isub>2 closed_in \<Gamma>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> T\<^isub>2 <: T\<^isub>2" by fact
have fresh_cond: "X\<sharp>\<Gamma>" by fact
- hence fresh_domain: "X\<sharp>(domain \<Gamma>)" by (simp add: fresh_domain)
- have "(\<forall>[X<:T\<^isub>2].T\<^isub>1) closed_in \<Gamma>" by fact
- hence closed\<^isub>T\<^isub>2: "T\<^isub>2 closed_in \<Gamma>" and closed\<^isub>T\<^isub>1: "T\<^isub>1 closed_in ((X,T\<^isub>2)#\<Gamma>)"
+ hence fresh_ty_domain: "X\<sharp>(ty_domain \<Gamma>)" by (simp add: fresh_domain)
+ have "(\<forall>X<:T\<^isub>2. T\<^isub>1) closed_in \<Gamma>" by fact
+ hence closed\<^isub>T\<^isub>2: "T\<^isub>2 closed_in \<Gamma>" and closed\<^isub>T\<^isub>1: "T\<^isub>1 closed_in ((TVarB X T\<^isub>2)#\<Gamma>)"
by (auto simp add: closed_in_def ty.supp abs_supp)
have ok: "\<turnstile> \<Gamma> ok" by fact
- hence ok': "\<turnstile> ((X,T\<^isub>2)#\<Gamma>) ok" using closed\<^isub>T\<^isub>2 fresh_domain by simp
+ hence ok': "\<turnstile> ((TVarB X T\<^isub>2)#\<Gamma>) ok" using closed\<^isub>T\<^isub>2 fresh_ty_domain by simp
have "\<Gamma> \<turnstile> T\<^isub>2 <: T\<^isub>2" using ih_T\<^isub>2 closed\<^isub>T\<^isub>2 ok by simp
moreover
- have "((X,T\<^isub>2)#\<Gamma>) \<turnstile> T\<^isub>1 <: T\<^isub>1" using ih_T\<^isub>1 closed\<^isub>T\<^isub>1 ok' by simp
- ultimately show "\<Gamma> \<turnstile> \<forall>[X<:T\<^isub>2].T\<^isub>1 <: \<forall>[X<:T\<^isub>2].T\<^isub>1" using fresh_cond
- by (simp add: subtype_of.S_Forall)
+ have "((TVarB X T\<^isub>2)#\<Gamma>) \<turnstile> T\<^isub>1 <: T\<^isub>1" using ih_T\<^isub>1 closed\<^isub>T\<^isub>1 ok' by simp
+ ultimately show "\<Gamma> \<turnstile> (\<forall>X<:T\<^isub>2. T\<^isub>1) <: (\<forall>X<:T\<^isub>2. T\<^isub>1)" using fresh_cond
+ by (simp add: subtype_of.SA_all)
qed (auto simp add: closed_in_def ty.supp supp_atm)
lemma subtype_reflexivity_semiautomated:
@@ -361,11 +700,10 @@
--{* Too bad that this instantiation cannot be found automatically by
\isakeyword{auto}; \isakeyword{blast} would find it if we had not used
an explicit definition for @{text "closed_in_def"}. *}
-apply(drule_tac x="(tyvrs, ty2)#\<Gamma>" in meta_spec)
+apply(drule_tac x="(TVarB tyvrs ty2)#\<Gamma>" in meta_spec)
apply(force dest: fresh_domain simp add: closed_in_def)
done
-
section {* Weakening *}
text {* In order to prove weakening we introduce the notion of a type-context extending
@@ -373,16 +711,16 @@
smoother than if we had strictly adhered to the version in the POPLmark-paper. *}
constdefs
- extends :: "ty_context \<Rightarrow> ty_context \<Rightarrow> bool" ("_ extends _" [100,100] 100)
- "\<Delta> extends \<Gamma> \<equiv> \<forall>X Q. (X,Q)\<in>set \<Gamma> \<longrightarrow> (X,Q)\<in>set \<Delta>"
+ extends :: "env \<Rightarrow> env \<Rightarrow> bool" ("_ extends _" [100,100] 100)
+ "\<Delta> extends \<Gamma> \<equiv> \<forall>X Q. (TVarB X Q)\<in>set \<Gamma> \<longrightarrow> (TVarB X Q)\<in>set \<Delta>"
-lemma extends_domain:
+lemma extends_ty_domain:
assumes a: "\<Delta> extends \<Gamma>"
- shows "domain \<Gamma> \<subseteq> domain \<Delta>"
+ shows "ty_domain \<Gamma> \<subseteq> ty_domain \<Delta>"
using a
apply (auto simp add: extends_def)
- apply (drule domain_existence)
- apply (force simp add: domain_inclusion)
+ apply (drule ty_domain_existence)
+ apply (force simp add: ty_domain_inclusion)
done
lemma extends_closed:
@@ -390,12 +728,12 @@
and a2: "\<Delta> extends \<Gamma>"
shows "T closed_in \<Delta>"
using a1 a2
- by (auto dest: extends_domain simp add: closed_in_def)
+ by (auto dest: extends_ty_domain simp add: closed_in_def)
lemma extends_memb:
assumes a: "\<Delta> extends \<Gamma>"
- and b: "(X,T) \<in> set \<Gamma>"
- shows "(X,T) \<in> set \<Delta>"
+ and b: "(TVarB X T) \<in> set \<Gamma>"
+ shows "(TVarB X T) \<in> set \<Delta>"
using a b by (simp add: extends_def)
lemma weakening:
@@ -405,7 +743,7 @@
shows "\<Delta> \<turnstile> S <: T"
using a b c
proof (nominal_induct \<Gamma> S T avoiding: \<Delta> rule: subtype_of.strong_induct)
- case (S_Top \<Gamma> S)
+ case (SA_Top \<Gamma> S)
have lh_drv_prem: "S closed_in \<Gamma>" by fact
have "\<turnstile> \<Delta> ok" by fact
moreover
@@ -413,43 +751,43 @@
hence "S closed_in \<Delta>" using lh_drv_prem by (simp only: extends_closed)
ultimately show "\<Delta> \<turnstile> S <: Top" by force
next
- case (S_Var X S \<Gamma> T)
- have lh_drv_prem: "(X,S) \<in> set \<Gamma>" by fact
+ case (SA_trans_TVar X S \<Gamma> T)
+ have lh_drv_prem: "(TVarB X S) \<in> set \<Gamma>" by fact
have ih: "\<And>\<Delta>. \<turnstile> \<Delta> ok \<Longrightarrow> \<Delta> extends \<Gamma> \<Longrightarrow> \<Delta> \<turnstile> S <: T" by fact
have ok: "\<turnstile> \<Delta> ok" by fact
have extends: "\<Delta> extends \<Gamma>" by fact
- have "(X,S) \<in> set \<Delta>" using lh_drv_prem extends by (simp only: extends_memb)
+ have "(TVarB X S) \<in> set \<Delta>" using lh_drv_prem extends by (simp only: extends_memb)
moreover
have "\<Delta> \<turnstile> S <: T" using ok extends ih by simp
ultimately show "\<Delta> \<turnstile> Tvar X <: T" using ok by force
next
- case (S_Refl \<Gamma> X)
- have lh_drv_prem: "X \<in> domain \<Gamma>" by fact
+ case (SA_refl_TVar \<Gamma> X)
+ have lh_drv_prem: "X \<in> ty_domain \<Gamma>" by fact
have "\<turnstile> \<Delta> ok" by fact
moreover
have "\<Delta> extends \<Gamma>" by fact
- hence "X \<in> domain \<Delta>" using lh_drv_prem by (force dest: extends_domain)
+ hence "X \<in> ty_domain \<Delta>" using lh_drv_prem by (force dest: extends_ty_domain)
ultimately show "\<Delta> \<turnstile> Tvar X <: Tvar X" by force
next
- case (S_Arrow \<Gamma> T\<^isub>1 S\<^isub>1 S\<^isub>2 T\<^isub>2) thus "\<Delta> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T\<^isub>1 \<rightarrow> T\<^isub>2" by blast
+ case (SA_arrow \<Gamma> T\<^isub>1 S\<^isub>1 S\<^isub>2 T\<^isub>2) thus "\<Delta> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T\<^isub>1 \<rightarrow> T\<^isub>2" by blast
next
- case (S_Forall \<Gamma> T\<^isub>1 S\<^isub>1 X S\<^isub>2 T\<^isub>2)
+ case (SA_all \<Gamma> T\<^isub>1 S\<^isub>1 X S\<^isub>2 T\<^isub>2)
have fresh_cond: "X\<sharp>\<Delta>" by fact
- hence fresh_domain: "X\<sharp>(domain \<Delta>)" by (simp add: fresh_domain)
+ hence fresh_domain: "X\<sharp>(ty_domain \<Delta>)" by (simp add: fresh_domain)
have ih\<^isub>1: "\<And>\<Delta>. \<turnstile> \<Delta> ok \<Longrightarrow> \<Delta> extends \<Gamma> \<Longrightarrow> \<Delta> \<turnstile> T\<^isub>1 <: S\<^isub>1" by fact
- have ih\<^isub>2: "\<And>\<Delta>. \<turnstile> \<Delta> ok \<Longrightarrow> \<Delta> extends ((X,T\<^isub>1)#\<Gamma>) \<Longrightarrow> \<Delta> \<turnstile> S\<^isub>2 <: T\<^isub>2" by fact
+ have ih\<^isub>2: "\<And>\<Delta>. \<turnstile> \<Delta> ok \<Longrightarrow> \<Delta> extends ((TVarB X T\<^isub>1)#\<Gamma>) \<Longrightarrow> \<Delta> \<turnstile> S\<^isub>2 <: T\<^isub>2" by fact
have lh_drv_prem: "\<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1" by fact
hence closed\<^isub>T\<^isub>1: "T\<^isub>1 closed_in \<Gamma>" by (simp add: subtype_implies_closed)
have ok: "\<turnstile> \<Delta> ok" by fact
have ext: "\<Delta> extends \<Gamma>" by fact
have "T\<^isub>1 closed_in \<Delta>" using ext closed\<^isub>T\<^isub>1 by (simp only: extends_closed)
- hence "\<turnstile> ((X,T\<^isub>1)#\<Delta>) ok" using fresh_domain ok by force
+ hence "\<turnstile> ((TVarB X T\<^isub>1)#\<Delta>) ok" using fresh_domain ok by force
moreover
- have "((X,T\<^isub>1)#\<Delta>) extends ((X,T\<^isub>1)#\<Gamma>)" using ext by (force simp add: extends_def)
- ultimately have "((X,T\<^isub>1)#\<Delta>) \<turnstile> S\<^isub>2 <: T\<^isub>2" using ih\<^isub>2 by simp
+ have "((TVarB X T\<^isub>1)#\<Delta>) extends ((TVarB X T\<^isub>1)#\<Gamma>)" using ext by (force simp add: extends_def)
+ ultimately have "((TVarB X T\<^isub>1)#\<Delta>) \<turnstile> S\<^isub>2 <: T\<^isub>2" using ih\<^isub>2 by simp
moreover
have "\<Delta> \<turnstile> T\<^isub>1 <: S\<^isub>1" using ok ext ih\<^isub>1 by simp
- ultimately show "\<Delta> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: \<forall>[X<:T\<^isub>1].T\<^isub>2" using ok by (force intro: S_Forall)
+ ultimately show "\<Delta> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: (\<forall>X<:T\<^isub>1. T\<^isub>2)" using ok by (force intro: SA_all)
qed
text {* In fact all ``non-binding" cases can be solved automatically: *}
@@ -461,44 +799,41 @@
shows "\<Delta> \<turnstile> S <: T"
using a b c
proof (nominal_induct \<Gamma> S T avoiding: \<Delta> rule: subtype_of.strong_induct)
- case (S_Forall \<Gamma> T\<^isub>1 S\<^isub>1 X S\<^isub>2 T\<^isub>2)
+ case (SA_all \<Gamma> T\<^isub>1 S\<^isub>1 X S\<^isub>2 T\<^isub>2)
have fresh_cond: "X\<sharp>\<Delta>" by fact
- hence fresh_domain: "X\<sharp>(domain \<Delta>)" by (simp add: fresh_domain)
+ hence fresh_domain: "X\<sharp>(ty_domain \<Delta>)" by (simp add: fresh_domain)
have ih\<^isub>1: "\<And>\<Delta>. \<turnstile> \<Delta> ok \<Longrightarrow> \<Delta> extends \<Gamma> \<Longrightarrow> \<Delta> \<turnstile> T\<^isub>1 <: S\<^isub>1" by fact
- have ih\<^isub>2: "\<And>\<Delta>. \<turnstile> \<Delta> ok \<Longrightarrow> \<Delta> extends ((X,T\<^isub>1)#\<Gamma>) \<Longrightarrow> \<Delta> \<turnstile> S\<^isub>2 <: T\<^isub>2" by fact
+ have ih\<^isub>2: "\<And>\<Delta>. \<turnstile> \<Delta> ok \<Longrightarrow> \<Delta> extends ((TVarB X T\<^isub>1)#\<Gamma>) \<Longrightarrow> \<Delta> \<turnstile> S\<^isub>2 <: T\<^isub>2" by fact
have lh_drv_prem: "\<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1" by fact
hence closed\<^isub>T\<^isub>1: "T\<^isub>1 closed_in \<Gamma>" by (simp add: subtype_implies_closed)
have ok: "\<turnstile> \<Delta> ok" by fact
have ext: "\<Delta> extends \<Gamma>" by fact
have "T\<^isub>1 closed_in \<Delta>" using ext closed\<^isub>T\<^isub>1 by (simp only: extends_closed)
- hence "\<turnstile> ((X,T\<^isub>1)#\<Delta>) ok" using fresh_domain ok by force
+ hence "\<turnstile> ((TVarB X T\<^isub>1)#\<Delta>) ok" using fresh_domain ok by force
moreover
- have "((X,T\<^isub>1)#\<Delta>) extends ((X,T\<^isub>1)#\<Gamma>)" using ext by (force simp add: extends_def)
- ultimately have "((X,T\<^isub>1)#\<Delta>) \<turnstile> S\<^isub>2 <: T\<^isub>2" using ih\<^isub>2 by simp
+ have "((TVarB X T\<^isub>1)#\<Delta>) extends ((TVarB X T\<^isub>1)#\<Gamma>)" using ext by (force simp add: extends_def)
+ ultimately have "((TVarB X T\<^isub>1)#\<Delta>) \<turnstile> S\<^isub>2 <: T\<^isub>2" using ih\<^isub>2 by simp
moreover
have "\<Delta> \<turnstile> T\<^isub>1 <: S\<^isub>1" using ok ext ih\<^isub>1 by simp
- ultimately show "\<Delta> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: \<forall>[X<:T\<^isub>1].T\<^isub>2" using ok by (force intro: S_Forall)
-qed (blast intro: extends_closed extends_memb dest: extends_domain)+
+ ultimately show "\<Delta> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: (\<forall>X<:T\<^isub>1. T\<^isub>2)" using ok by (force intro: SA_all)
+qed (blast intro: extends_closed extends_memb dest: extends_ty_domain)+
section {* Transitivity and Narrowing *}
text {* Some inversion lemmas that are needed in the transitivity and narrowing proof.*}
-lemma S_TopE:
- assumes a: "\<Gamma> \<turnstile> Top <: T"
- shows "T = Top"
-using a by (cases, auto)
+declare ty.inject [simp add]
-lemma S_ArrowE_left:
- assumes a: "\<Gamma> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T"
- shows "T = Top \<or> (\<exists>T\<^isub>1 T\<^isub>2. T = T\<^isub>1 \<rightarrow> T\<^isub>2 \<and> \<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1 \<and> \<Gamma> \<turnstile> S\<^isub>2 <: T\<^isub>2)"
-using a by (cases, auto simp add: ty.inject)
+inductive_cases S_TopE: "\<Gamma> \<turnstile> Top <: T"
+inductive_cases S_ArrowE_left: "\<Gamma> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T"
+
+declare ty.inject [simp del]
lemma S_ForallE_left:
- shows "\<lbrakk>\<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: T; X\<sharp>\<Gamma>; X\<sharp>S\<^isub>1\<rbrakk>
- \<Longrightarrow> T = Top \<or> (\<exists>T\<^isub>1 T\<^isub>2. T = \<forall>[X<:T\<^isub>1].T\<^isub>2 \<and> \<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1 \<and> ((X,T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2)"
+ shows "\<lbrakk>\<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: T; X\<sharp>\<Gamma>; X\<sharp>S\<^isub>1\<rbrakk>
+ \<Longrightarrow> T = Top \<or> (\<exists>T\<^isub>1 T\<^isub>2. T = (\<forall>X<:T\<^isub>1. T\<^isub>2) \<and> \<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1 \<and> ((TVarB X T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2)"
apply(frule subtype_implies_ok)
- apply(ind_cases "\<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: T")
+ apply(ind_cases "\<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: T")
apply(auto simp add: ty.inject alpha)
apply(rule_tac x="[(X,Xa)]\<bullet>T\<^isub>2" in exI)
apply(rule conjI)
@@ -509,18 +844,20 @@
apply(rule at_ds5[OF at_tyvrs_inst])
apply(rule conjI)
apply(simp add: pt_fresh_left[OF pt_tyvrs_inst, OF at_tyvrs_inst] calc_atm)
- apply(drule_tac \<Gamma>="((Xa,T\<^isub>1)#\<Gamma>)" in subtype_implies_closed)+
+ apply(drule_tac \<Gamma>="((TVarB Xa T\<^isub>1)#\<Gamma>)" in subtype_implies_closed)+
apply(simp add: closed_in_def)
apply(drule fresh_domain)+
apply(simp add: fresh_def)
- apply(subgoal_tac "X \<notin> (insert Xa (domain \<Gamma>))")(*A*)
+ apply(subgoal_tac "X \<notin> (insert Xa (ty_domain \<Gamma>))")(*A*)
apply(force)
- (*A*)apply(simp add: at_fin_set_supp[OF at_tyvrs_inst, OF finite_domain])
+ (*A*)apply(simp add: at_fin_set_supp[OF at_tyvrs_inst, OF finite_domains(1)])
(* 2nd conjunct *)apply(frule_tac X="X" in subtype_implies_fresh)
apply(assumption)
+ apply (frule_tac \<Gamma>="TVarB Xa T\<^isub>1 # \<Gamma>" in subtype_implies_ok)
+ apply (erule validE)
+ apply (simp add: valid_ty_domain_fresh)
apply(drule_tac X="Xa" in subtype_implies_fresh)
apply(assumption)
- apply(simp add: fresh_prod)
apply(drule_tac pi="[(X,Xa)]" in subtype_of.eqvt(2))
apply(simp add: calc_atm)
apply(simp add: pt_fresh_fresh[OF pt_tyvrs_inst, OF at_tyvrs_inst])
@@ -556,8 +893,8 @@
that of @{term x} the property @{term "P y"} holds. *}
lemma
- shows trans: "\<Gamma>\<turnstile>S<:Q \<Longrightarrow> \<Gamma>\<turnstile>Q<:T \<Longrightarrow> \<Gamma>\<turnstile>S<:T"
- and narrow: "(\<Delta>@[(X,Q)]@\<Gamma>)\<turnstile>M<:N \<Longrightarrow> \<Gamma>\<turnstile>P<:Q \<Longrightarrow> (\<Delta>@[(X,P)]@\<Gamma>)\<turnstile>M<:N"
+ shows subtype_transitivity: "\<Gamma>\<turnstile>S<:Q \<Longrightarrow> \<Gamma>\<turnstile>Q<:T \<Longrightarrow> \<Gamma>\<turnstile>S<:T"
+ and subtype_narrow: "(\<Delta>@[(TVarB X Q)]@\<Gamma>)\<turnstile>M<:N \<Longrightarrow> \<Gamma>\<turnstile>P<:Q \<Longrightarrow> (\<Delta>@[(TVarB X P)]@\<Gamma>)\<turnstile>M<:N"
proof (induct Q arbitrary: \<Gamma> S T \<Delta> X P M N taking: "size_ty" rule: measure_induct_rule)
case (less Q)
--{* \begin{minipage}[t]{0.9\textwidth}
@@ -566,8 +903,8 @@
have IH_trans:
"\<And>Q' \<Gamma> S T. \<lbrakk>size_ty Q' < size_ty Q; \<Gamma>\<turnstile>S<:Q'; \<Gamma>\<turnstile>Q'<:T\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>S<:T" by fact
have IH_narrow:
- "\<And>Q' \<Delta> \<Gamma> X M N P. \<lbrakk>size_ty Q' < size_ty Q; (\<Delta>@[(X,Q')]@\<Gamma>)\<turnstile>M<:N; \<Gamma>\<turnstile>P<:Q'\<rbrakk>
- \<Longrightarrow> (\<Delta>@[(X,P)]@\<Gamma>)\<turnstile>M<:N" by fact
+ "\<And>Q' \<Delta> \<Gamma> X M N P. \<lbrakk>size_ty Q' < size_ty Q; (\<Delta>@[(TVarB X Q')]@\<Gamma>)\<turnstile>M<:N; \<Gamma>\<turnstile>P<:Q'\<rbrakk>
+ \<Longrightarrow> (\<Delta>@[(TVarB X P)]@\<Gamma>)\<turnstile>M<:N" by fact
--{* \begin{minipage}[t]{0.9\textwidth}
We proceed with the transitivity proof as an auxiliary lemma, because it needs
to be referenced in the narrowing proof.\end{minipage}*}
@@ -579,37 +916,36 @@
and "\<Gamma>' \<turnstile> Q <: T" --{* right-hand derivation *}
thus "\<Gamma>' \<turnstile> S' <: T"
proof (nominal_induct \<Gamma>' S' Q\<equiv>Q rule: subtype_of.strong_induct)
- case (S_Top \<Gamma> S)
+ case (SA_Top \<Gamma> S)
--{* \begin{minipage}[t]{0.9\textwidth}
In this case the left-hand derivation is @{term "\<Gamma> \<turnstile> S <: Top"}, giving
us @{term "\<turnstile> \<Gamma> ok"} and @{term "S closed_in \<Gamma>"}. This case is straightforward,
because the right-hand derivation must be of the form @{term "\<Gamma> \<turnstile> Top <: Top"}
giving us the equation @{term "T = Top"}.\end{minipage}*}
hence rh_drv: "\<Gamma> \<turnstile> Top <: T" by simp
- hence T_inst: "T = Top" by (simp add: S_TopE)
- have "\<turnstile> \<Gamma> ok"
- and "S closed_in \<Gamma>" by fact+
- hence "\<Gamma> \<turnstile> S <: Top" by (simp add: subtype_of.S_Top)
+ hence T_inst: "T = Top" by (auto elim: S_TopE)
+ from `\<turnstile> \<Gamma> ok` and `S closed_in \<Gamma>`
+ have "\<Gamma> \<turnstile> S <: Top" by (simp add: subtype_of.SA_Top)
thus "\<Gamma> \<turnstile> S <: T" using T_inst by simp
next
- case (S_Var Y U \<Gamma>)
+ case (SA_trans_TVar Y U \<Gamma>)
-- {* \begin{minipage}[t]{0.9\textwidth}
In this case the left-hand derivation is @{term "\<Gamma> \<turnstile> Tvar Y <: Q"}
with @{term "S = Tvar Y"}. We have therefore @{term "(Y,U)"}
is in @{term "\<Gamma>"} and by inner induction hypothesis that @{term "\<Gamma> \<turnstile> U <: T"}.
By @{text "S_Var"} follows @{term "\<Gamma> \<turnstile> Tvar Y <: T"}.\end{minipage}*}
hence IH_inner: "\<Gamma> \<turnstile> U <: T" by simp
- have "(Y,U) \<in> set \<Gamma>" by fact
- with IH_inner show "\<Gamma> \<turnstile> Tvar Y <: T" by (simp add: subtype_of.S_Var)
+ have "(TVarB Y U) \<in> set \<Gamma>" by fact
+ with IH_inner show "\<Gamma> \<turnstile> Tvar Y <: T" by (simp add: subtype_of.SA_trans_TVar)
next
- case (S_Refl \<Gamma> X)
+ case (SA_refl_TVar \<Gamma> X)
--{* \begin{minipage}[t]{0.9\textwidth}
In this case the left-hand derivation is @{term "\<Gamma>\<turnstile>(Tvar X) <: (Tvar X)"} with
@{term "Q=Tvar X"}. The goal then follows immediately from the right-hand
derivation.\end{minipage}*}
thus "\<Gamma> \<turnstile> Tvar X <: T" by simp
next
- case (S_Arrow \<Gamma> Q\<^isub>1 S\<^isub>1 S\<^isub>2 Q\<^isub>2)
+ case (SA_arrow \<Gamma> Q\<^isub>1 S\<^isub>1 S\<^isub>2 Q\<^isub>2)
--{* \begin{minipage}[t]{0.9\textwidth}
In this case the left-hand derivation is @{term "\<Gamma> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: Q\<^isub>1 \<rightarrow> Q\<^isub>2"} with
@{term "S\<^isub>1\<rightarrow>S\<^isub>2=S"} and @{term "Q\<^isub>1\<rightarrow>Q\<^isub>2=Q"}. We know that the @{text "size_ty"} of
@@ -629,7 +965,7 @@
have lh_drv_prm\<^isub>1: "\<Gamma> \<turnstile> Q\<^isub>1 <: S\<^isub>1" by fact
have lh_drv_prm\<^isub>2: "\<Gamma> \<turnstile> S\<^isub>2 <: Q\<^isub>2" by fact
from rh_drv have "T=Top \<or> (\<exists>T\<^isub>1 T\<^isub>2. T=T\<^isub>1\<rightarrow>T\<^isub>2 \<and> \<Gamma>\<turnstile>T\<^isub>1<:Q\<^isub>1 \<and> \<Gamma>\<turnstile>Q\<^isub>2<:T\<^isub>2)"
- by (simp add: S_ArrowE_left)
+ by (auto elim: S_ArrowE_left)
moreover
have "S\<^isub>1 closed_in \<Gamma>" and "S\<^isub>2 closed_in \<Gamma>"
using lh_drv_prm\<^isub>1 lh_drv_prm\<^isub>2 by (simp_all add: subtype_implies_closed)
@@ -647,176 +983,1020 @@
moreover
from IH_trans[of "Q\<^isub>2"]
have "\<Gamma> \<turnstile> S\<^isub>2 <: T\<^isub>2" using Q\<^isub>1\<^isub>2_less rh_drv_prm\<^isub>2 lh_drv_prm\<^isub>2 by simp
- ultimately have "\<Gamma> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T\<^isub>1 \<rightarrow> T\<^isub>2" by (simp add: subtype_of.S_Arrow)
+ ultimately have "\<Gamma> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T\<^isub>1 \<rightarrow> T\<^isub>2" by (simp add: subtype_of.SA_arrow)
hence "\<Gamma> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T" using T_inst by simp
}
ultimately show "\<Gamma> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T" by blast
next
- case (S_Forall \<Gamma> Q\<^isub>1 S\<^isub>1 X S\<^isub>2 Q\<^isub>2)
+ case (SA_all \<Gamma> Q\<^isub>1 S\<^isub>1 X S\<^isub>2 Q\<^isub>2)
--{* \begin{minipage}[t]{0.9\textwidth}
- In this case the left-hand derivation is @{text "\<Gamma>\<turnstile>\<forall>[X<:S\<^isub>1].S\<^isub>2 <: \<forall>[X<:Q\<^isub>1].Q\<^isub>2"} with
- @{text "\<forall>[X<:S\<^isub>1].S\<^isub>2=S"} and @{text "\<forall>[X<:Q\<^isub>1].Q\<^isub>2=Q"}. We therefore have the sub-derivations
- @{term "\<Gamma>\<turnstile>Q\<^isub>1<:S\<^isub>1"} and @{term "((X,Q\<^isub>1)#\<Gamma>)\<turnstile>S\<^isub>2<:Q\<^isub>2"}. Since @{term "X"} is a binder, we
+ In this case the left-hand derivation is @{term "\<Gamma>\<turnstile>(\<forall>X<:S\<^isub>1. S\<^isub>2) <: (\<forall>X<:Q\<^isub>1. Q\<^isub>2)"} with
+ @{term "(\<forall>X<:S\<^isub>1. S\<^isub>2)=S"} and @{term "(\<forall>X<:Q\<^isub>1. Q\<^isub>2)=Q"}. We therefore have the sub-derivations
+ @{term "\<Gamma>\<turnstile>Q\<^isub>1<:S\<^isub>1"} and @{term "((TVarB X Q\<^isub>1)#\<Gamma>)\<turnstile>S\<^isub>2<:Q\<^isub>2"}. Since @{term "X"} is a binder, we
assume that it is sufficiently fresh; in particular we have the freshness conditions
@{term "X\<sharp>\<Gamma>"} and @{term "X\<sharp>Q\<^isub>1"} (these assumptions are provided by the strong
induction-rule @{text "subtype_of_induct"}). We know that the @{text "size_ty"} of
@{term Q\<^isub>1} and @{term Q\<^isub>2} is smaller than that of @{term Q};
so we can apply the outer induction hypotheses for @{term Q\<^isub>1} and @{term Q\<^isub>2}.
- The right-hand derivation is @{text "\<Gamma> \<turnstile> \<forall>[X<:Q\<^isub>1].Q\<^isub>2 <: T"}. Since @{term "X\<sharp>\<Gamma>"}
+ The right-hand derivation is @{term "\<Gamma> \<turnstile> (\<forall>X<:Q\<^isub>1. Q\<^isub>2) <: T"}. Since @{term "X\<sharp>\<Gamma>"}
and @{term "X\<sharp>Q\<^isub>1"} there exist types @{text "T\<^isub>1,T\<^isub>2"} such that
- @{text "T=Top \<or> T=\<forall>[X<:T\<^isub>1].T\<^isub>2"}. The @{term "Top"}-case is straightforward once we know
- @{text "(\<forall>[X<:S\<^isub>1].S\<^isub>2) closed_in \<Gamma>"} and @{term "\<turnstile> \<Gamma> ok"}. In the other case we have
- the sub-derivations @{term "\<Gamma>\<turnstile>T\<^isub>1<:Q\<^isub>1"} and @{term "((X,T\<^isub>1)#\<Gamma>)\<turnstile>Q\<^isub>2<:T\<^isub>2"}. Using the outer
+ @{term "T=Top \<or> T=(\<forall>X<:T\<^isub>1. T\<^isub>2)"}. The @{term "Top"}-case is straightforward once we know
+ @{term "(\<forall>X<:S\<^isub>1. S\<^isub>2) closed_in \<Gamma>"} and @{term "\<turnstile> \<Gamma> ok"}. In the other case we have
+ the sub-derivations @{term "\<Gamma>\<turnstile>T\<^isub>1<:Q\<^isub>1"} and @{term "((TVarB X T\<^isub>1)#\<Gamma>)\<turnstile>Q\<^isub>2<:T\<^isub>2"}. Using the outer
induction hypothesis for transitivity we can derive @{term "\<Gamma>\<turnstile>T\<^isub>1<:S\<^isub>1"}. From the outer
- induction for narrowing we get @{term "((X,T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: Q\<^isub>2"} and then using again
- induction for transitivity we obtain @{term "((X,T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2"}. By rule
+ induction for narrowing we get @{term "((TVarB X T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: Q\<^isub>2"} and then using again
+ induction for transitivity we obtain @{term "((TVarB X T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2"}. By rule
@{text "S_Forall"} and the freshness condition @{term "X\<sharp>\<Gamma>"} follows
- @{text "\<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: \<forall>[X<:T\<^isub>1].T\<^isub>2"}, which is @{text "\<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: T\<^isub>"}.
+ @{term "\<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: (\<forall>X<:T\<^isub>1. T\<^isub>2)"}, which is @{term "\<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: T"}.
\end{minipage}*}
- hence rh_drv: "\<Gamma> \<turnstile> \<forall>[X<:Q\<^isub>1].Q\<^isub>2 <: T" by simp
+ hence rh_drv: "\<Gamma> \<turnstile> (\<forall>X<:Q\<^isub>1. Q\<^isub>2) <: T" by simp
have lh_drv_prm\<^isub>1: "\<Gamma> \<turnstile> Q\<^isub>1 <: S\<^isub>1" by fact
- have lh_drv_prm\<^isub>2: "((X,Q\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: Q\<^isub>2" by fact
- have "X\<sharp>\<Gamma>" by fact
+ have lh_drv_prm\<^isub>2: "((TVarB X Q\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: Q\<^isub>2" by fact
+ then have "X\<sharp>\<Gamma>" by (force dest: subtype_implies_ok simp add: valid_ty_domain_fresh)
then have fresh_cond: "X\<sharp>\<Gamma>" "X\<sharp>Q\<^isub>1" using lh_drv_prm\<^isub>1 by (simp_all add: subtype_implies_fresh)
- from `\<forall>[X<:Q\<^isub>1].Q\<^isub>2 = Q`
+ from `(\<forall>X<:Q\<^isub>1. Q\<^isub>2) = Q`
have Q\<^isub>1\<^isub>2_less: "size_ty Q\<^isub>1 < size_ty Q" "size_ty Q\<^isub>2 < size_ty Q " using fresh_cond by auto
from rh_drv
- have "T=Top \<or> (\<exists>T\<^isub>1 T\<^isub>2. T=\<forall>[X<:T\<^isub>1].T\<^isub>2 \<and> \<Gamma>\<turnstile>T\<^isub>1<:Q\<^isub>1 \<and> ((X,T\<^isub>1)#\<Gamma>)\<turnstile>Q\<^isub>2<:T\<^isub>2)"
+ have "T=Top \<or> (\<exists>T\<^isub>1 T\<^isub>2. T=(\<forall>X<:T\<^isub>1. T\<^isub>2) \<and> \<Gamma>\<turnstile>T\<^isub>1<:Q\<^isub>1 \<and> ((TVarB X T\<^isub>1)#\<Gamma>)\<turnstile>Q\<^isub>2<:T\<^isub>2)"
using fresh_cond by (simp add: S_ForallE_left)
moreover
- have "S\<^isub>1 closed_in \<Gamma>" and "S\<^isub>2 closed_in ((X,Q\<^isub>1)#\<Gamma>)"
+ have "S\<^isub>1 closed_in \<Gamma>" and "S\<^isub>2 closed_in ((TVarB X Q\<^isub>1)#\<Gamma>)"
using lh_drv_prm\<^isub>1 lh_drv_prm\<^isub>2 by (simp_all add: subtype_implies_closed)
- hence "(\<forall>[X<:S\<^isub>1].S\<^isub>2) closed_in \<Gamma>" by (force simp add: closed_in_def ty.supp abs_supp)
+ hence "(\<forall>X<:S\<^isub>1. S\<^isub>2) closed_in \<Gamma>" by (force simp add: closed_in_def ty.supp abs_supp)
moreover
have "\<turnstile> \<Gamma> ok" using rh_drv by (rule subtype_implies_ok)
moreover
- { assume "\<exists>T\<^isub>1 T\<^isub>2. T=\<forall>[X<:T\<^isub>1].T\<^isub>2 \<and> \<Gamma>\<turnstile>T\<^isub>1<:Q\<^isub>1 \<and> ((X,T\<^isub>1)#\<Gamma>)\<turnstile>Q\<^isub>2<:T\<^isub>2"
+ { assume "\<exists>T\<^isub>1 T\<^isub>2. T=(\<forall>X<:T\<^isub>1. T\<^isub>2) \<and> \<Gamma>\<turnstile>T\<^isub>1<:Q\<^isub>1 \<and> ((TVarB X T\<^isub>1)#\<Gamma>)\<turnstile>Q\<^isub>2<:T\<^isub>2"
then obtain T\<^isub>1 T\<^isub>2
- where T_inst: "T = \<forall>[X<:T\<^isub>1].T\<^isub>2"
+ where T_inst: "T = (\<forall>X<:T\<^isub>1. T\<^isub>2)"
and rh_drv_prm\<^isub>1: "\<Gamma> \<turnstile> T\<^isub>1 <: Q\<^isub>1"
- and rh_drv_prm\<^isub>2:"((X,T\<^isub>1)#\<Gamma>) \<turnstile> Q\<^isub>2 <: T\<^isub>2" by force
+ and rh_drv_prm\<^isub>2:"((TVarB X T\<^isub>1)#\<Gamma>) \<turnstile> Q\<^isub>2 <: T\<^isub>2" by force
from IH_trans[of "Q\<^isub>1"]
have "\<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1" using lh_drv_prm\<^isub>1 rh_drv_prm\<^isub>1 Q\<^isub>1\<^isub>2_less by blast
moreover
from IH_narrow[of "Q\<^isub>1" "[]"]
- have "((X,T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: Q\<^isub>2" using Q\<^isub>1\<^isub>2_less lh_drv_prm\<^isub>2 rh_drv_prm\<^isub>1 by simp
+ have "((TVarB X T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: Q\<^isub>2" using Q\<^isub>1\<^isub>2_less lh_drv_prm\<^isub>2 rh_drv_prm\<^isub>1 by simp
with IH_trans[of "Q\<^isub>2"]
- have "((X,T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2" using Q\<^isub>1\<^isub>2_less rh_drv_prm\<^isub>2 by simp
- ultimately have "\<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: \<forall>[X<:T\<^isub>1].T\<^isub>2"
- using fresh_cond by (simp add: subtype_of.S_Forall)
- hence "\<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: T" using T_inst by simp
+ have "((TVarB X T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2" using Q\<^isub>1\<^isub>2_less rh_drv_prm\<^isub>2 by simp
+ ultimately have "\<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: (\<forall>X<:T\<^isub>1. T\<^isub>2)"
+ using fresh_cond by (simp add: subtype_of.SA_all)
+ hence "\<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: T" using T_inst by simp
}
- ultimately show "\<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: T" by blast
+ ultimately show "\<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: T" by blast
qed
qed
{ --{* The transitivity proof is now by the auxiliary lemma. *}
case 1
- have "\<Gamma> \<turnstile> S <: Q"
- and "\<Gamma> \<turnstile> Q <: T" by fact+
- thus "\<Gamma> \<turnstile> S <: T" by (rule transitivity_aux)
+ from `\<Gamma> \<turnstile> S <: Q` and `\<Gamma> \<turnstile> Q <: T`
+ show "\<Gamma> \<turnstile> S <: T" by (rule transitivity_aux)
next
- --{* The narrowing proof proceeds by an induction over @{term "(\<Delta>@[(X,Q)]@\<Gamma>) \<turnstile> M <: N"}. *}
+ --{* The narrowing proof proceeds by an induction over @{term "(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> M <: N"}. *}
case 2
- have "(\<Delta>@[(X,Q)]@\<Gamma>) \<turnstile> M <: N" --{* left-hand derivation *}
- and "\<Gamma> \<turnstile> P<:Q" by fact+ --{* right-hand derivation *}
- thus "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> M <: N"
- proof (nominal_induct \<Gamma>\<equiv>"\<Delta>@[(X,Q)]@\<Gamma>" M N avoiding: \<Delta> \<Gamma> X rule: subtype_of.strong_induct)
- case (S_Top _ S \<Delta> \<Gamma> X)
+ from `(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> M <: N` --{* left-hand derivation *}
+ and `\<Gamma> \<turnstile> P<:Q` --{* right-hand derivation *}
+ show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> M <: N"
+ proof (nominal_induct \<Gamma>\<equiv>"\<Delta>@[(TVarB X Q)]@\<Gamma>" M N avoiding: \<Delta> \<Gamma> X rule: subtype_of.strong_induct)
+ case (SA_Top _ S \<Delta> \<Gamma> X)
--{* \begin{minipage}[t]{0.9\textwidth}
- In this case the left-hand derivation is @{term "(\<Delta>@[(X,Q)]@\<Gamma>) \<turnstile> S <: Top"}. We show
- that the context @{term "\<Delta>@[(X,P)]@\<Gamma>"} is ok and that @{term S} is closed in
- @{term "\<Delta>@[(X,P)]@\<Gamma>"}. Then we can apply the @{text "S_Top"}-rule.\end{minipage}*}
- hence lh_drv_prm\<^isub>1: "\<turnstile> (\<Delta>@[(X,Q)]@\<Gamma>) ok"
- and lh_drv_prm\<^isub>2: "S closed_in (\<Delta>@[(X,Q)]@\<Gamma>)" by simp_all
+ In this case the left-hand derivation is @{term "(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> S <: Top"}. We show
+ that the context @{term "\<Delta>@[(TVarB X P)]@\<Gamma>"} is ok and that @{term S} is closed in
+ @{term "\<Delta>@[(TVarB X P)]@\<Gamma>"}. Then we can apply the @{text "S_Top"}-rule.\end{minipage}*}
+ hence lh_drv_prm\<^isub>1: "\<turnstile> (\<Delta>@[(TVarB X Q)]@\<Gamma>) ok"
+ and lh_drv_prm\<^isub>2: "S closed_in (\<Delta>@[(TVarB X Q)]@\<Gamma>)" by simp_all
have rh_drv: "\<Gamma> \<turnstile> P <: Q" by fact
hence "P closed_in \<Gamma>" by (simp add: subtype_implies_closed)
- with lh_drv_prm\<^isub>1 have "\<turnstile> (\<Delta>@[(X,P)]@\<Gamma>) ok" by (simp add: replace_type)
+ with lh_drv_prm\<^isub>1 have "\<turnstile> (\<Delta>@[(TVarB X P)]@\<Gamma>) ok" by (simp add: replace_type)
moreover
- from lh_drv_prm\<^isub>2 have "S closed_in (\<Delta>@[(X,P)]@\<Gamma>)"
- by (simp add: closed_in_def domain_append)
- ultimately show "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> S <: Top" by (simp add: subtype_of.S_Top)
+ from lh_drv_prm\<^isub>2 have "S closed_in (\<Delta>@[(TVarB X P)]@\<Gamma>)"
+ by (simp add: closed_in_def domains_append)
+ ultimately show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> S <: Top" by (simp add: subtype_of.SA_Top)
next
- case (S_Var Y S _ N \<Delta> \<Gamma> X)
+ case (SA_trans_TVar Y S _ N \<Delta> \<Gamma> X)
--{* \begin{minipage}[t]{0.9\textwidth}
- In this case the left-hand derivation is @{term "(\<Delta>@[(X,Q)]@\<Gamma>) \<turnstile> Tvar Y <: N"} and
- by inner induction hypothesis we have @{term "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> S <: N"}. We therefore
- know that the contexts @{term "\<Delta>@[(X,Q)]@\<Gamma>"} and @{term "\<Delta>@[(X,P)]@\<Gamma>"} are ok, and that
- @{term "(Y,S)"} is in @{term "\<Delta>@[(X,Q)]@\<Gamma>"}. We need to show that
- @{term "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> Tvar Y <: N"} holds. In case @{term "X\<noteq>Y"} we know that
- @{term "(Y,S)"} is in @{term "\<Delta>@[(X,P)]@\<Gamma>"} and can use the inner induction hypothesis
+ In this case the left-hand derivation is @{term "(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> Tvar Y <: N"} and
+ by inner induction hypothesis we have @{term "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> S <: N"}. We therefore
+ know that the contexts @{term "\<Delta>@[(TVarB X Q)]@\<Gamma>"} and @{term "\<Delta>@[(TVarB X P)]@\<Gamma>"} are ok, and that
+ @{term "(Y,S)"} is in @{term "\<Delta>@[(TVarB X Q)]@\<Gamma>"}. We need to show that
+ @{term "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Tvar Y <: N"} holds. In case @{term "X\<noteq>Y"} we know that
+ @{term "(Y,S)"} is in @{term "\<Delta>@[(TVarB X P)]@\<Gamma>"} and can use the inner induction hypothesis
and rule @{text "S_Var"} to conclude. In case @{term "X=Y"} we can infer that
- @{term "S=Q"}; moreover we have that @{term "(\<Delta>@[(X,P)]@\<Gamma>) extends \<Gamma>"} and therefore
- by @{text "weakening"} that @{term "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> P <: Q"} holds. By transitivity we
- obtain then @{term "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> P <: N"} and can conclude by applying rule
+ @{term "S=Q"}; moreover we have that @{term "(\<Delta>@[(TVarB X P)]@\<Gamma>) extends \<Gamma>"} and therefore
+ by @{text "weakening"} that @{term "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> P <: Q"} holds. By transitivity we
+ then obtain @{term "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> P <: N"} and can conclude by applying rule
@{text "S_Var"}.\end{minipage}*}
- hence IH_inner: "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> S <: N"
- and lh_drv_prm: "(Y,S) \<in> set (\<Delta>@[(X,Q)]@\<Gamma>)"
+ hence IH_inner: "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> S <: N"
+ and lh_drv_prm: "(TVarB Y S) \<in> set (\<Delta>@[(TVarB X Q)]@\<Gamma>)"
and rh_drv: "\<Gamma> \<turnstile> P<:Q"
- and ok\<^isub>Q: "\<turnstile> (\<Delta>@[(X,Q)]@\<Gamma>) ok" by (simp_all add: subtype_implies_ok)
- hence ok\<^isub>P: "\<turnstile> (\<Delta>@[(X,P)]@\<Gamma>) ok" by (simp add: subtype_implies_ok)
- show "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> Tvar Y <: N"
+ and ok\<^isub>Q: "\<turnstile> (\<Delta>@[(TVarB X Q)]@\<Gamma>) ok" by (simp_all add: subtype_implies_ok)
+ hence ok\<^isub>P: "\<turnstile> (\<Delta>@[(TVarB X P)]@\<Gamma>) ok" by (simp add: subtype_implies_ok)
+ show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Tvar Y <: N"
proof (cases "X=Y")
case False
have "X\<noteq>Y" by fact
- hence "(Y,S)\<in>set (\<Delta>@[(X,P)]@\<Gamma>)" using lh_drv_prm by simp
- with IH_inner show "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> Tvar Y <: N" by (simp add: subtype_of.S_Var)
+ hence "(TVarB Y S)\<in>set (\<Delta>@[(TVarB X P)]@\<Gamma>)" using lh_drv_prm by (simp add:binding.inject)
+ with IH_inner show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Tvar Y <: N" by (simp add: subtype_of.SA_trans_TVar)
next
case True
- have memb\<^isub>X\<^isub>Q: "(X,Q)\<in>set (\<Delta>@[(X,Q)]@\<Gamma>)" by simp
- have memb\<^isub>X\<^isub>P: "(X,P)\<in>set (\<Delta>@[(X,P)]@\<Gamma>)" by simp
+ have memb\<^isub>X\<^isub>Q: "(TVarB X Q)\<in>set (\<Delta>@[(TVarB X Q)]@\<Gamma>)" by simp
+ have memb\<^isub>X\<^isub>P: "(TVarB X P)\<in>set (\<Delta>@[(TVarB X P)]@\<Gamma>)" by simp
have eq: "X=Y" by fact
hence "S=Q" using ok\<^isub>Q lh_drv_prm memb\<^isub>X\<^isub>Q by (simp only: uniqueness_of_ctxt)
- hence "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> Q <: N" using IH_inner by simp
+ hence "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Q <: N" using IH_inner by simp
moreover
- have "(\<Delta>@[(X,P)]@\<Gamma>) extends \<Gamma>" by (simp add: extends_def)
- hence "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> P <: Q" using rh_drv ok\<^isub>P by (simp only: weakening)
- ultimately have "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> P <: N" by (simp add: transitivity_aux)
- thus "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> Tvar Y <: N" using memb\<^isub>X\<^isub>P eq by (simp only: subtype_of.S_Var)
+ have "(\<Delta>@[(TVarB X P)]@\<Gamma>) extends \<Gamma>" by (simp add: extends_def)
+ hence "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> P <: Q" using rh_drv ok\<^isub>P by (simp only: weakening)
+ ultimately have "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> P <: N" by (simp add: transitivity_aux)
+ thus "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Tvar Y <: N" using memb\<^isub>X\<^isub>P eq by (simp only: subtype_of.SA_trans_TVar)
qed
next
- case (S_Refl _ Y \<Delta> \<Gamma> X)
+ case (SA_refl_TVar _ Y \<Delta> \<Gamma> X)
--{* \begin{minipage}[t]{0.9\textwidth}
- In this case the left-hand derivation is @{term "(\<Delta>@[(X,Q)]@\<Gamma>) \<turnstile> Tvar Y <: Tvar Y"} and we
- therefore know that @{term "\<Delta>@[(X,Q)]@\<Gamma>"} is ok and that @{term "Y"} is in
- the domain of @{term "\<Delta>@[(X,Q)]@\<Gamma>"}. We therefore know that @{term "\<Delta>@[(X,P)]@\<Gamma>"} is ok
- and that @{term Y} is in the domain of @{term "\<Delta>@[(X,P)]@\<Gamma>"}. We can conclude by applying
+ In this case the left-hand derivation is @{term "(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> Tvar Y <: Tvar Y"} and we
+ therefore know that @{term "\<Delta>@[(TVarB X Q)]@\<Gamma>"} is ok and that @{term "Y"} is in
+ the domain of @{term "\<Delta>@[(TVarB X Q)]@\<Gamma>"}. We therefore know that @{term "\<Delta>@[(TVarB X P)]@\<Gamma>"} is ok
+ and that @{term Y} is in the domain of @{term "\<Delta>@[(TVarB X P)]@\<Gamma>"}. We can conclude by applying
rule @{text "S_Refl"}.\end{minipage}*}
- hence lh_drv_prm\<^isub>1: "\<turnstile> (\<Delta>@[(X,Q)]@\<Gamma>) ok"
- and lh_drv_prm\<^isub>2: "Y \<in> domain (\<Delta>@[(X,Q)]@\<Gamma>)" by simp_all
+ hence lh_drv_prm\<^isub>1: "\<turnstile> (\<Delta>@[(TVarB X Q)]@\<Gamma>) ok"
+ and lh_drv_prm\<^isub>2: "Y \<in> ty_domain (\<Delta>@[(TVarB X Q)]@\<Gamma>)" by simp_all
have "\<Gamma> \<turnstile> P <: Q" by fact
hence "P closed_in \<Gamma>" by (simp add: subtype_implies_closed)
- with lh_drv_prm\<^isub>1 have "\<turnstile> (\<Delta>@[(X,P)]@\<Gamma>) ok" by (simp add: replace_type)
+ with lh_drv_prm\<^isub>1 have "\<turnstile> (\<Delta>@[(TVarB X P)]@\<Gamma>) ok" by (simp add: replace_type)
moreover
- from lh_drv_prm\<^isub>2 have "Y \<in> domain (\<Delta>@[(X,P)]@\<Gamma>)" by (simp add: domain_append)
- ultimately show "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> Tvar Y <: Tvar Y" by (simp add: subtype_of.S_Refl)
+ from lh_drv_prm\<^isub>2 have "Y \<in> ty_domain (\<Delta>@[(TVarB X P)]@\<Gamma>)" by (simp add: domains_append)
+ ultimately show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Tvar Y <: Tvar Y" by (simp add: subtype_of.SA_refl_TVar)
next
- case (S_Arrow _ S\<^isub>1 Q\<^isub>1 Q\<^isub>2 S\<^isub>2 \<Delta> \<Gamma> X)
+ case (SA_arrow _ S\<^isub>1 Q\<^isub>1 Q\<^isub>2 S\<^isub>2 \<Delta> \<Gamma> X)
--{* \begin{minipage}[t]{0.9\textwidth}
- In this case the left-hand derivation is @{term "(\<Delta>@[(X,Q)]@\<Gamma>) \<turnstile> Q\<^isub>1 \<rightarrow> Q\<^isub>2 <: S\<^isub>1 \<rightarrow> S\<^isub>2"}
+ In this case the left-hand derivation is @{term "(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> Q\<^isub>1 \<rightarrow> Q\<^isub>2 <: S\<^isub>1 \<rightarrow> S\<^isub>2"}
and the proof is trivial.\end{minipage}*}
- thus "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> Q\<^isub>1 \<rightarrow> Q\<^isub>2 <: S\<^isub>1 \<rightarrow> S\<^isub>2" by blast
+ thus "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Q\<^isub>1 \<rightarrow> Q\<^isub>2 <: S\<^isub>1 \<rightarrow> S\<^isub>2" by blast
next
- case (S_Forall _ T\<^isub>1 S\<^isub>1 Y S\<^isub>2 T\<^isub>2 \<Delta> \<Gamma> X)
+ case (SA_all \<Gamma>' T\<^isub>1 S\<^isub>1 Y S\<^isub>2 T\<^isub>2 \<Delta> \<Gamma> X)
--{* \begin{minipage}[t]{0.9\textwidth}
- In this case the left-hand derivation is @{text "(\<Delta>@[(X,Q)]@\<Gamma>) \<turnstile> \<forall>[Y<:S\<^isub>1].S\<^isub>2 <: \<forall>[Y<:T\<^isub>1].T\<^isub>2"}
- and therfore we know that the binder @{term Y} is fresh for @{term "\<Delta>@[(X,Q)]@\<Gamma>"}. By
- the inner induction hypothesis we have that @{term "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> T\<^isub>1 <: S\<^isub>1"} and
- @{term "((Y,T\<^isub>1)#\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2"}. Since @{term P} is a subtype of @{term Q}
+ In this case the left-hand derivation is @{term "(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> (\<forall>Y<:S\<^isub>1. S\<^isub>2) <: (\<forall>Y<:T\<^isub>1. T\<^isub>2)"}
+ and therefore we know that the binder @{term Y} is fresh for @{term "\<Delta>@[(TVarB X Q)]@\<Gamma>"}. By
+ the inner induction hypothesis we have that @{term "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> T\<^isub>1 <: S\<^isub>1"} and
+ @{term "((TVarB Y T\<^isub>1)#\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2"}. Since @{term P} is a subtype of @{term Q}
we can infer that @{term Y} is fresh for @{term P} and thus also fresh for
- @{term "\<Delta>@[(X,P)]@\<Gamma>"}. We can then conclude by applying rule @{text "S_Forall"}.
+ @{term "\<Delta>@[(TVarB X P)]@\<Gamma>"}. We can then conclude by applying rule @{text "S_Forall"}.
\end{minipage}*}
- hence IH_inner\<^isub>1: "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> T\<^isub>1 <: S\<^isub>1"
- and IH_inner\<^isub>2: "((Y,T\<^isub>1)#\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2"
- and lh_drv_prm: "Y\<sharp>(\<Delta>@[(X,Q)]@\<Gamma>)" by force+
- have rh_drv: "\<Gamma> \<turnstile> P <: Q" by fact
- hence "Y\<sharp>P" using lh_drv_prm by (simp only: fresh_list_append subtype_implies_fresh)
- hence "Y\<sharp>(\<Delta>@[(X,P)]@\<Gamma>)" using lh_drv_prm
- by (simp add: fresh_list_append fresh_list_cons fresh_prod)
+ hence rh_drv: "\<Gamma> \<turnstile> P <: Q"
+ and IH_inner\<^isub>1: "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> T\<^isub>1 <: S\<^isub>1"
+ and "TVarB Y T\<^isub>1 # \<Gamma>' = ((TVarB Y T\<^isub>1)#\<Delta>) @ [TVarB X Q] @ \<Gamma>" by auto
+ moreover have " \<lbrakk>\<Gamma>\<turnstile>P<:Q; TVarB Y T\<^isub>1 # \<Gamma>' = ((TVarB Y T\<^isub>1)#\<Delta>) @ [TVarB X Q] @ \<Gamma>\<rbrakk> \<Longrightarrow> (((TVarB Y T\<^isub>1)#\<Delta>) @ [TVarB X P] @ \<Gamma>)\<turnstile>S\<^isub>2<:T\<^isub>2" by fact
+ ultimately have IH_inner\<^isub>2: "(((TVarB Y T\<^isub>1)#\<Delta>) @ [TVarB X P] @ \<Gamma>)\<turnstile>S\<^isub>2<:T\<^isub>2" by auto
with IH_inner\<^isub>1 IH_inner\<^isub>2
- show "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> \<forall>[Y<:S\<^isub>1].S\<^isub>2 <: \<forall>[Y<:T\<^isub>1].T\<^isub>2" by (simp add: subtype_of.S_Forall)
+ show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> (\<forall>Y<:S\<^isub>1. S\<^isub>2) <: (\<forall>Y<:T\<^isub>1. T\<^isub>2)" by (simp add: subtype_of.SA_all)
qed
}
qed
-end
\ No newline at end of file
+section {* Typing *}
+
+inductive
+ typing :: "env \<Rightarrow> trm \<Rightarrow> ty \<Rightarrow> bool" ("_ \<turnstile> _ : _" [60,60,60] 60)
+where
+ T_Var[intro]: "\<lbrakk> VarB x T \<in> set \<Gamma>; \<turnstile> \<Gamma> ok \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> Var x : T"
+| T_App[intro]: "\<lbrakk> \<Gamma> \<turnstile> t\<^isub>1 : T\<^isub>1 \<rightarrow> T\<^isub>2; \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>1 \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> t\<^isub>1 \<cdot> t\<^isub>2 : T\<^isub>2"
+| T_Abs[intro]: "\<lbrakk> VarB x T\<^isub>1 # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2 \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (\<lambda>x:T\<^isub>1. t\<^isub>2) : T\<^isub>1 \<rightarrow> T\<^isub>2"
+| T_Sub[intro]: "\<lbrakk> \<Gamma> \<turnstile> t : S; \<Gamma> \<turnstile> S <: T \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> t : T"
+| T_TAbs[intro]:"\<lbrakk> TVarB X T\<^isub>1 # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2 \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (\<lambda>X<:T\<^isub>1. t\<^isub>2) : (\<forall>X<:T\<^isub>1. T\<^isub>2)"
+| T_TApp[intro]:"\<lbrakk> X \<sharp> (\<Gamma>, t\<^isub>1, T\<^isub>2); \<Gamma> \<turnstile> t\<^isub>1 : (\<forall>X<:T\<^isub>1\<^isub>1. T\<^isub>1\<^isub>2); \<Gamma> \<turnstile> T\<^isub>2 <: T\<^isub>1\<^isub>1 \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> t\<^isub>1 \<cdot>\<^sub>\<tau> T\<^isub>2 : (T\<^isub>1\<^isub>2[X \<mapsto> T\<^isub>2]\<^sub>\<tau>)"
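The conclusion of T_TApp instantiates the bound type variable of the quantified type via the type substitution operator. A naive reading of such a type-level substitution is sketched below in Haskell; it is not taken from the theory, capture-avoidance is omitted, and in the formalization the freshness premise on X is what keeps the instantiation safe.

-- Illustrative sketch only: naive type substitution T[X := P], the informal
-- counterpart of the substitution in the conclusion of T_TApp.  Capture is
-- ignored here; the formalization handles it via freshness.
data Ty
  = TyVar String
  | Top
  | Arrow Ty Ty
  | Forall String Ty Ty
  deriving Show

substTy :: String -> Ty -> Ty -> Ty
substTy x p (TyVar y)
  | x == y    = p
  | otherwise = TyVar y
substTy _ _ Top           = Top
substTy x p (Arrow t1 t2) = Arrow (substTy x p t1) (substTy x p t2)
substTy x p (Forall y t1 t2)
  | x == y    = Forall y (substTy x p t1) t2   -- x is shadowed: stop at the binder
  | otherwise = Forall y (substTy x p t1) (substTy x p t2)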
+
+equivariance typing
+
+lemma better_T_TApp:
+ assumes H1: "\<Gamma> \<turnstile> t\<^isub>1 : (\<forall>X<:T11. T12)"
+ and H2: "\<Gamma> \<turnstile> T2 <: T11"
+ shows "\<Gamma> \<turnstile> t\<^isub>1 \<cdot>\<^sub>\<tau> T2 : (T12[X \<mapsto> T2]\<^sub>\<tau>)"
+proof -
+ obtain Y::tyvrs where Y: "Y \<sharp> (X, T12, \<Gamma>, t\<^isub>1, T2)"
+ by (rule exists_fresh) (rule fin_supp)
+ then have "Y \<sharp> (\<Gamma>, t\<^isub>1, T2)" by simp
+ moreover from Y have "(\<forall>X<:T11. T12) = (\<forall>Y<:T11. [(Y, X)] \<bullet> T12)"
+ by (auto simp add: ty.inject alpha' fresh_prod fresh_atm)
+ with H1 have "\<Gamma> \<turnstile> t\<^isub>1 : (\<forall>Y<:T11. [(Y, X)] \<bullet> T12)" by simp
+ ultimately have "\<Gamma> \<turnstile> t\<^isub>1 \<cdot>\<^sub>\<tau> T2 : (([(Y, X)] \<bullet> T12)[Y \<mapsto> T2]\<^sub>\<tau>)" using H2
+ by (rule T_TApp)
+ with Y show ?thesis by (simp add: type_subst_rename)
+qed
+
+lemma typing_ok:
+ assumes "\<Gamma> \<turnstile> t : T"
+ shows "\<turnstile> \<Gamma> ok"
+using assms by (induct, auto)
+
+nominal_inductive typing
+ by (auto dest!: typing_ok intro: closed_in_fresh fresh_domain
+ simp: abs_fresh fresh_prod fresh_atm freshs valid_ty_domain_fresh fresh_trm_domain)
+
+lemma ok_imp_VarB_closed_in:
+ assumes ok: "\<turnstile> \<Gamma> ok"
+ shows "VarB x T \<in> set \<Gamma> \<Longrightarrow> T closed_in \<Gamma>" using ok
+ by induct (auto simp add: binding.inject closed_in_def)
+
+lemma tyvrs_of_subst: "tyvrs_of (B[X \<mapsto> T]\<^sub>b) = tyvrs_of B"
+ by (nominal_induct B rule: binding.strong_induct) simp_all
+
+lemma ty_domain_subst: "ty_domain (\<Gamma>[X \<mapsto> T]\<^sub>e) = ty_domain \<Gamma>"
+ by (induct \<Gamma>) (simp_all add: tyvrs_of_subst)
+
+lemma vrs_of_subst: "vrs_of (B[X \<mapsto> T]\<^sub>b) = vrs_of B"
+ by (nominal_induct B rule: binding.strong_induct) simp_all
+
+lemma trm_domain_subst: "trm_domain (\<Gamma>[X \<mapsto> T]\<^sub>e) = trm_domain \<Gamma>"
+ by (induct \<Gamma>) (simp_all add: vrs_of_subst)
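Taken together, tyvrs_of_subst, ty_domain_subst, vrs_of_subst and trm_domain_subst say that substituting a type into a binding changes only the carried type, never the bound name, so the domains of a context are invariant under the context substitution. A small Haskell sketch of that invariant follows; the names are chosen for the sketch and not taken from the theory.

-- Illustrative sketch only: context domains are untouched by substitution
-- into the types carried by the bindings, which is the informal content of
-- ty_domain_subst and trm_domain_subst.
data Ty      = TyVar String | Top | Arrow Ty Ty | Forall String Ty Ty
data Binding = TVarB String Ty   -- type-variable binding  X <: T
             | VarB  String Ty   -- term-variable binding  x :  T
type Env     = [Binding]

tyDomain :: Env -> [String]
tyDomain g = [ x | TVarB x _ <- g ]

trmDomain :: Env -> [String]
trmDomain g = [ x | VarB x _ <- g ]

-- substitution into a binding rewrites the type but keeps the bound name
substBinding :: (Ty -> Ty) -> Binding -> Binding
substBinding f (TVarB x t) = TVarB x (f t)
substBinding f (VarB  x t) = VarB  x (f t)

-- hence tyDomain (map (substBinding f) g) == tyDomain g, and the same
-- holds for trmDomain.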
+
+lemma subst_closed_in:
+ "T closed_in (\<Delta> @ TVarB X S # \<Gamma>) \<Longrightarrow> U closed_in \<Gamma> \<Longrightarrow> T[X \<mapsto> U]\<^sub>\<tau> closed_in (\<Delta>[X \<mapsto> U]\<^sub>e @ \<Gamma>)"
+ apply (nominal_induct T avoiding: X U \<Gamma> rule: ty.strong_induct)
+ apply (simp add: closed_in_def ty.supp supp_atm domains_append ty_domain_subst)
+ apply blast
+ apply (simp add: closed_in_def ty.supp)
+ apply (simp add: closed_in_def ty.supp)
+ apply (simp add: closed_in_def ty.supp abs_supp)
+ apply (drule_tac x = X in meta_spec)
+ apply (drule_tac x = U in meta_spec)
+ apply (drule_tac x = "(TVarB tyvrs ty2) # \<Gamma>" in meta_spec)
+ apply (simp add: domains_append ty_domain_subst)
+ apply blast
+ done
+
+lemmas subst_closed_in' = subst_closed_in [where \<Delta>="[]", simplified]
+
+lemma typing_closed_in:
+ assumes "\<Gamma> \<turnstile> t : T"
+ shows "T closed_in \<Gamma>"
+using assms
+proof induct
+ case (T_Var x T \<Gamma>)
+ from `\<turnstile> \<Gamma> ok` and `VarB x T \<in> set \<Gamma>`
+ show ?case by (rule ok_imp_VarB_closed_in)
+next
+ case (T_App \<Gamma> t\<^isub>1 T\<^isub>1 T\<^isub>2 t\<^isub>2)
+ then show ?case by (auto simp add: ty.supp closed_in_def)
+next
+ case (T_Abs x T\<^isub>1 \<Gamma> t\<^isub>2 T\<^isub>2)
+ from `VarB x T\<^isub>1 # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2`
+ have "T\<^isub>1 closed_in \<Gamma>" by (auto dest: typing_ok)
+ with T_Abs show ?case by (auto simp add: ty.supp closed_in_def)
+next
+ case (T_Sub \<Gamma> t S T)
+ from `\<Gamma> \<turnstile> S <: T` show ?case by (simp add: subtype_implies_closed)
+next
+ case (T_TAbs X T\<^isub>1 \<Gamma> t\<^isub>2 T\<^isub>2)
+ from `TVarB X T\<^isub>1 # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2`
+ have "T\<^isub>1 closed_in \<Gamma>" by (auto dest: typing_ok)
+ with T_TAbs show ?case by (auto simp add: ty.supp closed_in_def abs_supp)
+next
+ case (T_TApp X \<Gamma> t\<^isub>1 T2 T11 T12)
+ then have "T12 closed_in (TVarB X T11 # \<Gamma>)"
+ by (auto simp add: closed_in_def ty.supp abs_supp)
+ moreover from T_TApp have "T2 closed_in \<Gamma>"
+ by (simp add: subtype_implies_closed)
+ ultimately show ?case by (rule subst_closed_in')
+qed
+
+
+subsection {* Evaluation *}
+
+inductive
+ val :: "trm \<Rightarrow> bool"
+where
+ Abs[intro]: "val (\<lambda>x:T. t)"
+| TAbs[intro]: "val (\<lambda>X<:T. t)"
+
+equivariance val
+
+inductive_cases val_inv_auto[elim]:
+ "val (Var x)"
+ "val (t1 \<cdot> t2)"
+ "val (t1 \<cdot>\<^sub>\<tau> t2)"
+
+inductive
+ eval :: "trm \<Rightarrow> trm \<Rightarrow> bool" ("_ \<longmapsto> _" [60,60] 60)
+where
+ E_Abs : "\<lbrakk> x \<sharp> v\<^isub>2; val v\<^isub>2 \<rbrakk> \<Longrightarrow> (\<lambda>x:T\<^isub>1\<^isub>1. t\<^isub>1\<^isub>2) \<cdot> v\<^isub>2 \<longmapsto> t\<^isub>1\<^isub>2[x \<mapsto> v\<^isub>2]"
+| E_App1 [intro]: "t \<longmapsto> t' \<Longrightarrow> t \<cdot> u \<longmapsto> t' \<cdot> u"
+| E_App2 [intro]: "\<lbrakk> val v; t \<longmapsto> t' \<rbrakk> \<Longrightarrow> v \<cdot> t \<longmapsto> v \<cdot> t'"
+| E_TAbs : "X \<sharp> (T\<^isub>1\<^isub>1, T\<^isub>2) \<Longrightarrow> (\<lambda>X<:T\<^isub>1\<^isub>1. t\<^isub>1\<^isub>2) \<cdot>\<^sub>\<tau> T\<^isub>2 \<longmapsto> t\<^isub>1\<^isub>2[X \<mapsto>\<^sub>\<tau> T\<^isub>2]"
+| E_TApp [intro]: "t \<longmapsto> t' \<Longrightarrow> t \<cdot>\<^sub>\<tau> T \<longmapsto> t' \<cdot>\<^sub>\<tau> T"
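The rules above define call-by-value evaluation: beta-reduction for term application (E_Abs) and for type application (E_TAbs), plus the congruence rules E_App1, E_App2 and E_TApp. The Haskell sketch below mirrors this step relation; it is not taken from the theory and uses naive, capture-ignoring substitution where the formalization relies on the capture-avoiding substitutions guarded by the freshness premises of E_Abs and E_TAbs.

-- Illustrative sketch only: one step of call-by-value evaluation, mirroring
-- E_Abs, E_App1, E_App2, E_TAbs and E_TApp.  Substitution is naive.
data Ty  = TyVar String | Top | Arrow Ty Ty | Forall String Ty Ty
data Trm = Var String | Abs String Ty Trm | App Trm Trm
         | TAbs String Ty Trm | TApp Trm Ty

isVal :: Trm -> Bool
isVal (Abs {})  = True
isVal (TAbs {}) = True
isVal _         = False

substTrm :: String -> Trm -> Trm -> Trm      -- t[x := u], term for term variable
substTrm x u (Var y)       = if x == y then u else Var y
substTrm x u (Abs y ty t)  = Abs y ty (if x == y then t else substTrm x u t)
substTrm x u (App t1 t2)   = App (substTrm x u t1) (substTrm x u t2)
substTrm x u (TAbs y ty t) = TAbs y ty (substTrm x u t)
substTrm x u (TApp t ty)   = TApp (substTrm x u t) ty

substTy :: String -> Ty -> Ty -> Ty          -- T[X := P], type for type variable
substTy x p (TyVar y)        = if x == y then p else TyVar y
substTy _ _ Top              = Top
substTy x p (Arrow t1 t2)    = Arrow (substTy x p t1) (substTy x p t2)
substTy x p (Forall y t1 t2) =
  Forall y (substTy x p t1) (if x == y then t2 else substTy x p t2)

substTrmTy :: String -> Ty -> Trm -> Trm     -- t[X := P], type into a term
substTrmTy _ _ (Var y)       = Var y
substTrmTy x p (Abs y ty t)  = Abs y (substTy x p ty) (substTrmTy x p t)
substTrmTy x p (App t1 t2)   = App (substTrmTy x p t1) (substTrmTy x p t2)
substTrmTy x p (TAbs y ty t) =
  TAbs y (substTy x p ty) (if x == y then t else substTrmTy x p t)
substTrmTy x p (TApp t ty)   = TApp (substTrmTy x p t) (substTy x p ty)

step :: Trm -> Maybe Trm
step (App (Abs x _ t12) v2)
  | isVal v2                   = Just (substTrm x v2 t12)              -- E_Abs
step (App t1 t2)
  | not (isVal t1)             = fmap (\t1' -> App t1' t2) (step t1)   -- E_App1
  | otherwise                  = fmap (App t1) (step t2)               -- E_App2
step (TApp (TAbs x _ t12) ty2) = Just (substTrmTy x ty2 t12)           -- E_TAbs
step (TApp t ty)               = fmap (\t' -> TApp t' ty) (step t)     -- E_TApp
step _                         = Nothing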
+
+lemma better_E_Abs[intro]:
+ assumes H: "val v2"
+ shows "(\<lambda>x:T11. t12) \<cdot> v2 \<longmapsto> t12[x \<mapsto> v2]"
+proof -
+ obtain y::vrs where y: "y \<sharp> (x, t12, v2)" by (rule exists_fresh) (rule fin_supp)
+ then have "y \<sharp> v2" by simp
+ then have "(\<lambda>y:T11. [(y, x)] \<bullet> t12) \<cdot> v2 \<longmapsto> ([(y, x)] \<bullet> t12)[y \<mapsto> v2]" using H
+ by (rule E_Abs)
+ moreover from y have "(\<lambda>x:T11. t12) \<cdot> v2 = (\<lambda>y:T11. [(y, x)] \<bullet> t12) \<cdot> v2"
+ by (auto simp add: trm.inject alpha' fresh_prod fresh_atm)
+ ultimately have "(\<lambda>x:T11. t12) \<cdot> v2 \<longmapsto> ([(y, x)] \<bullet> t12)[y \<mapsto> v2]"
+ by simp
+ with y show ?thesis by (simp add: subst_trm_rename)
+qed
+
+lemma better_E_TAbs[intro]: "(\<lambda>X<:T11. t12) \<cdot>\<^sub>\<tau> T2 \<longmapsto> t12[X \<mapsto>\<^sub>\<tau> T2]"
+proof -
+ obtain Y::tyvrs where Y: "Y \<sharp> (X, t12, T11, T2)" by (rule exists_fresh) (rule fin_supp)
+ then have "Y \<sharp> (T11, T2)" by simp
+ then have "(\<lambda>Y<:T11. [(Y, X)] \<bullet> t12) \<cdot>\<^sub>\<tau> T2 \<longmapsto> ([(Y, X)] \<bullet> t12)[Y \<mapsto>\<^sub>\<tau> T2]"
+ by (rule E_TAbs)
+ moreover from Y have "(\<lambda>X<:T11. t12) \<cdot>\<^sub>\<tau> T2 = (\<lambda>Y<:T11. [(Y, X)] \<bullet> t12) \<cdot>\<^sub>\<tau> T2"
+ by (auto simp add: trm.inject alpha' fresh_prod fresh_atm)
+ ultimately have "(\<lambda>X<:T11. t12) \<cdot>\<^sub>\<tau> T2 \<longmapsto> ([(Y, X)] \<bullet> t12)[Y \<mapsto>\<^sub>\<tau> T2]"
+ by simp
+ with Y show ?thesis by (simp add: subst_trm_ty_rename)
+qed
+
+equivariance eval
+
+nominal_inductive eval
+ by (simp_all add: abs_fresh ty_vrs_fresh subst_trm_fresh_tyvar
+ subst_trm_fresh_var subst_trm_ty_fresh')
+
+inductive_cases eval_inv_auto[elim]:
+ "Var x \<longmapsto> t'"
+ "(\<lambda>x:T. t) \<longmapsto> t'"
+ "(\<lambda>X<:T. t) \<longmapsto> t'"
+
+lemma ty_domain_cons:
+ shows "ty_domain (\<Gamma>@[VarB X Q]@\<Delta>) = ty_domain (\<Gamma>@\<Delta>)"
+by (induct \<Gamma>, auto)
+
+lemma closed_in_cons:
+ assumes "S closed_in (\<Gamma> @ VarB X Q # \<Delta>)"
+ shows "S closed_in (\<Gamma>@\<Delta>)"
+using assms ty_domain_cons closed_in_def by auto
+
+lemma closed_in_weaken: "T closed_in (\<Delta> @ \<Gamma>) \<Longrightarrow> T closed_in (\<Delta> @ B # \<Gamma>)"
+ by (auto simp add: closed_in_def domains_append)
+
+lemma closed_in_weaken': "T closed_in \<Gamma> \<Longrightarrow> T closed_in (\<Delta> @ \<Gamma>)"
+ by (auto simp add: closed_in_def domains_append)
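Both weakening lemmas rest on reading closed_in as: every free type variable of the type occurs in the type domain of the context, so appending further bindings can only enlarge that domain. A possible Haskell rendering of the predicate is sketched below; it is not taken from the theory, which phrases closed_in via the nominal support (supp) rather than an explicit free-variable function.

import Data.List (delete, union, (\\))

-- Illustrative sketch only: "T closed_in G" read as "all free type
-- variables of T occur in the type domain of G".
data Ty      = TyVar String | Top | Arrow Ty Ty | Forall String Ty Ty
data Binding = TVarB String Ty | VarB String Ty
type Env     = [Binding]

freeTyVars :: Ty -> [String]
freeTyVars (TyVar x)        = [x]
freeTyVars Top              = []
freeTyVars (Arrow t1 t2)    = freeTyVars t1 `union` freeTyVars t2
freeTyVars (Forall x t1 t2) = freeTyVars t1 `union` delete x (freeTyVars t2)

tyDomain :: Env -> [String]
tyDomain g = [ x | TVarB x _ <- g ]

closedIn :: Ty -> Env -> Bool
closedIn t g = null (freeTyVars t \\ tyDomain g)

-- appending bindings only grows tyDomain, so closedIn t g implies
-- closedIn t (d ++ g), which is the shape of closed_in_weaken'.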
+
+lemma valid_subst:
+ assumes ok: "\<turnstile> (\<Delta> @ TVarB X Q # \<Gamma>) ok"
+ and closed: "P closed_in \<Gamma>"
+ shows "\<turnstile> (\<Delta>[X \<mapsto> P]\<^sub>e @ \<Gamma>) ok" using ok closed
+ apply (induct \<Delta>)
+ apply simp_all
+ apply (erule validE)
+ apply assumption
+ apply (erule validE)
+ apply simp
+ apply (rule valid_consT)
+ apply assumption
+ apply (simp add: domains_append ty_domain_subst)
+ apply (simp add: fresh_fin_insert [OF pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst] finite_domains)
+ apply (rule_tac S=Q in subst_closed_in')
+ apply (simp add: closed_in_def domains_append ty_domain_subst)
+ apply (simp add: closed_in_def domains_append)
+ apply blast
+ apply simp
+ apply (rule valid_cons)
+ apply assumption
+ apply (simp add: domains_append trm_domain_subst)
+ apply (rule_tac S=Q in subst_closed_in')
+ apply (simp add: closed_in_def domains_append ty_domain_subst)
+ apply (simp add: closed_in_def domains_append)
+ apply blast
+ done
+
+lemma ty_domain_vrs:
+ shows "ty_domain (G @ [VarB x Q] @ D) = ty_domain (G @ D)"
+by (induct G, auto)
+
+lemma valid_cons':
+ assumes "\<turnstile> (\<Gamma> @ VarB x Q # \<Delta>) ok"
+ shows "\<turnstile> (\<Gamma> @ \<Delta>) ok"
+ using assms
+proof (induct \<Gamma>' \<equiv> "\<Gamma> @ VarB x Q # \<Delta>" arbitrary: \<Gamma> \<Delta>)
+ case valid_nil
+ have "[] = \<Gamma> @ VarB x Q # \<Delta>" by fact
+ then have "False" by auto
+ then show ?case by auto
+next
+ case (valid_consT G X T)
+ then show ?case
+ proof (cases \<Gamma>)
+ case Nil
+ with valid_consT show ?thesis by simp
+ next
+ case (Cons b bs)
+ with valid_consT
+ have "\<turnstile> (bs @ \<Delta>) ok" by simp
+ moreover from Cons and valid_consT have "X \<sharp> ty_domain (bs @ \<Delta>)"
+ by (simp add: domains_append)
+ moreover from Cons and valid_consT have "T closed_in (bs @ \<Delta>)"
+ by (simp add: closed_in_def domains_append)
+ ultimately have "\<turnstile> (TVarB X T # bs @ \<Delta>) ok"
+ by (rule valid_rel.valid_consT)
+ with Cons and valid_consT show ?thesis by simp
+ qed
+next
+ case (valid_cons G x T)
+ then show ?case
+ proof (cases \<Gamma>)
+ case Nil
+ with valid_cons show ?thesis by simp
+ next
+ case (Cons b bs)
+ with valid_cons
+ have "\<turnstile> (bs @ \<Delta>) ok" by simp
+ moreover from Cons and valid_cons have "x \<sharp> trm_domain (bs @ \<Delta>)"
+ by (simp add: domains_append finite_domains
+ fresh_fin_insert [OF pt_vrs_inst at_vrs_inst fs_vrs_inst])
+ moreover from Cons and valid_cons have "T closed_in (bs @ \<Delta>)"
+ by (simp add: closed_in_def domains_append)
+ ultimately have "\<turnstile> (VarB x T # bs @ \<Delta>) ok"
+ by (rule valid_rel.valid_cons)
+ with Cons and valid_cons show ?thesis by simp
+ qed
+qed
+
+text {* A.5(6) *}
+
+lemma type_weaken:
+ assumes "(\<Delta>@\<Gamma>) \<turnstile> t : T"
+ and "\<turnstile> (\<Delta> @ B # \<Gamma>) ok"
+ shows "(\<Delta> @ B # \<Gamma>) \<turnstile> t : T"
+using assms
+proof(nominal_induct \<Gamma>'\<equiv> "\<Delta> @ \<Gamma>" t T avoiding: \<Delta> \<Gamma> B rule: typing.strong_induct)
+ case (T_Var x' T \<Gamma>' \<Gamma>'' \<Delta>')
+ then show ?case by auto
+next
+ case (T_App \<Gamma> t\<^isub>1 T\<^isub>1 T\<^isub>2 t\<^isub>2 \<Gamma> \<Delta>)
+ then show ?case by force
+next
+ case (T_Abs y T\<^isub>1 \<Gamma>' t\<^isub>2 T\<^isub>2 \<Delta> \<Gamma>)
+ then have "VarB y T\<^isub>1 # \<Delta> @ \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2" by simp
+ then have closed: "T\<^isub>1 closed_in (\<Delta> @ \<Gamma>)"
+ by (auto dest: typing_ok)
+ have "\<turnstile> (VarB y T\<^isub>1 # \<Delta> @ B # \<Gamma>) ok"
+ apply (rule valid_cons)
+ apply (rule T_Abs)
+ apply (simp add: domains_append
+ fresh_fin_insert [OF pt_vrs_inst at_vrs_inst fs_vrs_inst]
+ fresh_fin_union [OF pt_vrs_inst at_vrs_inst fs_vrs_inst]
+ finite_domains finite_vrs fresh_vrs_of T_Abs fresh_trm_domain)
+ apply (rule closed_in_weaken)
+ apply (rule closed)
+ done
+ then have "\<turnstile> ((VarB y T\<^isub>1 # \<Delta>) @ B # \<Gamma>) ok" by simp
+ then have "(VarB y T\<^isub>1 # \<Delta>) @ B # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2"
+ by (rule T_Abs) (simp add: T_Abs)
+ then have "VarB y T\<^isub>1 # \<Delta> @ B # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2" by simp
+ then show ?case by (rule typing.T_Abs)
+next
+ case (T_Sub \<Gamma>' t S T \<Delta> \<Gamma>)
+ from `\<turnstile> (\<Delta> @ B # \<Gamma>) ok` and `\<Gamma>' = \<Delta> @ \<Gamma>`
+ have "\<Delta> @ B # \<Gamma> \<turnstile> t : S" by (rule T_Sub)
+ moreover from `\<Gamma>'\<turnstile>S<:T` and `\<turnstile> (\<Delta> @ B # \<Gamma>) ok`
+ have "(\<Delta> @ B # \<Gamma>)\<turnstile>S<:T"
+ by (rule weakening) (simp add: extends_def T_Sub)
+ ultimately show ?case by (rule typing.T_Sub)
+next
+ case (T_TAbs X T\<^isub>1 \<Gamma>' t\<^isub>2 T\<^isub>2 \<Delta> \<Gamma>)
+ then have "TVarB X T\<^isub>1 # \<Delta> @ \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2" by simp
+ then have closed: "T\<^isub>1 closed_in (\<Delta> @ \<Gamma>)"
+ by (auto dest: typing_ok)
+ have "\<turnstile> (TVarB X T\<^isub>1 # \<Delta> @ B # \<Gamma>) ok"
+ apply (rule valid_consT)
+ apply (rule T_TAbs)
+ apply (simp add: domains_append
+ fresh_fin_insert [OF pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst]
+ fresh_fin_union [OF pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst]
+ finite_domains finite_vrs tyvrs_fresh T_TAbs fresh_domain)
+ apply (rule closed_in_weaken)
+ apply (rule closed)
+ done
+ then have "\<turnstile> ((TVarB X T\<^isub>1 # \<Delta>) @ B # \<Gamma>) ok" by simp
+ then have "(TVarB X T\<^isub>1 # \<Delta>) @ B # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2"
+ by (rule T_TAbs) (simp add: T_TAbs)
+ then have "TVarB X T\<^isub>1 # \<Delta> @ B # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2" by simp
+ then show ?case by (rule typing.T_TAbs)
+next
+ case (T_TApp X \<Gamma>' t\<^isub>1 T2 T11 T12 \<Delta> \<Gamma>)
+ have "\<Delta> @ B # \<Gamma> \<turnstile> t\<^isub>1 : (\<forall>X<:T11. T12)"
+ by (rule T_TApp)+
+ moreover from `\<Gamma>'\<turnstile>T2<:T11` and `\<turnstile> (\<Delta> @ B # \<Gamma>) ok`
+ have "(\<Delta> @ B # \<Gamma>)\<turnstile>T2<:T11"
+ by (rule weakening) (simp add: extends_def T_TApp)
+ ultimately show ?case by (rule better_T_TApp)
+qed
+
+lemma type_weaken':
+ "\<Gamma> \<turnstile> t : T \<Longrightarrow> \<turnstile> (\<Delta>@\<Gamma>) ok \<Longrightarrow> (\<Delta>@\<Gamma>) \<turnstile> t : T"
+ apply (induct \<Delta>)
+ apply simp_all
+ apply (erule validE)
+ apply (insert type_weaken [of "[]", simplified])
+ apply simp_all
+ done
+
+text {* A.6 *}
+
+lemma strengthening:
+ assumes "(\<Gamma> @ VarB x Q # \<Delta>) \<turnstile> S <: T"
+ shows "(\<Gamma>@\<Delta>) \<turnstile> S <: T"
+ using assms
+proof (induct \<Gamma>' \<equiv> "\<Gamma> @ VarB x Q # \<Delta>" S T arbitrary: \<Gamma>)
+ case (SA_Top G' S G)
+ then have "\<turnstile> (G @ \<Delta>) ok" by (auto dest: valid_cons')
+ moreover have "S closed_in (G @ \<Delta>)" using SA_Top by (auto dest: closed_in_cons)
+ ultimately show ?case using subtype_of.SA_Top by auto
+next
+ case (SA_refl_TVar G X' G')
+ then have "\<turnstile> (G' @ VarB x Q # \<Delta>) ok" by simp
+ then have h1:"\<turnstile> (G' @ \<Delta>) ok" by (auto dest: valid_cons')
+ have "X' \<in> ty_domain (G' @ VarB x Q # \<Delta>)" using SA_refl_TVar by auto
+ then have h2:"X' \<in> ty_domain (G' @ \<Delta>)" using ty_domain_vrs by auto
+ show ?case using h1 h2 by auto
+next
+ case (SA_all G T1 S1 X S2 T2 G')
+ have ih1:"TVarB X T1 # G = (TVarB X T1 # G') @ VarB x Q # \<Delta> \<Longrightarrow> ((TVarB X T1 # G') @ \<Delta>)\<turnstile>S2<:T2" by fact
+ then have h1:"(TVarB X T1 # (G' @ \<Delta>))\<turnstile>S2<:T2" using SA_all by auto
+ have ih2:"G = G' @ VarB x Q # \<Delta> \<Longrightarrow> (G' @ \<Delta>)\<turnstile>T1<:S1" by fact
+ then have h2:"(G' @ \<Delta>)\<turnstile>T1<:S1" using SA_all by auto
+ then show ?case using h1 h2 by auto
+qed (auto)
+
+lemma narrow_type: -- {* A.7 *}
+ assumes H: "\<Delta> @ (TVarB X Q) # \<Gamma> \<turnstile> t : T"
+ shows "\<Gamma> \<turnstile> P <: Q \<Longrightarrow> \<Delta> @ (TVarB X P) # \<Gamma> \<turnstile> t : T"
+ using H
+ proof (nominal_induct \<Gamma>' \<equiv> "\<Delta> @ (TVarB X Q) # \<Gamma>" t T avoiding: P arbitrary: \<Delta> rule: typing.strong_induct)
+ case (T_Var x T G P D)
+ then have "VarB x T \<in> set (D @ TVarB X P # \<Gamma>)"
+ and "\<turnstile> (D @ TVarB X P # \<Gamma>) ok"
+ by (auto intro: replace_type dest!: subtype_implies_closed)
+ then show ?case by auto
+ next
+ case (T_App G t1 T1 T2 t2 P D)
+ then show ?case by force
+ next
+ case (T_Abs x T1 G t2 T2 P D)
+ then show ?case by (fastsimp dest: typing_ok)
+ next
+ case (T_Sub G t S T D)
+ then show ?case using subtype_narrow by fastsimp
+ next
+ case (T_TAbs X' T1 G t2 T2 P D)
+ then show ?case by (fastsimp dest: typing_ok)
+ next
+ case (T_TApp X' G t1 T2 T11 T12 P D)
+ then have "D @ TVarB X P # \<Gamma> \<turnstile> t1 : Forall X' T12 T11" by fastsimp
+ moreover have "(D @ [TVarB X Q] @ \<Gamma>) \<turnstile> T2<:T11" using T_TApp by auto
+ then have "(D @ [TVarB X P] @ \<Gamma>) \<turnstile> T2<:T11" using `\<Gamma>\<turnstile>P<:Q`
+ by (rule subtype_narrow)
+ moreover from T_TApp have "X' \<sharp> (D @ TVarB X P # \<Gamma>, t1, T2)"
+ by (simp add: fresh_list_append fresh_list_cons fresh_prod)
+ ultimately show ?case by auto
+qed
+
+subsection {* Substitution lemmas *}
+
+subsubsection {* Substitution Preserves Typing *}
+
+theorem subst_type: -- {* A.8 *}
+ assumes H: "(\<Delta> @ (VarB x U) # \<Gamma>) \<turnstile> t : T"
+ shows "\<Gamma> \<turnstile> u : U \<Longrightarrow> \<Delta> @ \<Gamma> \<turnstile> t[x \<mapsto> u] : T" using H
+ proof (nominal_induct \<Gamma>' \<equiv> "\<Delta> @ (VarB x U) # \<Gamma>" t T avoiding: x u arbitrary: \<Delta> rule: typing.strong_induct)
+ case (T_Var y T G x u D)
+ show ?case
+ proof (cases "x = y")
+ assume eq:"x=y"
+ then have "T=U" using T_Var uniqueness_of_ctxt' by auto
+ then show ?case using eq T_Var
+ by (auto intro: type_weaken' dest: valid_cons')
+ next
+ assume "x\<noteq>y"
+ then show ?case using T_Var
+ by (auto simp add:binding.inject dest: valid_cons')
+ qed
+ next
+ case (T_App G t1 T1 T2 t2 x u D)
+ then show ?case by force
+ next
+ case (T_Abs y T1 G t2 T2 x u D)
+ then show ?case by force
+ next
+ case (T_Sub G t S T x u D)
+ then have "D @ \<Gamma> \<turnstile> t[x \<mapsto> u] : S" by auto
+ moreover have "(D @ \<Gamma>) \<turnstile> S<:T" using T_Sub by (auto dest: strengthening)
+ ultimately show ?case by auto
+ next
+ case (T_TAbs X T1 G t2 T2 x u D)
+ from `TVarB X T1 # G \<turnstile> t2 : T2` have "X \<sharp> T1"
+ by (auto simp add: valid_ty_domain_fresh dest: typing_ok intro!: closed_in_fresh)
+ with `X \<sharp> u` and T_TAbs show ?case by fastsimp
+ next
+ case (T_TApp X G t1 T2 T11 T12 x u D)
+ then have "(D@\<Gamma>) \<turnstile>T2<:T11" using T_TApp by (auto dest: strengthening)
+ then show "((D @ \<Gamma>) \<turnstile> ((t1 \<cdot>\<^sub>\<tau> T2)[x \<mapsto> u]) : (T12[X \<mapsto> T2]\<^sub>\<tau>))" using T_TApp
+ by (force simp add: fresh_prod fresh_list_append fresh_list_cons subst_trm_fresh_tyvar)
+qed
+
+subsubsection {* Type Substitution Preserves Subtyping *}
+
+lemma substT_subtype: -- {* A.10 *}
+ assumes H: "(\<Delta> @ ((TVarB X Q) # \<Gamma>)) \<turnstile> S <: T"
+ shows "\<Gamma> \<turnstile> P <: Q \<Longrightarrow> (\<Delta>[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> S[X \<mapsto> P]\<^sub>\<tau> <: T[X \<mapsto> P]\<^sub>\<tau>"
+ using H
+proof (nominal_induct \<Gamma>' \<equiv> "\<Delta> @ TVarB X Q # \<Gamma>" S T avoiding: X P arbitrary: \<Delta> rule: subtype_of.strong_induct)
+ case (SA_Top G S X P D)
+ then have "\<turnstile> (D @ TVarB X Q # \<Gamma>) ok" by simp
+ moreover have closed: "P closed_in \<Gamma>" using SA_Top subtype_implies_closed by auto
+ ultimately have "\<turnstile> (D[X \<mapsto> P]\<^sub>e @ \<Gamma>) ok" by (rule valid_subst)
+ moreover from SA_Top have "S closed_in (D @ TVarB X Q # \<Gamma>)" by simp
+ then have "S[X \<mapsto> P]\<^sub>\<tau> closed_in (D[X \<mapsto> P]\<^sub>e @ \<Gamma>)" using closed by (rule subst_closed_in)
+ ultimately show ?case by auto
+next
+ case (SA_trans_TVar Y S G T X P D)
+ have h:"G\<turnstile>S<:T" by fact
+ then have ST: "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> S[X \<mapsto> P]\<^sub>\<tau> <: T[X \<mapsto> P]\<^sub>\<tau>" using SA_trans_TVar by auto
+ from `G\<turnstile>S<:T` have G_ok: "\<turnstile> G ok" by (rule subtype_implies_ok)
+ from G_ok and SA_trans_TVar have X\<Gamma>_ok: "\<turnstile> (TVarB X Q # \<Gamma>) ok"
+ by (auto intro: validE_append)
+ show "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> Tvar Y[X \<mapsto> P]\<^sub>\<tau><:T[X \<mapsto> P]\<^sub>\<tau>"
+ proof (cases "X = Y")
+ assume eq: "X = Y"
+ from eq and SA_trans_TVar have "TVarB Y Q \<in> set G" by simp
+ with G_ok have QS: "Q = S" using `TVarB Y S \<in> set G` by (rule uniqueness_of_ctxt)
+ from X\<Gamma>_ok have "X \<sharp> ty_domain \<Gamma>" and "Q closed_in \<Gamma>" by auto
+ then have XQ: "X \<sharp> Q" by (rule closed_in_fresh)
+ note `\<Gamma>\<turnstile>P<:Q`
+ moreover from ST have "\<turnstile> (D[X \<mapsto> P]\<^sub>e @ \<Gamma>) ok" by (rule subtype_implies_ok)
+ moreover have "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) extends \<Gamma>" by (simp add: extends_def)
+ ultimately have "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> P<:Q" by (rule weakening)
+ with QS have "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> P<:S" by simp
+ moreover from XQ and ST and QS have "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> S<:T[X \<mapsto> P]\<^sub>\<tau>"
+ by (simp add: type_subst_identity)
+ ultimately have "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> P<:T[X \<mapsto> P]\<^sub>\<tau>"
+ by (rule subtype_transitivity)
+ with eq show ?case by simp
+ next
+ assume neq: "X \<noteq> Y"
+ with SA_trans_TVar have "TVarB Y S \<in> set D \<or> TVarB Y S \<in> set \<Gamma>"
+ by (simp add: binding.inject)
+ then show ?case
+ proof
+ assume "TVarB Y S \<in> set D"
+ then have "TVarB Y (S[X \<mapsto> P]\<^sub>\<tau>) \<in> set (D[X \<mapsto> P]\<^sub>e)"
+ by (rule ctxt_subst_mem_TVarB)
+ then have "TVarB Y (S[X \<mapsto> P]\<^sub>\<tau>) \<in> set (D[X \<mapsto> P]\<^sub>e @ \<Gamma>)" by simp
+ with neq and ST show ?thesis by auto
+ next
+ assume Y: "TVarB Y S \<in> set \<Gamma>"
+ from X\<Gamma>_ok have "X \<sharp> ty_domain \<Gamma>" and "\<turnstile> \<Gamma> ok" by auto
+ then have "X \<sharp> \<Gamma>" by (simp add: valid_ty_domain_fresh)
+ with Y have "X \<sharp> S"
+ by (induct \<Gamma>) (auto simp add: fresh_list_nil fresh_list_cons)
+ with ST have "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>)\<turnstile>S<:T[X \<mapsto> P]\<^sub>\<tau>"
+ by (simp add: type_subst_identity)
+ moreover from Y have "TVarB Y S \<in> set (D[X \<mapsto> P]\<^sub>e @ \<Gamma>)" by simp
+ ultimately show ?thesis using neq by auto
+ qed
+ qed
+next
+ case (SA_refl_TVar G Y X P D)
+ then have "\<turnstile> (D @ TVarB X Q # \<Gamma>) ok" by simp
+ moreover from SA_refl_TVar have closed: "P closed_in \<Gamma>"
+ by (auto dest: subtype_implies_closed)
+ ultimately have ok: "\<turnstile> (D[X \<mapsto> P]\<^sub>e @ \<Gamma>) ok" using valid_subst by auto
+ from closed have closed': "P closed_in (D[X \<mapsto> P]\<^sub>e @ \<Gamma>)"
+ by (simp add: closed_in_weaken')
+ show ?case
+ proof (cases "X = Y")
+ assume "X = Y"
+ with closed' and ok show ?thesis
+ by (auto intro: subtype_reflexivity)
+ next
+ assume neq: "X \<noteq> Y"
+ with SA_refl_TVar have "Y \<in> ty_domain (D[X \<mapsto> P]\<^sub>e @ \<Gamma>)"
+ by (simp add: ty_domain_subst domains_append)
+ with neq and ok show ?thesis by auto
+ qed
+next
+ case (SA_arrow G T1 S1 S2 T2 X P D)
+ then have h1:"(D[X \<mapsto> P]\<^sub>e @ \<Gamma>)\<turnstile>T1[X \<mapsto> P]\<^sub>\<tau><:S1[X \<mapsto> P]\<^sub>\<tau>" using SA_arrow by auto
+ from SA_arrow have h2:"(D[X \<mapsto> P]\<^sub>e @ \<Gamma>)\<turnstile>S2[X \<mapsto> P]\<^sub>\<tau><:T2[X \<mapsto> P]\<^sub>\<tau>" using SA_arrow by auto
+ show ?case using subtype_of.SA_arrow h1 h2 by auto
+next
+ case (SA_all G T1 S1 Y S2 T2 X P D)
+ then have Y: "Y \<sharp> ty_domain (D @ TVarB X Q # \<Gamma>)"
+ by (auto dest: subtype_implies_ok intro: fresh_domain)
+ moreover from SA_all have "S1 closed_in (D @ TVarB X Q # \<Gamma>)"
+ by (auto dest: subtype_implies_closed)
+ ultimately have S1: "Y \<sharp> S1" by (rule closed_in_fresh)
+ from SA_all have "T1 closed_in (D @ TVarB X Q # \<Gamma>)"
+ by (auto dest: subtype_implies_closed)
+ with Y have T1: "Y \<sharp> T1" by (rule closed_in_fresh)
+ with SA_all and S1 show ?case by force
+qed
+
+subsubsection {* Type Substitution Preserves Typing *}
+
+theorem substT_type: -- {* A.11 *}
+ assumes H: "(D @ TVarB X Q # G) \<turnstile> t : T"
+ shows "G \<turnstile> P <: Q \<Longrightarrow>
+ (D[X \<mapsto> P]\<^sub>e @ G) \<turnstile> t[X \<mapsto>\<^sub>\<tau> P] : T[X \<mapsto> P]\<^sub>\<tau>" using H
+proof (nominal_induct \<Gamma>'\<equiv>"(D @ TVarB X Q # G)" t T avoiding: X P arbitrary: D rule: typing.strong_induct)
+ case (T_Var x T G' X P D')
+ have "G\<turnstile>P<:Q" by fact
+ then have "P closed_in G" using subtype_implies_closed by auto
+ moreover have "\<turnstile> (D' @ TVarB X Q # G) ok" using T_Var by auto
+ ultimately have "\<turnstile> (D'[X \<mapsto> P]\<^sub>e @ G) ok" using valid_subst by auto
+ moreover have "VarB x T \<in> set (D' @ TVarB X Q # G)" using T_Var by auto
+ then have "VarB x T \<in> set D' \<or> VarB x T \<in> set G" by simp
+ then have "(VarB x (T[X \<mapsto> P]\<^sub>\<tau>)) \<in> set (D'[X \<mapsto> P]\<^sub>e @ G)"
+ proof
+ assume "VarB x T \<in> set D'"
+ then have "VarB x (T[X \<mapsto> P]\<^sub>\<tau>) \<in> set (D'[X \<mapsto> P]\<^sub>e)"
+ by (rule ctxt_subst_mem_VarB)
+ then show ?thesis by simp
+ next
+ assume x: "VarB x T \<in> set G"
+ from T_Var have ok: "\<turnstile> G ok" by (auto dest: subtype_implies_ok)
+ then have "X \<sharp> ty_domain G" using T_Var by (auto dest: validE_append)
+ with ok have "X \<sharp> G" by (simp add: valid_ty_domain_fresh)
+ moreover from x have "VarB x T \<in> set (D' @ G)" by simp
+ then have "VarB x (T[X \<mapsto> P]\<^sub>\<tau>) \<in> set ((D' @ G)[X \<mapsto> P]\<^sub>e)"
+ by (rule ctxt_subst_mem_VarB)
+ ultimately show ?thesis
+ by (simp add: ctxt_subst_append ctxt_subst_identity)
+ qed
+ ultimately show ?case by auto
+next
+ case (T_App G' t1 T1 T2 t2 X P D')
+ then have "D'[X \<mapsto> P]\<^sub>e @ G \<turnstile> t1[X \<mapsto>\<^sub>\<tau> P] : (T1 \<rightarrow> T2)[X \<mapsto> P]\<^sub>\<tau>" by auto
+ moreover from T_App have "D'[X \<mapsto> P]\<^sub>e @ G \<turnstile> t2[X \<mapsto>\<^sub>\<tau> P] : T1[X \<mapsto> P]\<^sub>\<tau>" by auto
+ ultimately show ?case by auto
+next
+ case (T_Abs x T1 G' t2 T2 X P D')
+ then show ?case by force
+next
+ case (T_Sub G' t S T X P D')
+ then show ?case using substT_subtype by force
+next
+ case (T_TAbs X' G' T1 t2 T2 X P D')
+ then have "X' \<sharp> ty_domain (D' @ TVarB X Q # G)"
+ and "G' closed_in (D' @ TVarB X Q # G)"
+ by (auto dest: typing_ok)
+ then have "X' \<sharp> G'" by (rule closed_in_fresh)
+ with T_TAbs show ?case by force
+next
+ case (T_TApp X' G' t1 T2 T11 T12 X P D')
+ then have "X' \<sharp> ty_domain (D' @ TVarB X Q # G)"
+ by (simp add: fresh_domain)
+ moreover from T_TApp have "T11 closed_in (D' @ TVarB X Q # G)"
+ by (auto dest: subtype_implies_closed)
+ ultimately have X': "X' \<sharp> T11" by (rule closed_in_fresh)
+ from T_TApp have "D'[X \<mapsto> P]\<^sub>e @ G \<turnstile> t1[X \<mapsto>\<^sub>\<tau> P] : (\<forall>X'<:T11. T12)[X \<mapsto> P]\<^sub>\<tau>"
+ by simp
+ with X' and T_TApp show ?case
+ by (auto simp add: fresh_atm type_substitution_lemma
+ fresh_list_append fresh_list_cons
+ ctxt_subst_fresh' type_subst_fresh subst_trm_ty_fresh
+ intro: substT_subtype)
+qed
+
+lemma Abs_type: -- {* A.13(1) *}
+ assumes H: "\<Gamma> \<turnstile> (\<lambda>x:S. s) : T"
+ and H': "\<Gamma> \<turnstile> T <: U \<rightarrow> U'"
+ and H'': "x \<sharp> \<Gamma>"
+ obtains S' where "\<Gamma> \<turnstile> U <: S"
+ and "(VarB x S) # \<Gamma> \<turnstile> s : S'"
+ and "\<Gamma> \<turnstile> S' <: U'"
+ using H H' H''
+proof (nominal_induct \<Gamma> t \<equiv> "\<lambda>x:S. s" T avoiding: x arbitrary: U U' S s rule: typing.strong_induct)
+ case (T_Abs y T\<^isub>1 \<Gamma> t\<^isub>2 T\<^isub>2)
+ from `\<Gamma> \<turnstile> T\<^isub>1 \<rightarrow> T\<^isub>2 <: U \<rightarrow> U'`
+ obtain ty1: "\<Gamma> \<turnstile> U <: S" and ty2: "\<Gamma> \<turnstile> T\<^isub>2 <: U'" using T_Abs
+ by cases (simp_all add: ty.inject trm.inject alpha fresh_atm)
+ from T_Abs have "VarB y S # \<Gamma> \<turnstile> [(y, x)] \<bullet> s : T\<^isub>2"
+ by (simp add: trm.inject alpha fresh_atm)
+ then have "[(y, x)] \<bullet> (VarB y S # \<Gamma>) \<turnstile> [(y, x)] \<bullet> [(y, x)] \<bullet> s : [(y, x)] \<bullet> T\<^isub>2"
+ by (rule typing.eqvt)
+ moreover from T_Abs have "y \<sharp> \<Gamma>"
+ by (auto dest!: typing_ok simp add: fresh_trm_domain)
+ ultimately have "VarB x S # \<Gamma> \<turnstile> s : T\<^isub>2" using T_Abs
+ by (perm_simp add: ty_vrs_prm_simp)
+ with ty1 show ?case using ty2 by (rule T_Abs)
+next
+ case (T_Sub \<Gamma> t S T)
+ then show ?case using subtype_transitivity by blast
+qed simp_all
+
+lemma subtype_reflexivity_from_typing:
+ assumes "\<Gamma> \<turnstile> t : T"
+ shows "\<Gamma> \<turnstile> T <: T"
+using assms subtype_reflexivity typing_ok typing_closed_in by simp
+
+lemma Abs_type':
+ assumes H: "\<Gamma> \<turnstile> (\<lambda>x:S. s) : U \<rightarrow> U'"
+ and H': "x \<sharp> \<Gamma>"
+ obtains S'
+ where "\<Gamma> \<turnstile> U <: S"
+ and "(VarB x S) # \<Gamma> \<turnstile> s : S'"
+ and "\<Gamma> \<turnstile> S' <: U'"
+ using H subtype_reflexivity_from_typing [OF H] H'
+ by (rule Abs_type)
+
+lemma TAbs_type: -- {* A.13(2) *}
+ assumes H: "\<Gamma> \<turnstile> (\<lambda>X<:S. s) : T"
+ and H': "\<Gamma> \<turnstile> T <: (\<forall>X<:U. U')"
+ and fresh: "X \<sharp> \<Gamma>" "X \<sharp> S" "X \<sharp> U"
+ obtains S'
+ where "\<Gamma> \<turnstile> U <: S"
+ and "(TVarB X U # \<Gamma>) \<turnstile> s : S'"
+ and "(TVarB X U # \<Gamma>) \<turnstile> S' <: U'"
+ using H H' fresh
+proof (nominal_induct \<Gamma> t \<equiv> "\<lambda>X<:S. s" T avoiding: X U U' S arbitrary: s rule: typing.strong_induct)
+ case (T_TAbs Y T\<^isub>1 \<Gamma> t\<^isub>2 T\<^isub>2)
+ from `TVarB Y T\<^isub>1 # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2` have Y: "Y \<sharp> \<Gamma>"
+ by (auto dest!: typing_ok simp add: valid_ty_domain_fresh)
+ from `Y \<sharp> U'` and `Y \<sharp> X`
+ have "(\<forall>X<:U. U') = (\<forall>Y<:U. [(Y, X)] \<bullet> U')"
+ by (simp add: ty.inject alpha' fresh_atm)
+ with T_TAbs have "\<Gamma> \<turnstile> (\<forall>Y<:S. T\<^isub>2) <: (\<forall>Y<:U. [(Y, X)] \<bullet> U')" by (simp add: trm.inject)
+ then obtain ty1: "\<Gamma> \<turnstile> U <: S" and ty2: "(TVarB Y U # \<Gamma>) \<turnstile> T\<^isub>2 <: ([(Y, X)] \<bullet> U')" using T_TAbs Y
+ by (cases rule: subtype_of.strong_cases [where X=Y]) (simp_all add: ty.inject alpha abs_fresh)
+ note ty1
+ moreover from T_TAbs have "TVarB Y S # \<Gamma> \<turnstile> ([(Y, X)] \<bullet> s) : T\<^isub>2"
+ by (simp add: trm.inject alpha fresh_atm)
+ then have "[(Y, X)] \<bullet> (TVarB Y S # \<Gamma>) \<turnstile> [(Y, X)] \<bullet> [(Y, X)] \<bullet> s : [(Y, X)] \<bullet> T\<^isub>2"
+ by (rule typing.eqvt)
+ with `X \<sharp> \<Gamma>` `X \<sharp> S` Y `Y \<sharp> S` have "TVarB X S # \<Gamma> \<turnstile> s : [(Y, X)] \<bullet> T\<^isub>2"
+ by perm_simp
+ then have "TVarB X U # \<Gamma> \<turnstile> s : [(Y, X)] \<bullet> T\<^isub>2" using ty1
+ by (rule narrow_type [of "[]", simplified])
+ moreover from ty2 have "([(Y, X)] \<bullet> (TVarB Y U # \<Gamma>)) \<turnstile> ([(Y, X)] \<bullet> T\<^isub>2) <: ([(Y, X)] \<bullet> [(Y, X)] \<bullet> U')"
+ by (rule subtype_of.eqvt)
+ with `X \<sharp> \<Gamma>` `X \<sharp> U` Y `Y \<sharp> U` have "(TVarB X U # \<Gamma>) \<turnstile> ([(Y, X)] \<bullet> T\<^isub>2) <: U'"
+ by perm_simp
+ ultimately show ?case by (rule T_TAbs)
+next
+ case (T_Sub \<Gamma> t S T)
+ then show ?case using subtype_transitivity by blast
+qed simp_all
+
+lemma TAbs_type':
+ assumes H: "\<Gamma> \<turnstile> (\<lambda>X<:S. s) : (\<forall>X<:U. U')"
+ and fresh: "X \<sharp> \<Gamma>" "X \<sharp> S" "X \<sharp> U"
+ obtains S'
+ where "\<Gamma> \<turnstile> U <: S"
+ and "(TVarB X U # \<Gamma>) \<turnstile> s : S'"
+ and "(TVarB X U # \<Gamma>) \<turnstile> S' <: U'"
+ using H subtype_reflexivity_from_typing [OF H] fresh
+ by (rule TAbs_type)
+
+theorem preservation: -- {* A.20 *}
+ assumes H: "\<Gamma> \<turnstile> t : T"
+ shows "t \<longmapsto> t' \<Longrightarrow> \<Gamma> \<turnstile> t' : T" using H
+proof (nominal_induct avoiding: t' rule: typing.strong_induct)
+ case (T_App \<Gamma> t\<^isub>1 T\<^isub>1\<^isub>1 T\<^isub>1\<^isub>2 t\<^isub>2 t')
+ obtain x::vrs where x_fresh: "x \<sharp> (\<Gamma>, t\<^isub>1 \<cdot> t\<^isub>2, t')"
+ by (rule exists_fresh) (rule fin_supp)
+ obtain X::tyvrs where "X \<sharp> (t\<^isub>1 \<cdot> t\<^isub>2, t')"
+ by (rule exists_fresh) (rule fin_supp)
+ with `t\<^isub>1 \<cdot> t\<^isub>2 \<longmapsto> t'` show ?case
+ proof (cases rule: eval.strong_cases [where x=x and X=X])
+ case (E_Abs v\<^isub>2 T\<^isub>1\<^isub>1' t\<^isub>1\<^isub>2)
+ with T_App and x_fresh have h: "\<Gamma> \<turnstile> (\<lambda>x:T\<^isub>1\<^isub>1'. t\<^isub>1\<^isub>2) : T\<^isub>1\<^isub>1 \<rightarrow> T\<^isub>1\<^isub>2"
+ by (simp add: trm.inject fresh_prod)
+ moreover from x_fresh have "x \<sharp> \<Gamma>" by simp
+ ultimately obtain S'
+ where T\<^isub>1\<^isub>1: "\<Gamma> \<turnstile> T\<^isub>1\<^isub>1 <: T\<^isub>1\<^isub>1'"
+ and t\<^isub>1\<^isub>2: "(VarB x T\<^isub>1\<^isub>1') # \<Gamma> \<turnstile> t\<^isub>1\<^isub>2 : S'"
+ and S': "\<Gamma> \<turnstile> S' <: T\<^isub>1\<^isub>2"
+ by (rule Abs_type') blast
+ from `\<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>1\<^isub>1`
+ have "\<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>1\<^isub>1'" using T\<^isub>1\<^isub>1 by (rule T_Sub)
+ with t\<^isub>1\<^isub>2 have "\<Gamma> \<turnstile> t\<^isub>1\<^isub>2[x \<mapsto> t\<^isub>2] : S'"
+ by (rule subst_type [where \<Delta>="[]", simplified])
+ hence "\<Gamma> \<turnstile> t\<^isub>1\<^isub>2[x \<mapsto> t\<^isub>2] : T\<^isub>1\<^isub>2" using S' by (rule T_Sub)
+ with E_Abs and x_fresh show ?thesis by (simp add: trm.inject fresh_prod)
+ next
+ case (E_App1 t''' t'' u)
+ hence "t\<^isub>1 \<longmapsto> t''" by (simp add:trm.inject)
+ hence "\<Gamma> \<turnstile> t'' : T\<^isub>1\<^isub>1 \<rightarrow> T\<^isub>1\<^isub>2" by (rule T_App)
+ hence "\<Gamma> \<turnstile> t'' \<cdot> t\<^isub>2 : T\<^isub>1\<^isub>2" using `\<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>1\<^isub>1`
+ by (rule typing.T_App)
+ with E_App1 show ?thesis by (simp add:trm.inject)
+ next
+ case (E_App2 v t''' t'')
+ hence "t\<^isub>2 \<longmapsto> t''" by (simp add:trm.inject)
+ hence "\<Gamma> \<turnstile> t'' : T\<^isub>1\<^isub>1" by (rule T_App)
+ with T_App(1) have "\<Gamma> \<turnstile> t\<^isub>1 \<cdot> t'' : T\<^isub>1\<^isub>2"
+ by (rule typing.T_App)
+ with E_App2 show ?thesis by (simp add:trm.inject)
+ qed (simp_all add: fresh_prod)
+next
+ case (T_TApp X \<Gamma> t\<^isub>1 T\<^isub>2 T\<^isub>1\<^isub>1 T\<^isub>1\<^isub>2 t')
+ obtain x::vrs where "x \<sharp> (t\<^isub>1 \<cdot>\<^sub>\<tau> T\<^isub>2, t')"
+ by (rule exists_fresh) (rule fin_supp)
+ with `t\<^isub>1 \<cdot>\<^sub>\<tau> T\<^isub>2 \<longmapsto> t'`
+ show ?case
+ proof (cases rule: eval.strong_cases [where X=X and x=x])
+ case (E_TAbs T\<^isub>1\<^isub>1' T\<^isub>2' t\<^isub>1\<^isub>2)
+ with T_TApp have "\<Gamma> \<turnstile> (\<lambda>X<:T\<^isub>1\<^isub>1'. t\<^isub>1\<^isub>2) : (\<forall>X<:T\<^isub>1\<^isub>1. T\<^isub>1\<^isub>2)" and "X \<sharp> \<Gamma>" and "X \<sharp> T\<^isub>1\<^isub>1'"
+ by (simp_all add: trm.inject)
+ moreover from `\<Gamma>\<turnstile>T\<^isub>2<:T\<^isub>1\<^isub>1` and `X \<sharp> \<Gamma>` have "X \<sharp> T\<^isub>1\<^isub>1"
+ by (blast intro: closed_in_fresh fresh_domain dest: subtype_implies_closed)
+ ultimately obtain S'
+ where "TVarB X T\<^isub>1\<^isub>1 # \<Gamma> \<turnstile> t\<^isub>1\<^isub>2 : S'"
+ and "(TVarB X T\<^isub>1\<^isub>1 # \<Gamma>) \<turnstile> S' <: T\<^isub>1\<^isub>2"
+ by (rule TAbs_type') blast
+ hence "TVarB X T\<^isub>1\<^isub>1 # \<Gamma> \<turnstile> t\<^isub>1\<^isub>2 : T\<^isub>1\<^isub>2" by (rule T_Sub)
+ hence "\<Gamma> \<turnstile> t\<^isub>1\<^isub>2[X \<mapsto>\<^sub>\<tau> T\<^isub>2] : T\<^isub>1\<^isub>2[X \<mapsto> T\<^isub>2]\<^sub>\<tau>" using `\<Gamma> \<turnstile> T\<^isub>2 <: T\<^isub>1\<^isub>1`
+ by (rule substT_type [where D="[]", simplified])
+ with T_TApp and E_TAbs show ?thesis by (simp add: trm.inject)
+ next
+ case (E_TApp t''' t'' T)
+ from E_TApp have "t\<^isub>1 \<longmapsto> t''" by (simp add: trm.inject)
+ then have "\<Gamma> \<turnstile> t'' : (\<forall>X<:T\<^isub>1\<^isub>1. T\<^isub>1\<^isub>2)" by (rule T_TApp)
+ then have "\<Gamma> \<turnstile> t'' \<cdot>\<^sub>\<tau> T\<^isub>2 : T\<^isub>1\<^isub>2[X \<mapsto> T\<^isub>2]\<^sub>\<tau>" using `\<Gamma> \<turnstile> T\<^isub>2 <: T\<^isub>1\<^isub>1`
+ by (rule better_T_TApp)
+ with E_TApp show ?thesis by (simp add: trm.inject)
+ qed (simp_all add: fresh_prod)
+next
+ case (T_Sub \<Gamma> t S T t')
+ have "t \<longmapsto> t'" by fact
+ hence "\<Gamma> \<turnstile> t' : S" by (rule T_Sub)
+ moreover have "\<Gamma> \<turnstile> S <: T" by fact
+ ultimately show ?case by (rule typing.T_Sub)
+qed (auto)
+
+lemma Fun_canonical: -- {* A.14(1) *}
+ assumes ty: "[] \<turnstile> v : T\<^isub>1 \<rightarrow> T\<^isub>2"
+ shows "val v \<Longrightarrow> \<exists>x t S. v = (\<lambda>x:S. t)" using ty
+proof (induct \<Gamma>\<equiv>"[]::env" v T\<equiv>"T\<^isub>1 \<rightarrow> T\<^isub>2" arbitrary: T\<^isub>1 T\<^isub>2)
+ case (T_Sub \<Gamma> t S T)
+ hence "\<Gamma> \<turnstile> S <: T\<^isub>1 \<rightarrow> T\<^isub>2" by simp
+ then obtain S\<^isub>1 S\<^isub>2 where S: "S = S\<^isub>1 \<rightarrow> S\<^isub>2"
+ by cases (auto simp add: T_Sub)
+ with `val t` and `\<Gamma> = []` show ?case by (rule T_Sub)
+qed (auto)
+
+lemma TyAll_canonical: -- {* A.14(3) *}
+ fixes X::tyvrs
+ assumes ty: "[] \<turnstile> v : (\<forall>X<:T\<^isub>1. T\<^isub>2)"
+ shows "val v \<Longrightarrow> \<exists>X t S. v = (\<lambda>X<:S. t)" using ty
+proof (induct \<Gamma>\<equiv>"[]::env" v T\<equiv>"\<forall>X<:T\<^isub>1. T\<^isub>2" arbitrary: X T\<^isub>1 T\<^isub>2)
+ case (T_Sub \<Gamma> t S T)
+ hence "\<Gamma> \<turnstile> S <: (\<forall>X<:T\<^isub>1. T\<^isub>2)" by simp
+ then obtain X S\<^isub>1 S\<^isub>2 where S: "S = (\<forall>X<:S\<^isub>1. S\<^isub>2)"
+ by cases (auto simp add: T_Sub)
+ then show ?case using T_Sub by auto
+qed (auto)
+
+theorem progress:
+ assumes "[] \<turnstile> t : T"
+ shows "val t \<or> (\<exists>t'. t \<longmapsto> t')"
+using assms
+proof (induct \<Gamma> \<equiv> "[]::env" t T)
+ case (T_App \<Gamma> t\<^isub>1 T\<^isub>1\<^isub>1 T\<^isub>1\<^isub>2 t\<^isub>2)
+ hence "val t\<^isub>1 \<or> (\<exists>t'. t\<^isub>1 \<longmapsto> t')" by simp
+ thus ?case
+ proof
+ assume t\<^isub>1_val: "val t\<^isub>1"
+ with T_App obtain x t3 S where t\<^isub>1: "t\<^isub>1 = (\<lambda>x:S. t3)"
+ by (auto dest!: Fun_canonical)
+ from T_App have "val t\<^isub>2 \<or> (\<exists>t'. t\<^isub>2 \<longmapsto> t')" by simp
+ thus ?case
+ proof
+ assume "val t\<^isub>2"
+ with t\<^isub>1 have "t\<^isub>1 \<cdot> t\<^isub>2 \<longmapsto> t3[x \<mapsto> t\<^isub>2]" by auto
+ thus ?case by auto
+ next
+ assume "\<exists>t'. t\<^isub>2 \<longmapsto> t'"
+ then obtain t' where "t\<^isub>2 \<longmapsto> t'" by auto
+ with t\<^isub>1_val have "t\<^isub>1 \<cdot> t\<^isub>2 \<longmapsto> t\<^isub>1 \<cdot> t'" by auto
+ thus ?case by auto
+ qed
+ next
+ assume "\<exists>t'. t\<^isub>1 \<longmapsto> t'"
+ then obtain t' where "t\<^isub>1 \<longmapsto> t'" by auto
+ hence "t\<^isub>1 \<cdot> t\<^isub>2 \<longmapsto> t' \<cdot> t\<^isub>2" by auto
+ thus ?case by auto
+ qed
+next
+ case (T_TApp X \<Gamma> t\<^isub>1 T\<^isub>2 T\<^isub>1\<^isub>1 T\<^isub>1\<^isub>2)
+ hence "val t\<^isub>1 \<or> (\<exists>t'. t\<^isub>1 \<longmapsto> t')" by simp
+ thus ?case
+ proof
+ assume "val t\<^isub>1"
+ with T_TApp obtain x t S where "t\<^isub>1 = (\<lambda>x<:S. t)"
+ by (auto dest!: TyAll_canonical)
+ hence "t\<^isub>1 \<cdot>\<^sub>\<tau> T\<^isub>2 \<longmapsto> t[x \<mapsto>\<^sub>\<tau> T\<^isub>2]" by auto
+ thus ?case by auto
+ next
+ assume "\<exists>t'. t\<^isub>1 \<longmapsto> t'" thus ?case by auto
+ qed
+qed (auto)
+
+end
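
Taken together, preservation and the progress theorem above yield the usual one-step
type-soundness property for closed terms.  A minimal sketch of how the two results
combine is given below; the corollary name is invented for illustration and is not
part of the changeset.

  (* illustrative sketch only: combines progress and preservation *)
  corollary soundness_step:
    assumes "[] \<turnstile> t : T" and "\<not> val t"
    obtains t' where "t \<longmapsto> t'" and "[] \<turnstile> t' : T"
    using assms progress preservation by blast
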
--- a/src/HOL/Nominal/Nominal.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Nominal/Nominal.thy Wed Mar 04 10:45:52 2009 +0100
@@ -397,6 +397,37 @@
lemmas fresh_star_prod = fresh_star_prod_list fresh_star_prod_set
+lemma fresh_star_set_eq: "set xs \<sharp>* c = xs \<sharp>* c"
+ by (simp add: fresh_star_def)
+
+lemma fresh_star_Un_elim:
+ "((S \<union> T) \<sharp>* c \<Longrightarrow> PROP C) \<equiv> (S \<sharp>* c \<Longrightarrow> T \<sharp>* c \<Longrightarrow> PROP C)"
+ apply rule
+ apply (simp_all add: fresh_star_def)
+ apply (erule meta_mp)
+ apply blast
+ done
+
+lemma fresh_star_insert_elim:
+ "(insert x S \<sharp>* c \<Longrightarrow> PROP C) \<equiv> (x \<sharp> c \<Longrightarrow> S \<sharp>* c \<Longrightarrow> PROP C)"
+ by rule (simp_all add: fresh_star_def)
+
+lemma fresh_star_empty_elim:
+ "({} \<sharp>* c \<Longrightarrow> PROP C) \<equiv> PROP C"
+ by (simp add: fresh_star_def)
+
+text {* Normalization of freshness results; cf.\ @{text nominal_induct} *}
+
+lemma fresh_star_unit_elim:
+ shows "((a::'a set)\<sharp>*() \<Longrightarrow> PROP C) \<equiv> PROP C"
+ and "((b::'a list)\<sharp>*() \<Longrightarrow> PROP C) \<equiv> PROP C"
+ by (simp_all add: fresh_star_def fresh_def supp_unit)
+
+lemma fresh_star_prod_elim:
+ shows "((a::'a set)\<sharp>*(x,y) \<Longrightarrow> PROP C) \<equiv> (a\<sharp>*x \<Longrightarrow> a\<sharp>*y \<Longrightarrow> PROP C)"
+ and "((b::'a list)\<sharp>*(x,y) \<Longrightarrow> PROP C) \<equiv> (b\<sharp>*x \<Longrightarrow> b\<sharp>*y \<Longrightarrow> PROP C)"
+ by (rule, simp_all add: fresh_star_prod)+
+
section {* Abstract Properties for Permutations and Atoms *}
(*=========================================================*)
@@ -1645,6 +1676,31 @@
apply(rule at)
done
+lemma pt_fresh_star_eqvt:
+ fixes pi :: "'x prm"
+ and x :: "'a"
+ and a :: "'x set"
+ and b :: "'x list"
+ assumes pt: "pt TYPE('a) TYPE('x)"
+ and at: "at TYPE('x)"
+ shows "pi\<bullet>(a\<sharp>*x) = (pi\<bullet>a)\<sharp>*(pi\<bullet>x)"
+ and "pi\<bullet>(b\<sharp>*x) = (pi\<bullet>b)\<sharp>*(pi\<bullet>x)"
+ by (simp_all add: perm_bool pt_fresh_star_bij[OF pt, OF at])
+
+lemma pt_fresh_star_eqvt_ineq:
+ fixes pi::"'x prm"
+ and a::"'y set"
+ and b::"'y list"
+ and x::"'a"
+ assumes pta: "pt TYPE('a) TYPE('x)"
+ and ptb: "pt TYPE('y) TYPE('x)"
+ and at: "at TYPE('x)"
+ and cp: "cp TYPE('a) TYPE('x) TYPE('y)"
+ and dj: "disjoint TYPE('y) TYPE('x)"
+ shows "pi\<bullet>(a\<sharp>*x) = (pi\<bullet>a)\<sharp>*(pi\<bullet>x)"
+ and "pi\<bullet>(b\<sharp>*x) = (pi\<bullet>b)\<sharp>*(pi\<bullet>x)"
+ by (simp_all add: pt_fresh_star_bij_ineq[OF pta, OF ptb, OF at, OF cp] dj_perm_forget[OF dj] perm_bool)
+
lemma pt_fresh_bij1:
fixes pi :: "'x prm"
and x :: "'a"
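
For orientation, the new fresh_star lemmas normalize freshness constraints over
sets, lists, units and pairs in the same way that fresh_unit_elim and
fresh_prod_elim already do for single atoms (cf.\ the nominal_induct change below).
A minimal sketch of the set/list correspondence, stated for illustration only:

  (* illustrative sketch, not part of the changeset *)
  lemma "set xs \<sharp>* c \<Longrightarrow> xs \<sharp>* c"
    by (simp add: fresh_star_set_eq)
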
--- a/src/HOL/Nominal/nominal_atoms.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Nominal/nominal_atoms.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* title: HOL/Nominal/nominal_atoms.ML
- ID: $Id$
Author: Christian Urban and Stefan Berghofer, TU Muenchen
Declaration of atom types to be used in nominal datatypes.
@@ -784,6 +783,8 @@
val fresh_star_bij = @{thms "Nominal.pt_fresh_star_bij"};
val fresh_eqvt = @{thm "Nominal.pt_fresh_eqvt"};
val fresh_eqvt_ineq = @{thm "Nominal.pt_fresh_eqvt_ineq"};
+ val fresh_star_eqvt = @{thms "Nominal.pt_fresh_star_eqvt"};
+ val fresh_star_eqvt_ineq= @{thms "Nominal.pt_fresh_star_eqvt_ineq"};
val set_diff_eqvt = @{thm "Nominal.pt_set_diff_eqvt"};
val in_eqvt = @{thm "Nominal.pt_in_eqvt"};
val eq_eqvt = @{thm "Nominal.pt_eq_eqvt"};
@@ -947,13 +948,17 @@
in [(("fresh_bij", thms1 @ thms2),[])] end
||>> add_thmss_string
let val thms1 = inst_pt_at fresh_star_bij
- and thms2 = flat (map (fn ti => inst_pt_pt_at_cp [ti]) fresh_star_bij_ineq);
+ and thms2 = maps (fn ti => inst_pt_pt_at_cp [ti]) fresh_star_bij_ineq
in [(("fresh_star_bij", thms1 @ thms2),[])] end
||>> add_thmss_string
let val thms1 = inst_pt_at [fresh_eqvt]
and thms2 = inst_pt_pt_at_cp_dj [fresh_eqvt_ineq]
in [(("fresh_eqvt", thms1 @ thms2),[NominalThmDecls.eqvt_add])] end
||>> add_thmss_string
+ let val thms1 = inst_pt_at fresh_star_eqvt
+ and thms2 = maps (fn ti => inst_pt_pt_at_cp_dj [ti]) fresh_star_eqvt_ineq
+ in [(("fresh_star_eqvt", thms1 @ thms2),[NominalThmDecls.eqvt_add])] end
+ ||>> add_thmss_string
let val thms1 = inst_pt_at [in_eqvt]
in [(("in_eqvt", thms1),[NominalThmDecls.eqvt_add])] end
||>> add_thmss_string
--- a/src/HOL/Nominal/nominal_induct.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Nominal/nominal_induct.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
-(* ID: $Id$
- Author: Christian Urban and Makarius
+(* Author: Christian Urban and Makarius
The nominal induct proof method.
*)
@@ -24,7 +23,8 @@
val split_all_tuples =
Simplifier.full_simplify (HOL_basic_ss addsimps
- [split_conv, split_paired_all, unit_all_eq1, thm "fresh_unit_elim", thm "fresh_prod_elim"]);
+ [split_conv, split_paired_all, unit_all_eq1, @{thm fresh_unit_elim}, @{thm fresh_prod_elim}] @
+ @{thms fresh_star_unit_elim} @ @{thms fresh_star_prod_elim});
(* prepare rule *)
--- a/src/HOL/Nominal/nominal_inductive.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Nominal/nominal_inductive.ML Wed Mar 04 10:45:52 2009 +0100
@@ -7,8 +7,8 @@
signature NOMINAL_INDUCTIVE =
sig
- val prove_strong_ind: string -> (string * string list) list -> theory -> Proof.state
- val prove_eqvt: string -> string list -> theory -> theory
+ val prove_strong_ind: string -> (string * string list) list -> local_theory -> Proof.state
+ val prove_eqvt: string -> string list -> local_theory -> local_theory
end
structure NominalInductive : NOMINAL_INDUCTIVE =
@@ -28,6 +28,8 @@
fun atomize_induct ctxt = Conv.fconv_rule (Conv.prems_conv ~1
(Conv.params_conv ~1 (K (Conv.prems_conv ~1 atomize_conv)) ctxt));
+fun preds_of ps t = gen_inter (op = o apfst dest_Free) (ps, Term.add_frees t []);
+
val fresh_prod = thm "fresh_prod";
val perm_bool = mk_meta_eq (thm "perm_bool");
@@ -142,9 +144,9 @@
fun first_order_mrs ths th = ths MRS
Thm.instantiate (first_order_matchs (cprems_of th) (map cprop_of ths)) th;
-fun prove_strong_ind s avoids thy =
+fun prove_strong_ind s avoids ctxt =
let
- val ctxt = ProofContext.init thy;
+ val thy = ProofContext.theory_of ctxt;
val ({names, ...}, {raw_induct, intrs, elims, ...}) =
InductivePackage.the_inductive ctxt (Sign.intern_const thy s);
val ind_params = InductivePackage.params_of raw_induct;
@@ -158,8 +160,7 @@
commas_quote xs));
val induct_cases = map fst (fst (RuleCases.get (the
(Induct.lookup_inductP ctxt (hd names)))));
- val raw_induct' = Logic.unvarify (prop_of raw_induct);
- val elims' = map (Logic.unvarify o prop_of) elims;
+ val ([raw_induct'], ctxt') = Variable.import_terms false [prop_of raw_induct] ctxt;
val concls = raw_induct' |> Logic.strip_imp_concl |> HOLogic.dest_Trueprop |>
HOLogic.dest_conj |> map (HOLogic.dest_imp ##> strip_comb);
val ps = map (fst o snd) concls;
@@ -199,8 +200,8 @@
val ind_sort = if null atomTs then HOLogic.typeS
else Sign.certify_sort thy (map (fn T => Sign.intern_class thy
("fs_" ^ Sign.base_name (fst (dest_Type T)))) atomTs);
- val fs_ctxt_tyname = Name.variant (map fst (OldTerm.term_tfrees raw_induct')) "'n";
- val fs_ctxt_name = Name.variant (OldTerm.add_term_names (raw_induct', [])) "z";
+ val ([fs_ctxt_tyname], _) = Name.variants ["'n"] (Variable.names_of ctxt');
+ val ([fs_ctxt_name], ctxt'') = Variable.variant_fixes ["z"] ctxt';
val fsT = TFree (fs_ctxt_tyname, ind_sort);
val inductive_forall_def' = Drule.instantiate'
@@ -237,7 +238,7 @@
val prem = Logic.list_implies
(map mk_fresh bvars @ mk_distinct bvars @
map (fn prem =>
- if null (OldTerm.term_frees prem inter ps) then prem
+ if null (preds_of ps prem) then prem
else lift_prem prem) prems,
HOLogic.mk_Trueprop (lift_pred p ts));
val vs = map (Var o apfst (rpair 0)) (Term.rename_wrt_term prem params')
@@ -263,7 +264,7 @@
val vc_compat = map (fn (params, bvars, prems, (p, ts)) =>
map (fn q => list_all (params, incr_boundvars ~1 (Logic.list_implies
(List.mapPartial (fn prem =>
- if null (ps inter OldTerm.term_frees prem) then SOME prem
+ if null (preds_of ps prem) then SOME prem
else map_term (split_conj (K o I) names) prem prem) prems, q))))
(mk_distinct bvars @
maps (fn (t, T) => map (fn (u, U) => HOLogic.mk_Trueprop
@@ -309,8 +310,8 @@
[ex] ctxt
in (freshs1 @ [term_of cx], freshs2 @ ths, ctxt') end;
- fun mk_ind_proof thy thss =
- Goal.prove_global thy [] prems' concl' (fn {prems = ihyps, context = ctxt} =>
+ fun mk_ind_proof ctxt' thss =
+ Goal.prove ctxt' [] prems' concl' (fn {prems = ihyps, context = ctxt} =>
let val th = Goal.prove ctxt [] [] concl (fn {context, ...} =>
rtac raw_induct 1 THEN
EVERY (maps (fn ((((_, bvars, oprems, _), vc_compat_ths), ihyp), (vs, ihypt)) =>
@@ -352,7 +353,7 @@
(rev pis' @ pis) th));
val (gprems1, gprems2) = split_list
(map (fn (th, t) =>
- if null (OldTerm.term_frees t inter ps) then (SOME th, mk_pi th)
+ if null (preds_of ps t) then (SOME th, mk_pi th)
else
(map_thm ctxt (split_conj (K o I) names)
(etac conjunct1 1) monos NONE th,
@@ -403,42 +404,42 @@
REPEAT (REPEAT (resolve_tac [conjI, impI] 1) THEN
etac impE 1 THEN atac 1 THEN REPEAT (etac @{thm allE_Nil} 1) THEN
asm_full_simp_tac (simpset_of thy) 1)
- end);
+ end) |> singleton (ProofContext.export ctxt' ctxt);
(** strong case analysis rule **)
val cases_prems = map (fn ((name, avoids), rule) =>
let
- val prem :: prems = Logic.strip_imp_prems rule;
- val concl = Logic.strip_imp_concl rule;
- val used = Term.add_free_names rule [];
+ val ([rule'], ctxt') = Variable.import_terms false [prop_of rule] ctxt;
+ val prem :: prems = Logic.strip_imp_prems rule';
+ val concl = Logic.strip_imp_concl rule'
in
(prem,
List.drop (snd (strip_comb (HOLogic.dest_Trueprop prem)), length ind_params),
concl,
- fst (fold_map (fn (prem, (_, avoid)) => fn used =>
+ fold_map (fn (prem, (_, avoid)) => fn ctxt =>
let
val prems = Logic.strip_assums_hyp prem;
val params = Logic.strip_params prem;
val bnds = fold (add_binders thy 0) prems [] @ mk_avoids params avoid;
- fun mk_subst (p as (s, T)) (i, j, used, ps, qs, is, ts) =
+ fun mk_subst (p as (s, T)) (i, j, ctxt, ps, qs, is, ts) =
if member (op = o apsnd fst) bnds (Bound i) then
let
- val s' = Name.variant used s;
+ val ([s'], ctxt') = Variable.variant_fixes [s] ctxt;
val t = Free (s', T)
- in (i + 1, j, s' :: used, ps, (t, T) :: qs, i :: is, t :: ts) end
- else (i + 1, j + 1, used, p :: ps, qs, is, Bound j :: ts);
- val (_, _, used', ps, qs, is, ts) = fold_rev mk_subst params
- (0, 0, used, [], [], [], [])
+ in (i + 1, j, ctxt', ps, (t, T) :: qs, i :: is, t :: ts) end
+ else (i + 1, j + 1, ctxt, p :: ps, qs, is, Bound j :: ts);
+ val (_, _, ctxt', ps, qs, is, ts) = fold_rev mk_subst params
+ (0, 0, ctxt, [], [], [], [])
in
- ((ps, qs, is, map (curry subst_bounds (rev ts)) prems), used')
- end) (prems ~~ avoids) used))
+ ((ps, qs, is, map (curry subst_bounds (rev ts)) prems), ctxt')
+ end) (prems ~~ avoids) ctxt')
end)
(InductivePackage.partition_rules' raw_induct (intrs ~~ avoids') ~~
- elims');
+ elims);
val cases_prems' =
- map (fn (prem, args, concl, prems) =>
+ map (fn (prem, args, concl, (prems, _)) =>
let
fun mk_prem (ps, [], _, prems) =
list_all (ps, Logic.list_implies (prems, concl))
@@ -462,9 +463,9 @@
val simp_fresh_atm = map
(Simplifier.simplify (HOL_basic_ss addsimps fresh_atm));
- fun mk_cases_proof thy ((((name, thss), elim), (prem, args, concl, prems)),
+ fun mk_cases_proof ((((name, thss), elim), (prem, args, concl, (prems, ctxt'))),
prems') =
- (name, Goal.prove_global thy [] (prem :: prems') concl
+ (name, Goal.prove ctxt' [] (prem :: prems') concl
(fn {prems = hyp :: hyps, context = ctxt1} =>
EVERY (rtac (hyp RS elim) 1 ::
map (fn (((_, vc_compat_ths), case_hyp), (_, qs, is, _)) =>
@@ -537,52 +538,54 @@
end) ctxt4 1)
val final = ProofContext.export ctxt3 ctxt2 [th]
in resolve_tac final 1 end) ctxt1 1)
- (thss ~~ hyps ~~ prems))))
+ (thss ~~ hyps ~~ prems))) |>
+ singleton (ProofContext.export ctxt' ctxt))
in
- thy |>
- ProofContext.init |>
- Proof.theorem_i NONE (fn thss => ProofContext.theory (fn thy =>
+ ctxt'' |>
+ Proof.theorem_i NONE (fn thss => fn ctxt =>
let
- val ctxt = ProofContext.init thy;
val rec_name = space_implode "_" (map Sign.base_name names);
+ val rec_qualified = Binding.qualify false rec_name;
val ind_case_names = RuleCases.case_names induct_cases;
val induct_cases' = InductivePackage.partition_rules' raw_induct
(intrs ~~ induct_cases);
val thss' = map (map atomize_intr) thss;
val thsss = InductivePackage.partition_rules' raw_induct (intrs ~~ thss');
val strong_raw_induct =
- mk_ind_proof thy thss' |> InductivePackage.rulify;
- val strong_cases = map (mk_cases_proof thy ##> InductivePackage.rulify)
+ mk_ind_proof ctxt thss' |> InductivePackage.rulify;
+ val strong_cases = map (mk_cases_proof ##> InductivePackage.rulify)
(thsss ~~ elims ~~ cases_prems ~~ cases_prems');
val strong_induct =
if length names > 1 then
(strong_raw_induct, [ind_case_names, RuleCases.consumes 0])
else (strong_raw_induct RSN (2, rev_mp),
[ind_case_names, RuleCases.consumes 1]);
- val ([strong_induct'], thy') = thy |>
- Sign.add_path rec_name |>
- PureThy.add_thms [((Binding.name "strong_induct", #1 strong_induct), #2 strong_induct)];
+ val ((_, [strong_induct']), ctxt') = LocalTheory.note Thm.theoremK
+ ((rec_qualified (Binding.name "strong_induct"),
+ map (Attrib.internal o K) (#2 strong_induct)), [#1 strong_induct])
+ ctxt;
val strong_inducts =
ProjectRule.projects ctxt (1 upto length names) strong_induct'
in
- thy' |>
- PureThy.add_thmss [((Binding.name "strong_inducts", strong_inducts),
- [ind_case_names, RuleCases.consumes 1])] |> snd |>
- Sign.parent_path |>
- fold (fn ((name, elim), (_, cases)) =>
- Sign.add_path (Sign.base_name name) #>
- PureThy.add_thms [((Binding.name "strong_cases", elim),
- [RuleCases.case_names (map snd cases),
- RuleCases.consumes 1])] #> snd #>
- Sign.parent_path) (strong_cases ~~ induct_cases')
- end))
+ ctxt' |>
+ LocalTheory.note Thm.theoremK
+ ((rec_qualified (Binding.name "strong_inducts"),
+ [Attrib.internal (K ind_case_names),
+ Attrib.internal (K (RuleCases.consumes 1))]),
+ strong_inducts) |> snd |>
+ LocalTheory.notes Thm.theoremK (map (fn ((name, elim), (_, cases)) =>
+ ((Binding.name (NameSpace.qualified (Sign.base_name name) "strong_cases"),
+ [Attrib.internal (K (RuleCases.case_names (map snd cases))),
+ Attrib.internal (K (RuleCases.consumes 1))]), [([elim], [])]))
+ (strong_cases ~~ induct_cases')) |> snd
+ end)
(map (map (rulify_term thy #> rpair [])) vc_compat)
end;
-fun prove_eqvt s xatoms thy =
+fun prove_eqvt s xatoms ctxt =
let
- val ctxt = ProofContext.init thy;
+ val thy = ProofContext.theory_of ctxt;
val ({names, ...}, {raw_induct, intrs, elims, ...}) =
InductivePackage.the_inductive ctxt (Sign.intern_const thy s);
val raw_induct = atomize_induct ctxt raw_induct;
@@ -594,6 +597,7 @@
(s, ths ~~ InductivePackage.infer_intro_vars th k ths))
(InductivePackage.partition_rules raw_induct intrs ~~
InductivePackage.arities_of raw_induct ~~ elims));
+ val k = length (InductivePackage.params_of raw_induct);
val atoms' = NominalAtoms.atoms_of thy;
val atoms =
if null xatoms then atoms' else
@@ -612,19 +616,21 @@
(NominalThmDecls.get_eqvt_thms ctxt @ perm_pi_simp) addsimprocs
[mk_perm_bool_simproc names,
NominalPermeq.perm_simproc_app, NominalPermeq.perm_simproc_fun];
- val t = Logic.unvarify (concl_of raw_induct);
- val pi = Name.variant (OldTerm.add_term_names (t, [])) "pi";
+ val (([t], [pi]), ctxt') = ctxt |>
+ Variable.import_terms false [concl_of raw_induct] ||>>
+ Variable.variant_fixes ["pi"];
val ps = map (fst o HOLogic.dest_imp)
(HOLogic.dest_conj (HOLogic.dest_Trueprop t));
- fun eqvt_tac pi (intr, vs) st =
+ fun eqvt_tac ctxt'' pi (intr, vs) st =
let
- fun eqvt_err s = error
- ("Could not prove equivariance for introduction rule\n" ^
- Syntax.string_of_term_global (theory_of_thm intr)
- (Logic.unvarify (prop_of intr)) ^ "\n" ^ s);
+ fun eqvt_err s =
+ let val ([t], ctxt''') = Variable.import_terms true [prop_of intr] ctxt
+ in error ("Could not prove equivariance for introduction rule\n" ^
+ Syntax.string_of_term ctxt''' t ^ "\n" ^ s)
+ end;
val res = SUBPROOF (fn {prems, params, ...} =>
let
- val prems' = map (fn th => the_default th (map_thm ctxt
+ val prems' = map (fn th => the_default th (map_thm ctxt'
(split_conj (K I) names) (etac conjunct2 1) monos NONE th)) prems;
val prems'' = map (fn th => Simplifier.simplify eqvt_ss
(mk_perm_bool (cterm_of thy pi) th)) prems';
@@ -632,29 +638,36 @@
map (cterm_of thy o NominalPackage.mk_perm [] pi o term_of) params)
intr
in (rtac intr' THEN_ALL_NEW (TRY o resolve_tac prems'')) 1
- end) ctxt 1 st
+ end) ctxt' 1 st
in
case (Seq.pull res handle THM (s, _, _) => eqvt_err s) of
NONE => eqvt_err ("Rule does not match goal\n" ^
- Syntax.string_of_term_global (theory_of_thm st) (hd (prems_of st)))
+ Syntax.string_of_term ctxt'' (hd (prems_of st)))
| SOME (th, _) => Seq.single th
end;
val thss = map (fn atom =>
let val pi' = Free (pi, NominalAtoms.mk_permT (Type (atom, [])))
in map (fn th => zero_var_indexes (th RS mp))
- (DatatypeAux.split_conj_thm (Goal.prove_global thy [] []
+ (DatatypeAux.split_conj_thm (Goal.prove ctxt' [] []
(HOLogic.mk_Trueprop (foldr1 HOLogic.mk_conj (map (fn p =>
- HOLogic.mk_imp (p, list_comb
- (apsnd (map (NominalPackage.mk_perm [] pi')) (strip_comb p)))) ps)))
- (fn _ => EVERY (rtac raw_induct 1 :: map (fn intr_vs =>
+ let
+ val (h, ts) = strip_comb p;
+ val (ts1, ts2) = chop k ts
+ in
+ HOLogic.mk_imp (p, list_comb (h, ts1 @
+ map (NominalPackage.mk_perm [] pi') ts2))
+ end) ps)))
+ (fn {context, ...} => EVERY (rtac raw_induct 1 :: map (fn intr_vs =>
full_simp_tac eqvt_ss 1 THEN
- eqvt_tac pi' intr_vs) intrs'))))
+ eqvt_tac context pi' intr_vs) intrs')) |>
+ singleton (ProofContext.export ctxt' ctxt)))
end) atoms
in
- fold (fn (name, ths) =>
- Sign.add_path (Sign.base_name name) #>
- PureThy.add_thmss [((Binding.name "eqvt", ths), [NominalThmDecls.eqvt_add])] #> snd #>
- Sign.parent_path) (names ~~ transp thss) thy
+ ctxt |>
+ LocalTheory.notes Thm.theoremK (map (fn (name, ths) =>
+ ((Binding.name (NameSpace.qualified (Sign.base_name name) "eqvt"),
+ [Attrib.internal (K NominalThmDecls.eqvt_add)]), [(ths, [])]))
+ (names ~~ transp thss)) |> snd
end;
@@ -665,17 +678,17 @@
val _ = OuterKeyword.keyword "avoids";
val _ =
- OuterSyntax.command "nominal_inductive"
+ OuterSyntax.local_theory_to_proof "nominal_inductive"
"prove equivariance and strong induction theorem for inductive predicate involving nominal datatypes" K.thy_goal
- (P.name -- Scan.optional (P.$$$ "avoids" |-- P.and_list1 (P.name --
+ (P.xname -- Scan.optional (P.$$$ "avoids" |-- P.and_list1 (P.name --
(P.$$$ ":" |-- Scan.repeat1 P.name))) [] >> (fn (name, avoids) =>
- Toplevel.print o Toplevel.theory_to_proof (prove_strong_ind name avoids)));
+ prove_strong_ind name avoids));
val _ =
- OuterSyntax.command "equivariance"
+ OuterSyntax.local_theory "equivariance"
"prove equivariance for inductive predicate involving nominal datatypes" K.thy_decl
- (P.name -- Scan.optional (P.$$$ "[" |-- P.list1 P.name --| P.$$$ "]") [] >>
- (fn (name, atoms) => Toplevel.theory (prove_eqvt name atoms)));
+ (P.xname -- Scan.optional (P.$$$ "[" |-- P.list1 P.name --| P.$$$ "]") [] >>
+ (fn (name, atoms) => prove_eqvt name atoms));
end;
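
With prove_strong_ind and prove_eqvt now working over a local_theory, the
nominal_inductive and equivariance commands become local-theory commands.  A
hypothetical invocation is sketched below; the predicate typing, the rule name
t_Lam, the avoided atom x and the discharging lemmas are placeholders for whatever
a concrete development provides.

  (* hypothetical usage; all names below are placeholders *)
  equivariance typing
  nominal_inductive typing
    avoids t_Lam: x
    by (simp_all add: abs_fresh fresh_atm)
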
--- a/src/HOL/Nominal/nominal_inductive2.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Nominal/nominal_inductive2.ML Wed Mar 04 10:45:52 2009 +0100
@@ -8,7 +8,7 @@
signature NOMINAL_INDUCTIVE2 =
sig
- val prove_strong_ind: string -> (string * string list) list -> theory -> Proof.state
+ val prove_strong_ind: string -> (string * string list) list -> local_theory -> Proof.state
end
structure NominalInductive2 : NOMINAL_INDUCTIVE2 =
@@ -28,6 +28,13 @@
fun atomize_induct ctxt = Conv.fconv_rule (Conv.prems_conv ~1
(Conv.params_conv ~1 (K (Conv.prems_conv ~1 atomize_conv)) ctxt));
+val fresh_postprocess =
+ Simplifier.full_simplify (HOL_basic_ss addsimps
+ [@{thm fresh_star_set_eq}, @{thm fresh_star_Un_elim},
+ @{thm fresh_star_insert_elim}, @{thm fresh_star_empty_elim}]);
+
+fun preds_of ps t = gen_inter (op = o apfst dest_Free) (ps, Term.add_frees t []);
+
val perm_bool = mk_meta_eq (thm "perm_bool");
val perm_boolI = thm "perm_boolI";
val (_, [perm_boolI_pi, _]) = Drule.strip_comb (snd (Thm.dest_comb
@@ -148,9 +155,9 @@
map (Envir.subst_vars env #> cterm_of thy) vs ~~ cts) th
end;
-fun prove_strong_ind s avoids thy =
+fun prove_strong_ind s avoids ctxt =
let
- val ctxt = ProofContext.init thy;
+ val thy = ProofContext.theory_of ctxt;
val ({names, ...}, {raw_induct, intrs, elims, ...}) =
InductivePackage.the_inductive ctxt (Sign.intern_const thy s);
val ind_params = InductivePackage.params_of raw_induct;
@@ -166,8 +173,7 @@
(Induct.lookup_inductP ctxt (hd names)))));
val induct_cases' = if null induct_cases then replicate (length intrs) ""
else induct_cases;
- val raw_induct' = Logic.unvarify (prop_of raw_induct);
- val elims' = map (Logic.unvarify o prop_of) elims;
+ val ([raw_induct'], ctxt') = Variable.import_terms false [prop_of raw_induct] ctxt;
val concls = raw_induct' |> Logic.strip_imp_concl |> HOLogic.dest_Trueprop |>
HOLogic.dest_conj |> map (HOLogic.dest_imp ##> strip_comb);
val ps = map (fst o snd) concls;
@@ -191,12 +197,15 @@
handle TERM _ =>
error ("Expression " ^ quote s ^ " to be avoided in case " ^
quote name ^ " is not a set type");
- val ps = map mk sets
+ fun add_set p [] = [p]
+ | add_set (t, T) ((u, U) :: ps) =
+ if T = U then
+ let val S = HOLogic.mk_setT T
+ in (Const (@{const_name "op Un"}, S --> S --> S) $ u $ t, T) :: ps
+ end
+ else (u, U) :: add_set (t, T) ps
in
- case duplicates op = (map snd ps) of
- [] => ps
- | Ts => error ("More than one set in case " ^ quote name ^
- " for type(s) " ^ commas_quote (map (Syntax.string_of_typ ctxt') Ts))
+ fold (mk #> add_set) sets []
end;
val prems = map (fn (prem, name) =>
@@ -221,8 +230,8 @@
val ind_sort = if null atomTs then HOLogic.typeS
else Sign.certify_sort thy (map (fn a => Sign.intern_class thy
("fs_" ^ Sign.base_name a)) atoms);
- val fs_ctxt_tyname = Name.variant (map fst (OldTerm.term_tfrees raw_induct')) "'n";
- val fs_ctxt_name = Name.variant (OldTerm.add_term_names (raw_induct', [])) "z";
+ val ([fs_ctxt_tyname], _) = Name.variants ["'n"] (Variable.names_of ctxt');
+ val ([fs_ctxt_name], ctxt'') = Variable.variant_fixes ["z"] ctxt';
val fsT = TFree (fs_ctxt_tyname, ind_sort);
val inductive_forall_def' = Drule.instantiate'
@@ -253,7 +262,7 @@
val prem = Logic.list_implies
(map mk_fresh sets @
map (fn prem =>
- if null (OldTerm.term_frees prem inter ps) then prem
+ if null (preds_of ps prem) then prem
else lift_prem prem) prems,
HOLogic.mk_Trueprop (lift_pred p ts));
in abs_params params' prem end) prems);
@@ -276,7 +285,7 @@
val (vc_compat, vc_compat') = map (fn (params, sets, prems, (p, ts)) =>
map (fn q => abs_params params (incr_boundvars ~1 (Logic.list_implies
(List.mapPartial (fn prem =>
- if null (ps inter OldTerm.term_frees prem) then SOME prem
+ if null (preds_of ps prem) then SOME prem
else map_term (split_conj (K o I) names) prem prem) prems, q))))
(maps (fn (t, T) => map (fn (u, U) => HOLogic.mk_Trueprop
(NominalPackage.fresh_star_const U T $ u $ t)) sets)
@@ -345,8 +354,8 @@
ths1 @ ths, ths2 @ [th1], ths3 @ [th2'], ctxt')
end;
- fun mk_ind_proof thy thss =
- Goal.prove_global thy [] prems' concl' (fn {prems = ihyps, context = ctxt} =>
+ fun mk_ind_proof ctxt' thss =
+ Goal.prove ctxt' [] prems' concl' (fn {prems = ihyps, context = ctxt} =>
let val th = Goal.prove ctxt [] [] concl (fn {context, ...} =>
rtac raw_induct 1 THEN
EVERY (maps (fn (((((_, sets, oprems, _),
@@ -363,7 +372,7 @@
fold_rev (NominalPackage.mk_perm []) pis t) sets';
val (P, ts) = strip_comb (HOLogic.dest_Trueprop (term_of concl));
val gprems1 = List.mapPartial (fn (th, t) =>
- if null (OldTerm.term_frees t inter ps) then SOME th
+ if null (preds_of ps t) then SOME th
else
map_thm ctxt' (split_conj (K o I) names)
(etac conjunct1 1) monos NONE th)
@@ -405,7 +414,7 @@
(fold_rev (mk_perm_bool o cterm_of thy)
(pis' @ pis) th));
val gprems2 = map (fn (th, t) =>
- if null (OldTerm.term_frees t inter ps) then mk_pi th
+ if null (preds_of ps t) then mk_pi th
else
mk_pi (the (map_thm ctxt (inst_conj_all names ps (rev pis''))
(inst_conj_all_tac (length pis'')) monos (SOME t) th)))
@@ -435,38 +444,42 @@
REPEAT (REPEAT (resolve_tac [conjI, impI] 1) THEN
etac impE 1 THEN atac 1 THEN REPEAT (etac @{thm allE_Nil} 1) THEN
asm_full_simp_tac (simpset_of thy) 1)
- end);
+ end) |>
+ fresh_postprocess |>
+ singleton (ProofContext.export ctxt' ctxt);
in
- thy |>
- ProofContext.init |>
- Proof.theorem_i NONE (fn thss => ProofContext.theory (fn thy =>
+ ctxt'' |>
+ Proof.theorem_i NONE (fn thss => fn ctxt =>
let
- val ctxt = ProofContext.init thy;
val rec_name = space_implode "_" (map Sign.base_name names);
+ val rec_qualified = Binding.qualify false rec_name;
val ind_case_names = RuleCases.case_names induct_cases;
val induct_cases' = InductivePackage.partition_rules' raw_induct
(intrs ~~ induct_cases);
val thss' = map (map atomize_intr) thss;
val thsss = InductivePackage.partition_rules' raw_induct (intrs ~~ thss');
val strong_raw_induct =
- mk_ind_proof thy thss' |> InductivePackage.rulify;
+ mk_ind_proof ctxt thss' |> InductivePackage.rulify;
val strong_induct =
if length names > 1 then
(strong_raw_induct, [ind_case_names, RuleCases.consumes 0])
else (strong_raw_induct RSN (2, rev_mp),
[ind_case_names, RuleCases.consumes 1]);
- val ([strong_induct'], thy') = thy |>
- Sign.add_path rec_name |>
- PureThy.add_thms [((Binding.name "strong_induct", #1 strong_induct), #2 strong_induct)];
+ val ((_, [strong_induct']), ctxt') = LocalTheory.note Thm.theoremK
+ ((rec_qualified (Binding.name "strong_induct"),
+ map (Attrib.internal o K) (#2 strong_induct)), [#1 strong_induct])
+ ctxt;
val strong_inducts =
- ProjectRule.projects ctxt (1 upto length names) strong_induct'
+ ProjectRule.projects ctxt' (1 upto length names) strong_induct'
in
- thy' |>
- PureThy.add_thmss [((Binding.name "strong_inducts", strong_inducts),
- [ind_case_names, RuleCases.consumes 1])] |> snd |>
- Sign.parent_path
- end))
+ ctxt' |>
+ LocalTheory.note Thm.theoremK
+ ((rec_qualified (Binding.name "strong_inducts"),
+ [Attrib.internal (K ind_case_names),
+ Attrib.internal (K (RuleCases.consumes 1))]),
+ strong_inducts) |> snd
+ end)
(map (map (rulify_term thy #> rpair [])) vc_compat)
end;
@@ -476,11 +489,11 @@
local structure P = OuterParse and K = OuterKeyword in
val _ =
- OuterSyntax.command "nominal_inductive2"
+ OuterSyntax.local_theory_to_proof "nominal_inductive2"
"prove strong induction theorem for inductive predicate involving nominal datatypes" K.thy_goal
- (P.name -- Scan.optional (P.$$$ "avoids" |-- P.enum1 "|" (P.name --
+ (P.xname -- Scan.optional (P.$$$ "avoids" |-- P.enum1 "|" (P.name --
(P.$$$ ":" |-- P.and_list1 P.term))) [] >> (fn (name, avoids) =>
- Toplevel.print o Toplevel.theory_to_proof (prove_strong_ind name avoids)));
+ prove_strong_ind name avoids));
end;
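
The reworked avoids handling in nominal_inductive2 no longer rejects several avoided
sets of the same atom type within one rule; such sets are now joined by union.  A
purely hypothetical invocation, with predicate, rule and term names invented for
illustration and the generated vc-compatibility goals deliberately left open:

  (* hypothetical usage; names are placeholders, obligations left open *)
  nominal_inductive2 typing
    avoids T_Let: "{x}" and "set (vars p)"
    sorry
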
--- a/src/HOL/Nominal/nominal_package.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Nominal/nominal_package.ML Wed Mar 04 10:45:52 2009 +0100
@@ -547,10 +547,10 @@
HOLogic.mk_Trueprop (Free (List.nth (rep_set_names, k),
T --> HOLogic.boolT) $ free')) :: prems
| _ => prems,
- snd (foldr mk_abs_fun (j', free) Ts) :: ts)
+ snd (List.foldr mk_abs_fun (j', free) Ts) :: ts)
end;
- val (_, _, prems, ts) = foldr mk_prem (1, 1, [], []) cargs;
+ val (_, _, prems, ts) = List.foldr mk_prem (1, 1, [], []) cargs;
val concl = HOLogic.mk_Trueprop (Free (s, T --> HOLogic.boolT) $
list_comb (Const (cname, map fastype_of ts ---> T), ts))
in Logic.list_implies (prems, concl)
@@ -716,7 +716,7 @@
Type ("Nominal.noption", [U])) $ x $ t
end;
- val (ty_idxs, _) = foldl
+ val (ty_idxs, _) = List.foldl
(fn ((i, ("Nominal.noption", _, _)), p) => p
| ((i, _), (ty_idxs, j)) => (ty_idxs @ [(i, j)], j + 1)) ([], 0) descr;
@@ -738,7 +738,7 @@
val SOME index = AList.lookup op = ty_idxs i;
val (constrs1, constrs2) = ListPair.unzip
(map (fn (cname, cargs) => apfst (pair (strip_nth_name 2 (strip_nth_name 1 cname)))
- (foldl_map (fn (dts, dt) =>
+ (Library.foldl_map (fn (dts, dt) =>
let val (dts', dt') = strip_option dt
in (dts @ dts' @ [reindex dt'], (length dts, length dts')) end)
([], cargs))) constrs)
@@ -780,7 +780,7 @@
in
(j + length dts + 1,
xs @ x :: l_args,
- foldr mk_abs_fun
+ List.foldr mk_abs_fun
(case dt of
DtRec k => if k < length new_type_names then
Const (List.nth (rep_names, k), typ_of_dtyp descr'' sorts dt -->
@@ -789,7 +789,7 @@
| _ => x) xs :: r_args)
end
- val (_, l_args, r_args) = foldr constr_arg (1, [], []) cargs;
+ val (_, l_args, r_args) = List.foldr constr_arg (1, [], []) cargs;
val abs_name = Sign.intern_const thy ("Abs_" ^ tname);
val rep_name = Sign.intern_const thy ("Rep_" ^ tname);
val constrT = map fastype_of l_args ---> T;
@@ -909,7 +909,7 @@
map perm (xs @ [x]) @ r_args)
end
- val (_, l_args, r_args) = foldr constr_arg (1, [], []) dts;
+ val (_, l_args, r_args) = List.foldr constr_arg (1, [], []) dts;
val c = Const (cname, map fastype_of l_args ---> T)
in
Goal.prove_global thy8 [] []
@@ -958,10 +958,10 @@
(j + length dts + 1,
xs @ (x :: args1), ys @ (y :: args2),
HOLogic.mk_eq
- (foldr mk_abs_fun x xs, foldr mk_abs_fun y ys) :: eqs)
+ (List.foldr mk_abs_fun x xs, List.foldr mk_abs_fun y ys) :: eqs)
end;
- val (_, args1, args2, eqs) = foldr make_inj (1, [], [], []) dts;
+ val (_, args1, args2, eqs) = List.foldr make_inj (1, [], [], []) dts;
val Ts = map fastype_of args1;
val c = Const (cname, Ts ---> T)
in
@@ -997,10 +997,10 @@
val x = mk_Free "x" (typ_of_dtyp descr'' sorts dt) (j + length dts)
in
(j + length dts + 1,
- xs @ (x :: args1), foldr mk_abs_fun x xs :: args2)
+ xs @ (x :: args1), List.foldr mk_abs_fun x xs :: args2)
end;
- val (_, args1, args2) = foldr process_constr (1, [], []) dts;
+ val (_, args1, args2) = List.foldr process_constr (1, [], []) dts;
val Ts = map fastype_of args1;
val c = list_comb (Const (cname, Ts ---> T), args1);
fun supp t =
@@ -1413,7 +1413,7 @@
val _ = warning "defining recursion combinator ...";
- val used = foldr OldTerm.add_typ_tfree_names [] recTs;
+ val used = List.foldr OldTerm.add_typ_tfree_names [] recTs;
val (rec_result_Ts', rec_fn_Ts') = DatatypeProp.make_primrec_Ts descr' sorts used;
--- a/src/HOL/Nominal/nominal_primrec.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Nominal/nominal_primrec.ML Wed Mar 04 10:45:52 2009 +0100
@@ -210,7 +210,7 @@
val def_name = Thm.def_name (Sign.base_name fname);
val rhs = singleton (Syntax.check_terms ctxt) raw_rhs;
val SOME var = get_first (fn ((b, _), mx) =>
- if Binding.base_name b = fname then SOME (b, mx) else NONE) fixes;
+ if Binding.name_of b = fname then SOME (b, mx) else NONE) fixes;
in
((var, ((Binding.name def_name, []), rhs)),
subst_bounds (rev (map Free frees), strip_abs_body rhs))
@@ -248,7 +248,7 @@
val (names_atts, spec') = split_list spec;
val eqns' = map unquantify spec'
val eqns = fold_rev (process_eqn lthy (fn v => Variable.is_fixed lthy v
- orelse exists (fn ((w, _), _) => v = Binding.base_name w) fixes)) spec' [];
+ orelse exists (fn ((w, _), _) => v = Binding.name_of w) fixes)) spec' [];
val dt_info = NominalPackage.get_nominal_datatypes (ProofContext.theory_of lthy);
val lsrs :: lsrss = maps (fn (_, (_, _, eqns)) =>
map (fn (_, (ls, _, rs, _, _)) => ls @ rs) eqns) eqns
@@ -285,7 +285,7 @@
set_group ? LocalTheory.set_group (serial_string ()) |>
fold_map (apfst (snd o snd) oo
LocalTheory.define Thm.definitionK o fst) defs';
- val qualify = Binding.qualify
+ val qualify = Binding.qualify false
(space_implode "_" (map (Sign.base_name o #1) defs));
val names_atts' = map (apfst qualify) names_atts;
val cert = cterm_of (ProofContext.theory_of lthy');
--- a/src/HOL/Nominal/nominal_thmdecls.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Nominal/nominal_thmdecls.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
-(* ID: "$Id$"
- Authors: Julien Narboux and Christian Urban
+(* Authors: Julien Narboux and Christian Urban
This file introduces the infrastructure for the lemma
declaration "eqvts" "bijs" and "freshs".
@@ -63,10 +62,11 @@
then tac THEN print_tac ("after "^msg)
else tac
-fun tactic_eqvt ctx orig_thm pi typi =
+fun tactic_eqvt ctx orig_thm pi pi' =
let
- val mypi = Thm.cterm_of ctx (Var (pi,typi))
- val mypifree = Thm.cterm_of ctx (Const ("List.rev",typi --> typi) $ Free (fst pi,typi))
+ val mypi = Thm.cterm_of ctx pi
+ val T = fastype_of pi'
+ val mypifree = Thm.cterm_of ctx (Const ("List.rev", T --> T) $ pi')
val perm_pi_simp = PureThy.get_thms ctx "perm_pi_simp"
in
EVERY [tactic ("iffI applied",rtac iffI 1),
@@ -80,14 +80,19 @@
full_simp_tac (HOL_basic_ss addsimps perm_pi_simp) 1)]
end;
-fun get_derived_thm thy hyp concl orig_thm pi typi =
- let
- val lhs = (Const("Nominal.perm", typi --> HOLogic.boolT --> HOLogic.boolT) $ Var(pi,typi) $ hyp)
- val goal_term = Logic.unvarify (HOLogic.mk_Trueprop (HOLogic.mk_eq (lhs,concl)))
- val _ = Display.print_cterm (cterm_of thy goal_term)
- in
- Goal.prove_global thy [] [] goal_term (fn _ => (tactic_eqvt thy orig_thm pi typi))
- end
+fun get_derived_thm ctxt hyp concl orig_thm pi typi =
+ let
+ val thy = ProofContext.theory_of ctxt;
+ val pi' = Var (pi, typi);
+ val lhs = Const ("Nominal.perm", typi --> HOLogic.boolT --> HOLogic.boolT) $ pi' $ hyp;
+ val ([goal_term, pi''], ctxt') = Variable.import_terms false
+ [HOLogic.mk_Trueprop (HOLogic.mk_eq (lhs, concl)), pi'] ctxt
+ val _ = Display.print_cterm (cterm_of thy goal_term)
+ in
+ Goal.prove ctxt' [] [] goal_term
+ (fn _ => tactic_eqvt thy orig_thm pi' pi'') |>
+ singleton (ProofContext.export ctxt' ctxt)
+ end
(* replaces every variable x in t with pi o x *)
fun apply_pi trm (pi,typi) =
@@ -145,7 +150,8 @@
if (apply_pi hyp (pi,typi) = concl)
then
(warning ("equivariance lemma of the relational form");
- [orig_thm, get_derived_thm thy hyp concl orig_thm pi typi])
+ [orig_thm,
+ get_derived_thm (Context.proof_of context) hyp concl orig_thm pi typi])
else raise EQVT_FORM "Type Implication"
end
(* case: eqvt-lemma is of the equational form *)
--- a/src/HOL/NumberTheory/Chinese.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/NumberTheory/Chinese.thy Wed Mar 04 10:45:52 2009 +0100
@@ -90,10 +90,8 @@
"k \<le> i --> i \<le> k + l --> mf i dvd funprod mf k l"
apply (induct l)
apply auto
- apply (rule_tac [1] zdvd_zmult2)
- apply (rule_tac [2] zdvd_zmult)
- apply (subgoal_tac "i = Suc (k + l)")
- apply (simp_all (no_asm_simp))
+ apply (subgoal_tac "i = Suc (k + l)")
+ apply (simp_all (no_asm_simp))
done
lemma funsum_mod:
@@ -103,7 +101,7 @@
apply (rule trans)
apply (rule mod_add_eq)
apply simp
- apply (rule zmod_zadd_right_eq [symmetric])
+ apply (rule mod_add_right_eq [symmetric])
done
lemma funsum_zero [rule_format (no_asm)]:
@@ -196,8 +194,8 @@
apply (case_tac [2] "i = n")
apply (simp_all (no_asm_simp))
apply (case_tac [3] "j < i")
- apply (rule_tac [3] zdvd_zmult2)
- apply (rule_tac [4] zdvd_zmult)
+ apply (rule_tac [3] dvd_mult2)
+ apply (rule_tac [4] dvd_mult)
apply (rule_tac [!] funprod_zdvd)
apply arith
apply arith
@@ -217,8 +215,8 @@
apply (subst funsum_mod)
apply (subst funsum_oneelem)
apply auto
- apply (subst zdvd_iff_zmod_eq_0 [symmetric])
- apply (rule zdvd_zmult)
+ apply (subst dvd_eq_mod_eq_0 [symmetric])
+ apply (rule dvd_mult)
apply (rule x_sol_lin_aux)
apply auto
done
@@ -238,20 +236,20 @@
apply safe
apply (tactic {* stac (thm "zcong_zmod") 3 *})
apply (tactic {* stac (thm "mod_mult_eq") 3 *})
- apply (tactic {* stac (thm "zmod_zdvd_zmod") 3 *})
- apply (tactic {* stac (thm "x_sol_lin") 5 *})
- apply (tactic {* stac (thm "mod_mult_eq" RS sym) 7 *})
- apply (tactic {* stac (thm "zcong_zmod" RS sym) 7 *})
- apply (subgoal_tac [7]
+ apply (tactic {* stac (thm "mod_mod_cancel") 3 *})
+ apply (tactic {* stac (thm "x_sol_lin") 4 *})
+ apply (tactic {* stac (thm "mod_mult_eq" RS sym) 6 *})
+ apply (tactic {* stac (thm "zcong_zmod" RS sym) 6 *})
+ apply (subgoal_tac [6]
"0 \<le> xilin_sol i n kf bf mf \<and> xilin_sol i n kf bf mf < mf i
\<and> [kf i * mhf mf n i * xilin_sol i n kf bf mf = bf i] (mod mf i)")
- prefer 7
+ prefer 6
apply (simp add: zmult_ac)
apply (unfold xilin_sol_def)
- apply (tactic {* asm_simp_tac @{simpset} 7 *})
- apply (rule_tac [7] ex1_implies_ex [THEN someI_ex])
- apply (rule_tac [7] unique_xi_sol)
- apply (rule_tac [4] funprod_zdvd)
+ apply (tactic {* asm_simp_tac @{simpset} 6 *})
+ apply (rule_tac [6] ex1_implies_ex [THEN someI_ex])
+ apply (rule_tac [6] unique_xi_sol)
+ apply (rule_tac [3] funprod_zdvd)
apply (unfold m_cond_def)
apply (rule funprod_pos [THEN pos_mod_sign])
apply (rule_tac [2] funprod_pos [THEN pos_mod_bound])
--- a/src/HOL/NumberTheory/Euler.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/NumberTheory/Euler.thy Wed Mar 04 10:45:52 2009 +0100
@@ -272,7 +272,7 @@
text {* \medskip Prove the final part of Euler's Criterion: *}
lemma aux__1: "[| ~([x = 0] (mod p)); [y ^ 2 = x] (mod p)|] ==> ~(p dvd y)"
- by (metis dvdI power2_eq_square zcong_sym zcong_trans zcong_zero_equiv_div zdvd_trans)
+ by (metis dvdI power2_eq_square zcong_sym zcong_trans zcong_zero_equiv_div dvd_trans)
lemma aux__2: "2 * nat((p - 1) div 2) = nat (2 * ((p - 1) div 2))"
by (auto simp add: nat_mult_distrib)
--- a/src/HOL/NumberTheory/EulerFermat.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/NumberTheory/EulerFermat.thy Wed Mar 04 10:45:52 2009 +0100
@@ -155,7 +155,7 @@
prefer 2
apply (subst zdvd_iff_zgcd [symmetric])
apply (rule_tac [4] zgcd_zcong_zgcd)
- apply (simp_all add: zdvd_zminus_iff zcong_sym)
+ apply (simp_all add: zcong_sym)
done
--- a/src/HOL/NumberTheory/Gauss.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/NumberTheory/Gauss.thy Wed Mar 04 10:45:52 2009 +0100
@@ -64,14 +64,14 @@
qed
lemma p_eq: "p = (2 * (p - 1) div 2) + 1"
- using zdiv_zmult_self2 [of 2 "p - 1"] by auto
+ using div_mult_self1_is_id [of 2 "p - 1"] by auto
lemma (in -) zodd_imp_zdiv_eq: "x \<in> zOdd ==> 2 * (x - 1) div 2 = 2 * ((x - 1) div 2)"
apply (frule odd_minus_one_even)
apply (simp add: zEven_def)
apply (subgoal_tac "2 \<noteq> 0")
- apply (frule_tac b = "2 :: int" and a = "x - 1" in zdiv_zmult_self2)
+ apply (frule_tac b = "2 :: int" and a = "x - 1" in div_mult_self1_is_id)
apply (auto simp add: even_div_2_prop2)
done
--- a/src/HOL/NumberTheory/Int2.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/NumberTheory/Int2.thy Wed Mar 04 10:45:52 2009 +0100
@@ -18,7 +18,7 @@
lemma zpower_zdvd_prop1:
"0 < n \<Longrightarrow> p dvd y \<Longrightarrow> p dvd ((y::int) ^ n)"
- by (induct n) (auto simp add: zdvd_zmult zdvd_zmult2 [of p y])
+ by (induct n) (auto simp add: dvd_mult2 [of p y])
lemma zdvd_bounds: "n dvd m ==> m \<le> (0::int) | n \<le> m"
proof -
@@ -42,7 +42,7 @@
apply simp
apply (frule zprime_zdvd_zmult_better)
apply simp
- apply force
+ apply (force simp del:dvd_mult)
done
lemma div_prop1: "[| 0 < z; (x::int) < y * z |] ==> x div z < y"
@@ -86,7 +86,7 @@
by (auto simp add: zcong_def)
lemma zcong_id: "[m = 0] (mod m)"
- by (auto simp add: zcong_def zdvd_0_right)
+ by (auto simp add: zcong_def)
lemma zcong_shift: "[a = b] (mod m) ==> [a + c = b + c] (mod m)"
by (auto simp add: zcong_refl zcong_zadd)
--- a/src/HOL/NumberTheory/IntPrimes.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/NumberTheory/IntPrimes.thy Wed Mar 04 10:45:52 2009 +0100
@@ -50,7 +50,7 @@
lemma zrelprime_zdvd_zmult_aux:
"zgcd n k = 1 ==> k dvd m * n ==> 0 \<le> m ==> k dvd m"
- by (metis abs_of_nonneg zdvd_triv_right zgcd_greatest_iff zgcd_zmult_distrib2_abs zmult_1_right)
+ by (metis abs_of_nonneg dvd_triv_right zgcd_greatest_iff zgcd_zmult_distrib2_abs zmult_1_right)
lemma zrelprime_zdvd_zmult: "zgcd n k = 1 ==> k dvd m * n ==> k dvd m"
apply (case_tac "0 \<le> m")
@@ -73,7 +73,7 @@
lemma zprime_imp_zrelprime:
"zprime p ==> \<not> p dvd n ==> zgcd n p = 1"
apply (auto simp add: zprime_def)
- apply (metis zgcd_commute zgcd_geq_zero zgcd_zdvd1 zgcd_zdvd2)
+ apply (metis zgcd_geq_zero zgcd_zdvd1 zgcd_zdvd2)
done
lemma zless_zprime_imp_zrelprime:
@@ -93,9 +93,7 @@
done
lemma zgcd_zdvd_zgcd_zmult: "zgcd m n dvd zgcd (k * m) n"
- apply (simp add: zgcd_greatest_iff)
- apply (blast intro: zdvd_trans dvd_triv_right)
- done
+by (simp add: zgcd_greatest_iff)
lemma zgcd_zmult_zdvd_zgcd:
"zgcd k n = 1 ==> zgcd (k * m) n dvd zgcd m n"
@@ -127,20 +125,20 @@
by (unfold zcong_def, auto)
lemma zcong_sym: "[a = b] (mod m) = [b = a] (mod m)"
- unfolding zcong_def minus_diff_eq [of a, symmetric] zdvd_zminus_iff ..
+ unfolding zcong_def minus_diff_eq [of a, symmetric] dvd_minus_iff ..
lemma zcong_zadd:
"[a = b] (mod m) ==> [c = d] (mod m) ==> [a + c = b + d] (mod m)"
apply (unfold zcong_def)
apply (rule_tac s = "(a - b) + (c - d)" in subst)
- apply (rule_tac [2] zdvd_zadd, auto)
+ apply (rule_tac [2] dvd_add, auto)
done
lemma zcong_zdiff:
"[a = b] (mod m) ==> [c = d] (mod m) ==> [a - c = b - d] (mod m)"
apply (unfold zcong_def)
apply (rule_tac s = "(a - b) - (c - d)" in subst)
- apply (rule_tac [2] zdvd_zdiff, auto)
+ apply (rule_tac [2] dvd_diff, auto)
done
lemma zcong_trans:
@@ -151,8 +149,8 @@
"[a = b] (mod m) ==> [c = d] (mod m) ==> [a * c = b * d] (mod m)"
apply (rule_tac b = "b * c" in zcong_trans)
apply (unfold zcong_def)
- apply (metis zdiff_zmult_distrib2 zdvd_zmult zmult_commute)
- apply (metis zdiff_zmult_distrib2 zdvd_zmult)
+ apply (metis zdiff_zmult_distrib2 dvd_mult zmult_commute)
+ apply (metis zdiff_zmult_distrib2 dvd_mult)
done
lemma zcong_scalar: "[a = b] (mod m) ==> [a * k = b * k] (mod m)"
@@ -163,7 +161,7 @@
lemma zcong_zmult_self: "[a * m = b * m] (mod m)"
apply (unfold zcong_def)
- apply (rule zdvd_zdiff, simp_all)
+ apply (rule dvd_diff, simp_all)
done
lemma zcong_square:
@@ -191,7 +189,7 @@
apply (simp_all add: zdiff_zmult_distrib)
apply (subgoal_tac "m dvd (-(a * k - b * k))")
apply simp
- apply (subst zdvd_zminus_iff, assumption)
+ apply (subst dvd_minus_iff, assumption)
done
lemma zcong_cancel2:
@@ -206,10 +204,10 @@
apply (subgoal_tac "m dvd n * ka")
apply (subgoal_tac "m dvd ka")
apply (case_tac [2] "0 \<le> ka")
- apply (metis zdvd_mult_div_cancel zdvd_refl zdvd_zminus2_iff zdvd_zmultD2 zgcd_zminus zmult_commute zmult_zminus zrelprime_zdvd_zmult)
- apply (metis IntDiv.zdvd_abs1 abs_of_nonneg zadd_0 zgcd_0_left zgcd_commute zgcd_zadd_zmult zgcd_zdvd_zgcd_zmult zgcd_zmult_distrib2_abs zmult_1_right zmult_commute)
- apply (metis abs_eq_0 int_0_neq_1 mult_le_0_iff zdvd_mono zdvd_mult_cancel zdvd_mult_cancel1 zdvd_refl zdvd_triv_left zdvd_zmult2 zero_le_mult_iff zgcd_greatest_iff zle_anti_sym zle_linear zle_refl zmult_commute zrelprime_zdvd_zmult)
- apply (metis zdvd_triv_left)
+ apply (metis zdvd_mult_div_cancel dvd_refl dvd_mult_left zmult_commute zrelprime_zdvd_zmult)
+ apply (metis abs_dvd_iff abs_of_nonneg zadd_0 zgcd_0_left zgcd_commute zgcd_zadd_zmult zgcd_zdvd_zgcd_zmult zgcd_zmult_distrib2_abs zmult_1_right zmult_commute)
+ apply (metis mult_le_0_iff zdvd_mono zdvd_mult_cancel dvd_triv_left zero_le_mult_iff zle_anti_sym zle_linear zle_refl zmult_commute zrelprime_zdvd_zmult)
+ apply (metis dvd_triv_left)
done
lemma zcong_zless_imp_eq:
@@ -217,7 +215,7 @@
a < m ==> 0 \<le> b ==> b < m ==> [a = b] (mod m) ==> a = b"
apply (unfold zcong_def dvd_def, auto)
apply (drule_tac f = "\<lambda>z. z mod m" in arg_cong)
- apply (metis diff_add_cancel mod_pos_pos_trivial zadd_0 zadd_commute zmod_eq_0_iff zmod_zadd_right_eq)
+ apply (metis diff_add_cancel mod_pos_pos_trivial zadd_0 zadd_commute zmod_eq_0_iff mod_add_right_eq)
done
lemma zcong_square_zless:
@@ -237,7 +235,7 @@
lemma zcong_zless_0:
"0 \<le> a ==> a < m ==> [a = 0] (mod m) ==> a = 0"
apply (unfold zcong_def dvd_def, auto)
- apply (metis div_pos_pos_trivial linorder_not_less zdiv_zmult_self2 zle_refl zle_trans)
+ apply (metis div_pos_pos_trivial linorder_not_less div_mult_self1_is_id)
done
lemma zcong_zless_unique:
@@ -302,7 +300,7 @@
lemma zmod_zdvd_zmod:
"0 < (m::int) ==> m dvd b ==> (a mod b mod m) = (a mod m)"
- by (rule zmod_zmod_cancel)
+ by (rule mod_mod_cancel)
subsection {* Extended GCD *}
@@ -403,7 +401,7 @@
prefer 2
apply simp
apply (unfold zcong_def)
- apply (simp (no_asm) add: zmult_commute zdvd_zminus_iff)
+ apply (simp (no_asm) add: zmult_commute)
done
lemma zcong_lineq_unique:
--- a/src/HOL/NumberTheory/Quadratic_Reciprocity.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/NumberTheory/Quadratic_Reciprocity.thy Wed Mar 04 10:45:52 2009 +0100
@@ -322,7 +322,7 @@
by (rule zdiv_mono1) (insert p_g_2, auto)
then show "b \<le> (q * a) div p"
apply (subgoal_tac "p \<noteq> 0")
- apply (frule zdiv_zmult_self2, force)
+ apply (frule div_mult_self1_is_id, force)
apply (insert p_g_2, auto)
done
qed
@@ -356,7 +356,7 @@
by (rule zdiv_mono1) (insert q_g_2, auto)
then show "a \<le> (p * b) div q"
apply (subgoal_tac "q \<noteq> 0")
- apply (frule zdiv_zmult_self2, force)
+ apply (frule div_mult_self1_is_id, force)
apply (insert q_g_2, auto)
done
qed
--- a/src/HOL/NumberTheory/Residues.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/NumberTheory/Residues.thy Wed Mar 04 10:45:52 2009 +0100
@@ -48,7 +48,7 @@
by (auto simp add: StandardRes_def zcong_zmod_eq)
lemma StandardRes_prop3: "(~[x = 0] (mod p)) = (~(StandardRes p x = 0))"
- by (auto simp add: StandardRes_def zcong_def zdvd_iff_zmod_eq_0)
+ by (auto simp add: StandardRes_def zcong_def dvd_eq_mod_eq_0)
lemma StandardRes_prop4: "2 < m
==> [StandardRes m x * StandardRes m y = (x * y)] (mod m)"
--- a/src/HOL/NumberTheory/WilsonBij.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/NumberTheory/WilsonBij.thy Wed Mar 04 10:45:52 2009 +0100
@@ -57,7 +57,7 @@
apply (rule_tac [2] zdvd_not_zless)
apply (subgoal_tac "p dvd 1")
prefer 2
- apply (subst zdvd_zminus_iff [symmetric])
+ apply (subst dvd_minus_iff [symmetric])
apply auto
done
@@ -79,7 +79,7 @@
apply (simp add: OrderedGroup.diff_diff_eq diff_diff_eq2 zdiff_zmult_distrib2)
apply (rule_tac s = "p dvd -((a + 1) + (p * -a))" in trans)
apply (simp add: mult_commute)
- apply (subst zdvd_zminus_iff)
+ apply (subst dvd_minus_iff)
apply (subst zdvd_reduce)
apply (rule_tac s = "p dvd (a + 1) + (p * -1)" in trans)
apply (subst zdvd_reduce)
--- a/src/HOL/NumberTheory/WilsonRuss.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/NumberTheory/WilsonRuss.thy Wed Mar 04 10:45:52 2009 +0100
@@ -68,7 +68,7 @@
apply (rule_tac [2] zdvd_not_zless)
apply (subgoal_tac "p dvd 1")
prefer 2
- apply (subst zdvd_zminus_iff [symmetric], auto)
+ apply (subst dvd_minus_iff [symmetric], auto)
done
lemma inv_not_1:
@@ -87,7 +87,7 @@
apply (simp add: OrderedGroup.diff_diff_eq diff_diff_eq2 zdiff_zmult_distrib2)
apply (rule_tac s = "p dvd -((a + 1) + (p * -a))" in trans)
apply (simp add: mult_commute)
- apply (subst zdvd_zminus_iff)
+ apply (subst dvd_minus_iff)
apply (subst zdvd_reduce)
apply (rule_tac s = "p dvd (a + 1) + (p * -1)" in trans)
apply (subst zdvd_reduce, auto)
--- a/src/HOL/Orderings.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Orderings.thy Wed Mar 04 10:45:52 2009 +0100
@@ -331,7 +331,7 @@
fun struct_tac ((s, [eq, le, less]), thms) prems =
let
- fun decomp thy (Trueprop $ t) =
+ fun decomp thy (@{const Trueprop} $ t) =
let
fun excluded t =
(* exclude numeric types: linear arithmetic subsumes transitivity *)
@@ -350,7 +350,8 @@
of NONE => NONE
| SOME (t1, rel, t2) => SOME (t1, "~" ^ rel, t2))
| dec x = rel x;
- in dec t end;
+ in dec t end
+ | decomp thy _ = NONE;
in
case s of
"order" => Order_Tac.partial_tac decomp thms prems
--- a/src/HOL/Parity.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Parity.thy Wed Mar 04 10:45:52 2009 +0100
@@ -228,20 +228,9 @@
lemma zero_le_odd_power: "odd n ==>
(0 <= (x::'a::{recpower,ordered_idom}) ^ n) = (0 <= x)"
- apply (simp add: odd_nat_equiv_def2)
- apply (erule exE)
- apply (erule ssubst)
- apply (subst power_Suc)
- apply (subst power_add)
- apply (subst zero_le_mult_iff)
- apply auto
- apply (subgoal_tac "x = 0 & y > 0")
- apply (erule conjE, assumption)
- apply (subst power_eq_0_iff [symmetric])
- apply (subgoal_tac "0 <= x^y * x^y")
- apply simp
- apply (rule zero_le_square)+
- done
+apply (auto simp: odd_nat_equiv_def2 power_Suc power_add zero_le_mult_iff)
+apply (metis field_power_not_zero no_zero_divirors_neq0 order_antisym_conv zero_le_square)
+done
lemma zero_le_power_eq[presburger]: "(0 <= (x::'a::{recpower,ordered_idom}) ^ n) =
(even n | (odd n & 0 <= x))"
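
The shortened proof of zero_le_odd_power keeps the same statement: for odd n, the power x ^ n is nonnegative exactly when x is. A quick numeric sanity check over a small integer range (Python, illustrative only; the lemma itself is stated for recpower/ordered_idom types such as int):

    # zero_le_odd_power, sampled: odd n ==> (0 <= x^n) = (0 <= x).
    for n in (1, 3, 5, 7):
        for x in range(-6, 7):
            assert (0 <= x ** n) == (0 <= x)
    print("zero_le_odd_power check passed")
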
--- a/src/HOL/Plain.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Plain.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,7 +1,7 @@
header {* Plain HOL *}
theory Plain
-imports Datatype FunDef Record Extraction Divides Fact
+imports Datatype FunDef Record Extraction Divides
begin
text {*
--- a/src/HOL/Power.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Power.thy Wed Mar 04 10:45:52 2009 +0100
@@ -31,7 +31,7 @@
by (induct n) (simp_all add: power_Suc)
lemma power_one_right [simp]: "(a::'a::recpower) ^ 1 = a"
- by (simp add: power_Suc)
+ unfolding One_nat_def by (simp add: power_Suc)
lemma power_commutes: "(a::'a::recpower) ^ n * a = a * a ^ n"
by (induct n) (simp_all add: power_Suc mult_assoc)
@@ -143,11 +143,13 @@
done
lemma power_eq_0_iff [simp]:
- "(a^n = 0) = (a = (0::'a::{ring_1_no_zero_divisors,recpower}) & n>0)"
+ "(a^n = 0) \<longleftrightarrow>
+ (a = (0::'a::{mult_zero,zero_neq_one,no_zero_divisors,recpower}) & n\<noteq>0)"
apply (induct "n")
-apply (auto simp add: power_Suc zero_neq_one [THEN not_sym])
+apply (auto simp add: power_Suc zero_neq_one [THEN not_sym] no_zero_divisors)
done
+
lemma field_power_not_zero:
"a \<noteq> (0::'a::{ring_1_no_zero_divisors,recpower}) ==> a^n \<noteq> 0"
by force
@@ -324,6 +326,24 @@
shows "\<lbrakk>a ^ n = b ^ n; 0 \<le> a; 0 \<le> b; 0 < n\<rbrakk> \<Longrightarrow> a = b"
by (cases n, simp_all, rule power_inject_base)
+text {* The divides relation *}
+
+lemma le_imp_power_dvd:
+ fixes a :: "'a::{comm_semiring_1,recpower}"
+ assumes "m \<le> n" shows "a^m dvd a^n"
+proof
+ have "a^n = a^(m + (n - m))"
+ using `m \<le> n` by simp
+ also have "\<dots> = a^m * a^(n - m)"
+ by (rule power_add)
+ finally show "a^n = a^m * a^(n - m)" .
+qed
+
+lemma power_le_dvd:
+ fixes a b :: "'a::{comm_semiring_1,recpower}"
+ shows "a^n dvd b \<Longrightarrow> m \<le> n \<Longrightarrow> a^m dvd b"
+ by (rule dvd_trans [OF le_imp_power_dvd])
+
subsection{*Exponentiation for the Natural Numbers*}
@@ -346,12 +366,19 @@
"of_nat (m ^ n) = (of_nat m::'a::{semiring_1,recpower}) ^ n"
by (induct n, simp_all add: power_Suc of_nat_mult)
-lemma nat_one_le_power [simp]: "1 \<le> i ==> Suc 0 \<le> i^n"
-by (insert one_le_power [of i n], simp)
+lemma nat_one_le_power [simp]: "Suc 0 \<le> i ==> Suc 0 \<le> i^n"
+by (rule one_le_power [of i n, unfolded One_nat_def])
lemma nat_zero_less_power_iff [simp]: "(x^n > 0) = (x > (0::nat) | n=0)"
by (induct "n", auto)
+lemma nat_power_eq_Suc_0_iff [simp]:
+ "((x::nat)^m = Suc 0) = (m = 0 | x = Suc 0)"
+by (induct_tac m, auto)
+
+lemma power_Suc_0[simp]: "(Suc 0)^n = Suc 0"
+by simp
+
text{*Valid for the naturals, but what if @{text"0<i<1"}?
Premises cannot be weakened: consider the case where @{term "i=0"},
@{term "m=1"} and @{term "n=0"}.*}
@@ -425,4 +452,3 @@
*}
end
-
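
The new lemmas le_imp_power_dvd and power_le_dvd connect exponent ordering with divisibility: if m \<le> n then a^m dvd a^n, and hence a^n dvd b implies a^m dvd b. The proof exhibits the witness a^(n - m) via power_add. A numeric illustration (Python, not part of the theory sources):

    # le_imp_power_dvd, sampled over small naturals: m <= n ==> a^m dvd a^n.
    for a in range(1, 7):
        for m in range(0, 6):
            for n in range(m, 9):
                assert (a ** n) % (a ** m) == 0   # witness: a^(n - m)
    print("le_imp_power_dvd check passed")
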
--- a/src/HOL/Presburger.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Presburger.thy Wed Mar 04 10:45:52 2009 +0100
@@ -412,19 +412,15 @@
"(((number_of v)::int) = (number_of w)) = iszero ((number_of (v + (uminus w)))::int)"
by (rule eq_number_of_eq)
-lemma mod_eq0_dvd_iff[presburger]: "(m::nat) mod n = 0 \<longleftrightarrow> n dvd m"
-unfolding dvd_eq_mod_eq_0[symmetric] ..
-
-lemma zmod_eq0_zdvd_iff[presburger]: "(m::int) mod n = 0 \<longleftrightarrow> n dvd m"
-unfolding zdvd_iff_zmod_eq_0[symmetric] ..
+declare dvd_eq_mod_eq_0[symmetric, presburger]
declare mod_1[presburger]
declare mod_0[presburger]
-declare zmod_1[presburger]
+declare mod_by_1[presburger]
declare zmod_zero[presburger]
declare zmod_self[presburger]
declare mod_self[presburger]
declare mod_by_0[presburger]
-declare nat_mod_div_trivial[presburger]
+declare mod_div_trivial[presburger]
declare div_mod_equality2[presburger]
declare div_mod_equality[presburger]
declare mod_div_equality2[presburger]
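
Instead of proving ad-hoc nat/int versions, the Presburger setup now declares the generic dvd_eq_mod_eq_0 [symmetric] (together with mod_by_1 and mod_div_trivial in place of their type-specific variants). The equivalence being registered is m mod n = 0 iff n dvd m. A non-circular numeric check (Python, illustrative only):

    # dvd_eq_mod_eq_0, sampled: (m mod n = 0)  <->  (n dvd m), for n > 0.
    for n in range(1, 13):
        for m in range(0, 60):
            dvd = any(m == n * k for k in range(0, 60))
            assert (m % n == 0) == dvd
    print("dvd_eq_mod_eq_0 check passed")
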
--- a/src/HOL/RComplete.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/RComplete.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,8 +1,8 @@
-(* Title : HOL/RComplete.thy
- Author : Jacques D. Fleuriot, University of Edinburgh
- Author : Larry Paulson, University of Cambridge
- Author : Jeremy Avigad, Carnegie Mellon University
- Author : Florian Zuleger, Johannes Hoelzl, and Simon Funke, TU Muenchen
+(* Title: HOL/RComplete.thy
+ Author: Jacques D. Fleuriot, University of Edinburgh
+ Author: Larry Paulson, University of Cambridge
+ Author: Jeremy Avigad, Carnegie Mellon University
+ Author: Florian Zuleger, Johannes Hoelzl, and Simon Funke, TU Muenchen
*)
header {* Completeness of the Reals; Floor and Ceiling Functions *}
@@ -380,33 +380,28 @@
thus "\<exists>(n::nat). x < real n" ..
qed
+instance real :: archimedean_field
+proof
+ fix r :: real
+ obtain n :: nat where "r < real n"
+ using reals_Archimedean2 ..
+ then have "r \<le> of_int (int n)"
+ unfolding real_eq_of_nat by simp
+ then show "\<exists>z. r \<le> of_int z" ..
+qed
+
lemma reals_Archimedean3:
assumes x_greater_zero: "0 < x"
shows "\<forall>(y::real). \<exists>(n::nat). y < real n * x"
-proof
- fix y
- have x_not_zero: "x \<noteq> 0" using x_greater_zero by simp
- obtain n where "y * inverse x < real (n::nat)"
- using reals_Archimedean2 ..
- hence "y * inverse x * x < real n * x"
- using x_greater_zero by (simp add: mult_strict_right_mono)
- hence "x * inverse x * y < x * real n"
- by (simp add: algebra_simps)
- hence "y < real (n::nat) * x"
- using x_not_zero by (simp add: algebra_simps)
- thus "\<exists>(n::nat). y < real n * x" ..
-qed
+ unfolding real_of_nat_def using `0 < x`
+ by (auto intro: ex_less_of_nat_mult)
lemma reals_Archimedean6:
"0 \<le> r ==> \<exists>(n::nat). real (n - 1) \<le> r & r < real (n)"
-apply (insert reals_Archimedean2 [of r], safe)
-apply (subgoal_tac "\<exists>x::nat. r < real x \<and> (\<forall>y. r < real y \<longrightarrow> x \<le> y)", auto)
-apply (rule_tac x = x in exI)
-apply (case_tac x, simp)
-apply (rename_tac x')
-apply (drule_tac x = x' in spec, simp)
-apply (rule_tac x="LEAST n. r < real n" in exI, safe)
-apply (erule LeastI, erule Least_le)
+unfolding real_of_nat_def
+apply (rule exI [where x="nat (floor r + 1)"])
+apply (insert floor_correct [of r])
+apply (simp add: nat_add_distrib of_nat_nat)
done
lemma reals_Archimedean6a: "0 \<le> r ==> \<exists>n. real (n) \<le> r & r < real (Suc n)"
@@ -414,19 +409,11 @@
lemma reals_Archimedean_6b_int:
"0 \<le> r ==> \<exists>n::int. real n \<le> r & r < real (n+1)"
-apply (drule reals_Archimedean6a, auto)
-apply (rule_tac x = "int n" in exI)
-apply (simp add: real_of_int_real_of_nat real_of_nat_Suc)
-done
+ unfolding real_of_int_def by (rule floor_exists)
lemma reals_Archimedean_6c_int:
"r < 0 ==> \<exists>n::int. real n \<le> r & r < real (n+1)"
-apply (rule reals_Archimedean_6b_int [of "-r", THEN exE], simp, auto)
-apply (rename_tac n)
-apply (drule order_le_imp_less_or_eq, auto)
-apply (rule_tac x = "- n - 1" in exI)
-apply (rule_tac [2] x = "- n" in exI, auto)
-done
+ unfolding real_of_int_def by (rule floor_exists)
subsection{*Density of the Rational Reals in the Reals*}
@@ -485,23 +472,6 @@
subsection{*Floor and Ceiling Functions from the Reals to the Integers*}
-definition
- floor :: "real => int" where
- [code del]: "floor r = (LEAST n::int. r < real (n+1))"
-
-definition
- ceiling :: "real => int" where
- "ceiling r = - floor (- r)"
-
-notation (xsymbols)
- floor ("\<lfloor>_\<rfloor>") and
- ceiling ("\<lceil>_\<rceil>")
-
-notation (HTML output)
- floor ("\<lfloor>_\<rfloor>") and
- ceiling ("\<lceil>_\<rceil>")
-
-
lemma number_of_less_real_of_int_iff [simp]:
"((number_of n) < real (m::int)) = (number_of n < m)"
apply auto
@@ -524,51 +494,23 @@
"(real (m::int) \<le> (number_of n)) = (m \<le> number_of n)"
by (simp add: linorder_not_less [symmetric])
-lemma floor_zero [simp]: "floor 0 = 0"
-apply (simp add: floor_def del: real_of_int_add)
-apply (rule Least_equality)
-apply simp_all
-done
-
-lemma floor_real_of_nat_zero [simp]: "floor (real (0::nat)) = 0"
-by auto
+lemma floor_real_of_nat_zero: "floor (real (0::nat)) = 0"
+by auto (* delete? *)
lemma floor_real_of_nat [simp]: "floor (real (n::nat)) = int n"
-apply (simp only: floor_def)
-apply (rule Least_equality)
-apply (drule_tac [2] real_of_int_of_nat_eq [THEN ssubst])
-apply (drule_tac [2] real_of_int_less_iff [THEN iffD1])
-apply simp_all
-done
+unfolding real_of_nat_def by simp
lemma floor_minus_real_of_nat [simp]: "floor (- real (n::nat)) = - int n"
-apply (simp only: floor_def)
-apply (rule Least_equality)
-apply (drule_tac [2] real_of_int_of_nat_eq [THEN ssubst])
-apply (drule_tac [2] real_of_int_minus [THEN sym, THEN subst])
-apply (drule_tac [2] real_of_int_less_iff [THEN iffD1])
-apply simp_all
-done
+unfolding real_of_nat_def by (simp add: floor_minus)
lemma floor_real_of_int [simp]: "floor (real (n::int)) = n"
-apply (simp only: floor_def)
-apply (rule Least_equality)
-apply auto
-done
+unfolding real_of_int_def by simp
lemma floor_minus_real_of_int [simp]: "floor (- real (n::int)) = - n"
-apply (simp only: floor_def)
-apply (rule Least_equality)
-apply (drule_tac [2] real_of_int_minus [THEN sym, THEN subst])
-apply auto
-done
+unfolding real_of_int_def by (simp add: floor_minus)
lemma real_lb_ub_int: " \<exists>n::int. real n \<le> r & r < real (n+1)"
-apply (case_tac "r < 0")
-apply (blast intro: reals_Archimedean_6c_int)
-apply (simp only: linorder_not_less)
-apply (blast intro: reals_Archimedean_6b_int reals_Archimedean_6c_int)
-done
+unfolding real_of_int_def by (rule floor_exists)
lemma lemma_floor:
assumes a1: "real m \<le> r" and a2: "r < real n + 1"
@@ -581,48 +523,20 @@
qed
lemma real_of_int_floor_le [simp]: "real (floor r) \<le> r"
-apply (simp add: floor_def Least_def)
-apply (insert real_lb_ub_int [of r], safe)
-apply (rule theI2)
-apply auto
-done
-
-lemma floor_mono: "x < y ==> floor x \<le> floor y"
-apply (simp add: floor_def Least_def)
-apply (insert real_lb_ub_int [of x])
-apply (insert real_lb_ub_int [of y], safe)
-apply (rule theI2)
-apply (rule_tac [3] theI2)
-apply simp
-apply (erule conjI)
-apply (auto simp add: order_eq_iff int_le_real_less)
-done
-
-lemma floor_mono2: "x \<le> y ==> floor x \<le> floor y"
-by (auto dest: order_le_imp_less_or_eq simp add: floor_mono)
+unfolding real_of_int_def by (rule of_int_floor_le)
lemma lemma_floor2: "real n < real (x::int) + 1 ==> n \<le> x"
by (auto intro: lemma_floor)
lemma real_of_int_floor_cancel [simp]:
"(real (floor x) = x) = (\<exists>n::int. x = real n)"
-apply (simp add: floor_def Least_def)
-apply (insert real_lb_ub_int [of x], erule exE)
-apply (rule theI2)
-apply (auto intro: lemma_floor)
-done
+ using floor_real_of_int by metis
lemma floor_eq: "[| real n < x; x < real n + 1 |] ==> floor x = n"
-apply (simp add: floor_def)
-apply (rule Least_equality)
-apply (auto intro: lemma_floor)
-done
+ unfolding real_of_int_def using floor_unique [of n x] by simp
lemma floor_eq2: "[| real n \<le> x; x < real n + 1 |] ==> floor x = n"
-apply (simp add: floor_def)
-apply (rule Least_equality)
-apply (auto intro: lemma_floor)
-done
+ unfolding real_of_int_def by (rule floor_unique)
lemma floor_eq3: "[| real n < x; x < real (Suc n) |] ==> nat(floor x) = n"
apply (rule inj_int [THEN injD])
@@ -635,353 +549,205 @@
apply (auto intro: floor_eq3)
done
-lemma floor_number_of_eq [simp]:
+lemma floor_number_of_eq:
"floor(number_of n :: real) = (number_of n :: int)"
-apply (subst real_number_of [symmetric])
-apply (rule floor_real_of_int)
-done
-
-lemma floor_one [simp]: "floor 1 = 1"
- apply (rule trans)
- prefer 2
- apply (rule floor_real_of_int)
- apply simp
-done
+ by (rule floor_number_of) (* already declared [simp] *)
lemma real_of_int_floor_ge_diff_one [simp]: "r - 1 \<le> real(floor r)"
-apply (simp add: floor_def Least_def)
-apply (insert real_lb_ub_int [of r], safe)
-apply (rule theI2)
-apply (auto intro: lemma_floor)
-done
+ unfolding real_of_int_def using floor_correct [of r] by simp
lemma real_of_int_floor_gt_diff_one [simp]: "r - 1 < real(floor r)"
-apply (simp add: floor_def Least_def)
-apply (insert real_lb_ub_int [of r], safe)
-apply (rule theI2)
-apply (auto intro: lemma_floor)
-done
+ unfolding real_of_int_def using floor_correct [of r] by simp
lemma real_of_int_floor_add_one_ge [simp]: "r \<le> real(floor r) + 1"
-apply (insert real_of_int_floor_ge_diff_one [of r])
-apply (auto simp del: real_of_int_floor_ge_diff_one)
-done
+ unfolding real_of_int_def using floor_correct [of r] by simp
lemma real_of_int_floor_add_one_gt [simp]: "r < real(floor r) + 1"
-apply (insert real_of_int_floor_gt_diff_one [of r])
-apply (auto simp del: real_of_int_floor_gt_diff_one)
-done
+ unfolding real_of_int_def using floor_correct [of r] by simp
lemma le_floor: "real a <= x ==> a <= floor x"
- apply (subgoal_tac "a < floor x + 1")
- apply arith
- apply (subst real_of_int_less_iff [THEN sym])
- apply simp
- apply (insert real_of_int_floor_add_one_gt [of x])
- apply arith
-done
+ unfolding real_of_int_def by (simp add: le_floor_iff)
lemma real_le_floor: "a <= floor x ==> real a <= x"
- apply (rule order_trans)
- prefer 2
- apply (rule real_of_int_floor_le)
- apply (subst real_of_int_le_iff)
- apply assumption
-done
+ unfolding real_of_int_def by (simp add: le_floor_iff)
lemma le_floor_eq: "(a <= floor x) = (real a <= x)"
- apply (rule iffI)
- apply (erule real_le_floor)
- apply (erule le_floor)
-done
+ unfolding real_of_int_def by (rule le_floor_iff)
-lemma le_floor_eq_number_of [simp]:
+lemma le_floor_eq_number_of:
"(number_of n <= floor x) = (number_of n <= x)"
-by (simp add: le_floor_eq)
+ by (rule number_of_le_floor) (* already declared [simp] *)
-lemma le_floor_eq_zero [simp]: "(0 <= floor x) = (0 <= x)"
-by (simp add: le_floor_eq)
+lemma le_floor_eq_zero: "(0 <= floor x) = (0 <= x)"
+ by (rule zero_le_floor) (* already declared [simp] *)
-lemma le_floor_eq_one [simp]: "(1 <= floor x) = (1 <= x)"
-by (simp add: le_floor_eq)
+lemma le_floor_eq_one: "(1 <= floor x) = (1 <= x)"
+ by (rule one_le_floor) (* already declared [simp] *)
lemma floor_less_eq: "(floor x < a) = (x < real a)"
- apply (subst linorder_not_le [THEN sym])+
- apply simp
- apply (rule le_floor_eq)
-done
+ unfolding real_of_int_def by (rule floor_less_iff)
-lemma floor_less_eq_number_of [simp]:
+lemma floor_less_eq_number_of:
"(floor x < number_of n) = (x < number_of n)"
-by (simp add: floor_less_eq)
+ by (rule floor_less_number_of) (* already declared [simp] *)
-lemma floor_less_eq_zero [simp]: "(floor x < 0) = (x < 0)"
-by (simp add: floor_less_eq)
+lemma floor_less_eq_zero: "(floor x < 0) = (x < 0)"
+ by (rule floor_less_zero) (* already declared [simp] *)
-lemma floor_less_eq_one [simp]: "(floor x < 1) = (x < 1)"
-by (simp add: floor_less_eq)
+lemma floor_less_eq_one: "(floor x < 1) = (x < 1)"
+ by (rule floor_less_one) (* already declared [simp] *)
lemma less_floor_eq: "(a < floor x) = (real a + 1 <= x)"
- apply (insert le_floor_eq [of "a + 1" x])
- apply auto
-done
+ unfolding real_of_int_def by (rule less_floor_iff)
-lemma less_floor_eq_number_of [simp]:
+lemma less_floor_eq_number_of:
"(number_of n < floor x) = (number_of n + 1 <= x)"
-by (simp add: less_floor_eq)
+ by (rule number_of_less_floor) (* already declared [simp] *)
-lemma less_floor_eq_zero [simp]: "(0 < floor x) = (1 <= x)"
-by (simp add: less_floor_eq)
+lemma less_floor_eq_zero: "(0 < floor x) = (1 <= x)"
+ by (rule zero_less_floor) (* already declared [simp] *)
-lemma less_floor_eq_one [simp]: "(1 < floor x) = (2 <= x)"
-by (simp add: less_floor_eq)
+lemma less_floor_eq_one: "(1 < floor x) = (2 <= x)"
+ by (rule one_less_floor) (* already declared [simp] *)
lemma floor_le_eq: "(floor x <= a) = (x < real a + 1)"
- apply (insert floor_less_eq [of x "a + 1"])
- apply auto
-done
+ unfolding real_of_int_def by (rule floor_le_iff)
-lemma floor_le_eq_number_of [simp]:
+lemma floor_le_eq_number_of:
"(floor x <= number_of n) = (x < number_of n + 1)"
-by (simp add: floor_le_eq)
+ by (rule floor_le_number_of) (* already declared [simp] *)
-lemma floor_le_eq_zero [simp]: "(floor x <= 0) = (x < 1)"
-by (simp add: floor_le_eq)
+lemma floor_le_eq_zero: "(floor x <= 0) = (x < 1)"
+ by (rule floor_le_zero) (* already declared [simp] *)
-lemma floor_le_eq_one [simp]: "(floor x <= 1) = (x < 2)"
-by (simp add: floor_le_eq)
+lemma floor_le_eq_one: "(floor x <= 1) = (x < 2)"
+ by (rule floor_le_one) (* already declared [simp] *)
lemma floor_add [simp]: "floor (x + real a) = floor x + a"
- apply (subst order_eq_iff)
- apply (rule conjI)
- prefer 2
- apply (subgoal_tac "floor x + a < floor (x + real a) + 1")
- apply arith
- apply (subst real_of_int_less_iff [THEN sym])
- apply simp
- apply (subgoal_tac "x + real a < real(floor(x + real a)) + 1")
- apply (subgoal_tac "real (floor x) <= x")
- apply arith
- apply (rule real_of_int_floor_le)
- apply (rule real_of_int_floor_add_one_gt)
- apply (subgoal_tac "floor (x + real a) < floor x + a + 1")
- apply arith
- apply (subst real_of_int_less_iff [THEN sym])
- apply simp
- apply (subgoal_tac "real(floor(x + real a)) <= x + real a")
- apply (subgoal_tac "x < real(floor x) + 1")
- apply arith
- apply (rule real_of_int_floor_add_one_gt)
- apply (rule real_of_int_floor_le)
-done
-
-lemma floor_add_number_of [simp]:
- "floor (x + number_of n) = floor x + number_of n"
- apply (subst floor_add [THEN sym])
- apply simp
-done
-
-lemma floor_add_one [simp]: "floor (x + 1) = floor x + 1"
- apply (subst floor_add [THEN sym])
- apply simp
-done
+ unfolding real_of_int_def by (rule floor_add_of_int)
lemma floor_subtract [simp]: "floor (x - real a) = floor x - a"
- apply (subst diff_minus)+
- apply (subst real_of_int_minus [THEN sym])
- apply (rule floor_add)
-done
+ unfolding real_of_int_def by (rule floor_diff_of_int)
-lemma floor_subtract_number_of [simp]: "floor (x - number_of n) =
+lemma floor_subtract_number_of: "floor (x - number_of n) =
floor x - number_of n"
- apply (subst floor_subtract [THEN sym])
- apply simp
-done
+ by (rule floor_diff_number_of) (* already declared [simp] *)
-lemma floor_subtract_one [simp]: "floor (x - 1) = floor x - 1"
- apply (subst floor_subtract [THEN sym])
- apply simp
-done
-
-lemma ceiling_zero [simp]: "ceiling 0 = 0"
-by (simp add: ceiling_def)
+lemma floor_subtract_one: "floor (x - 1) = floor x - 1"
+ by (rule floor_diff_one) (* already declared [simp] *)
lemma ceiling_real_of_nat [simp]: "ceiling (real (n::nat)) = int n"
-by (simp add: ceiling_def)
+ unfolding real_of_nat_def by simp
-lemma ceiling_real_of_nat_zero [simp]: "ceiling (real (0::nat)) = 0"
-by auto
+lemma ceiling_real_of_nat_zero: "ceiling (real (0::nat)) = 0"
+by auto (* delete? *)
lemma ceiling_floor [simp]: "ceiling (real (floor r)) = floor r"
-by (simp add: ceiling_def)
+ unfolding real_of_int_def by simp
lemma floor_ceiling [simp]: "floor (real (ceiling r)) = ceiling r"
-by (simp add: ceiling_def)
+ unfolding real_of_int_def by simp
lemma real_of_int_ceiling_ge [simp]: "r \<le> real (ceiling r)"
-apply (simp add: ceiling_def)
-apply (subst le_minus_iff, simp)
-done
+ unfolding real_of_int_def by (rule le_of_int_ceiling)
-lemma ceiling_mono: "x < y ==> ceiling x \<le> ceiling y"
-by (simp add: floor_mono ceiling_def)
-
-lemma ceiling_mono2: "x \<le> y ==> ceiling x \<le> ceiling y"
-by (simp add: floor_mono2 ceiling_def)
+lemma ceiling_real_of_int [simp]: "ceiling (real (n::int)) = n"
+ unfolding real_of_int_def by simp
lemma real_of_int_ceiling_cancel [simp]:
"(real (ceiling x) = x) = (\<exists>n::int. x = real n)"
-apply (auto simp add: ceiling_def)
-apply (drule arg_cong [where f = uminus], auto)
-apply (rule_tac x = "-n" in exI, auto)
-done
+ using ceiling_real_of_int by metis
lemma ceiling_eq: "[| real n < x; x < real n + 1 |] ==> ceiling x = n + 1"
-apply (simp add: ceiling_def)
-apply (rule minus_equation_iff [THEN iffD1])
-apply (simp add: floor_eq [where n = "-(n+1)"])
-done
+ unfolding real_of_int_def using ceiling_unique [of "n + 1" x] by simp
lemma ceiling_eq2: "[| real n < x; x \<le> real n + 1 |] ==> ceiling x = n + 1"
-by (simp add: ceiling_def floor_eq2 [where n = "-(n+1)"])
+ unfolding real_of_int_def using ceiling_unique [of "n + 1" x] by simp
lemma ceiling_eq3: "[| real n - 1 < x; x \<le> real n |] ==> ceiling x = n"
-by (simp add: ceiling_def floor_eq2 [where n = "-n"])
+ unfolding real_of_int_def using ceiling_unique [of n x] by simp
-lemma ceiling_real_of_int [simp]: "ceiling (real (n::int)) = n"
-by (simp add: ceiling_def)
-
-lemma ceiling_number_of_eq [simp]:
+lemma ceiling_number_of_eq:
"ceiling (number_of n :: real) = (number_of n)"
-apply (subst real_number_of [symmetric])
-apply (rule ceiling_real_of_int)
-done
-
-lemma ceiling_one [simp]: "ceiling 1 = 1"
- by (unfold ceiling_def, simp)
+ by (rule ceiling_number_of) (* already declared [simp] *)
lemma real_of_int_ceiling_diff_one_le [simp]: "real (ceiling r) - 1 \<le> r"
-apply (rule neg_le_iff_le [THEN iffD1])
-apply (simp add: ceiling_def diff_minus)
-done
+ unfolding real_of_int_def using ceiling_correct [of r] by simp
lemma real_of_int_ceiling_le_add_one [simp]: "real (ceiling r) \<le> r + 1"
-apply (insert real_of_int_ceiling_diff_one_le [of r])
-apply (simp del: real_of_int_ceiling_diff_one_le)
-done
+ unfolding real_of_int_def using ceiling_correct [of r] by simp
lemma ceiling_le: "x <= real a ==> ceiling x <= a"
- apply (unfold ceiling_def)
- apply (subgoal_tac "-a <= floor(- x)")
- apply simp
- apply (rule le_floor)
- apply simp
-done
+ unfolding real_of_int_def by (simp add: ceiling_le_iff)
lemma ceiling_le_real: "ceiling x <= a ==> x <= real a"
- apply (unfold ceiling_def)
- apply (subgoal_tac "real(- a) <= - x")
- apply simp
- apply (rule real_le_floor)
- apply simp
-done
+ unfolding real_of_int_def by (simp add: ceiling_le_iff)
lemma ceiling_le_eq: "(ceiling x <= a) = (x <= real a)"
- apply (rule iffI)
- apply (erule ceiling_le_real)
- apply (erule ceiling_le)
-done
+ unfolding real_of_int_def by (rule ceiling_le_iff)
-lemma ceiling_le_eq_number_of [simp]:
+lemma ceiling_le_eq_number_of:
"(ceiling x <= number_of n) = (x <= number_of n)"
-by (simp add: ceiling_le_eq)
+ by (rule ceiling_le_number_of) (* already declared [simp] *)
-lemma ceiling_le_zero_eq [simp]: "(ceiling x <= 0) = (x <= 0)"
-by (simp add: ceiling_le_eq)
+lemma ceiling_le_zero_eq: "(ceiling x <= 0) = (x <= 0)"
+ by (rule ceiling_le_zero) (* already declared [simp] *)
-lemma ceiling_le_eq_one [simp]: "(ceiling x <= 1) = (x <= 1)"
-by (simp add: ceiling_le_eq)
+lemma ceiling_le_eq_one: "(ceiling x <= 1) = (x <= 1)"
+ by (rule ceiling_le_one) (* already declared [simp] *)
lemma less_ceiling_eq: "(a < ceiling x) = (real a < x)"
- apply (subst linorder_not_le [THEN sym])+
- apply simp
- apply (rule ceiling_le_eq)
-done
+ unfolding real_of_int_def by (rule less_ceiling_iff)
-lemma less_ceiling_eq_number_of [simp]:
+lemma less_ceiling_eq_number_of:
"(number_of n < ceiling x) = (number_of n < x)"
-by (simp add: less_ceiling_eq)
+ by (rule number_of_less_ceiling) (* already declared [simp] *)
-lemma less_ceiling_eq_zero [simp]: "(0 < ceiling x) = (0 < x)"
-by (simp add: less_ceiling_eq)
+lemma less_ceiling_eq_zero: "(0 < ceiling x) = (0 < x)"
+ by (rule zero_less_ceiling) (* already declared [simp] *)
-lemma less_ceiling_eq_one [simp]: "(1 < ceiling x) = (1 < x)"
-by (simp add: less_ceiling_eq)
+lemma less_ceiling_eq_one: "(1 < ceiling x) = (1 < x)"
+ by (rule one_less_ceiling) (* already declared [simp] *)
lemma ceiling_less_eq: "(ceiling x < a) = (x <= real a - 1)"
- apply (insert ceiling_le_eq [of x "a - 1"])
- apply auto
-done
+ unfolding real_of_int_def by (rule ceiling_less_iff)
-lemma ceiling_less_eq_number_of [simp]:
+lemma ceiling_less_eq_number_of:
"(ceiling x < number_of n) = (x <= number_of n - 1)"
-by (simp add: ceiling_less_eq)
+ by (rule ceiling_less_number_of) (* already declared [simp] *)
-lemma ceiling_less_eq_zero [simp]: "(ceiling x < 0) = (x <= -1)"
-by (simp add: ceiling_less_eq)
+lemma ceiling_less_eq_zero: "(ceiling x < 0) = (x <= -1)"
+ by (rule ceiling_less_zero) (* already declared [simp] *)
-lemma ceiling_less_eq_one [simp]: "(ceiling x < 1) = (x <= 0)"
-by (simp add: ceiling_less_eq)
+lemma ceiling_less_eq_one: "(ceiling x < 1) = (x <= 0)"
+ by (rule ceiling_less_one) (* already declared [simp] *)
lemma le_ceiling_eq: "(a <= ceiling x) = (real a - 1 < x)"
- apply (insert less_ceiling_eq [of "a - 1" x])
- apply auto
-done
+ unfolding real_of_int_def by (rule le_ceiling_iff)
-lemma le_ceiling_eq_number_of [simp]:
+lemma le_ceiling_eq_number_of:
"(number_of n <= ceiling x) = (number_of n - 1 < x)"
-by (simp add: le_ceiling_eq)
+ by (rule number_of_le_ceiling) (* already declared [simp] *)
-lemma le_ceiling_eq_zero [simp]: "(0 <= ceiling x) = (-1 < x)"
-by (simp add: le_ceiling_eq)
+lemma le_ceiling_eq_zero: "(0 <= ceiling x) = (-1 < x)"
+ by (rule zero_le_ceiling) (* already declared [simp] *)
-lemma le_ceiling_eq_one [simp]: "(1 <= ceiling x) = (0 < x)"
-by (simp add: le_ceiling_eq)
+lemma le_ceiling_eq_one: "(1 <= ceiling x) = (0 < x)"
+ by (rule one_le_ceiling) (* already declared [simp] *)
lemma ceiling_add [simp]: "ceiling (x + real a) = ceiling x + a"
- apply (unfold ceiling_def, simp)
- apply (subst real_of_int_minus [THEN sym])
- apply (subst floor_add)
- apply simp
-done
-
-lemma ceiling_add_number_of [simp]: "ceiling (x + number_of n) =
- ceiling x + number_of n"
- apply (subst ceiling_add [THEN sym])
- apply simp
-done
-
-lemma ceiling_add_one [simp]: "ceiling (x + 1) = ceiling x + 1"
- apply (subst ceiling_add [THEN sym])
- apply simp
-done
+ unfolding real_of_int_def by (rule ceiling_add_of_int)
lemma ceiling_subtract [simp]: "ceiling (x - real a) = ceiling x - a"
- apply (subst diff_minus)+
- apply (subst real_of_int_minus [THEN sym])
- apply (rule ceiling_add)
-done
+ unfolding real_of_int_def by (rule ceiling_diff_of_int)
-lemma ceiling_subtract_number_of [simp]: "ceiling (x - number_of n) =
+lemma ceiling_subtract_number_of: "ceiling (x - number_of n) =
ceiling x - number_of n"
- apply (subst ceiling_subtract [THEN sym])
- apply simp
-done
+ by (rule ceiling_diff_number_of) (* already declared [simp] *)
-lemma ceiling_subtract_one [simp]: "ceiling (x - 1) = ceiling x - 1"
- apply (subst ceiling_subtract [THEN sym])
- apply simp
-done
+lemma ceiling_subtract_one: "ceiling (x - 1) = ceiling x - 1"
+ by (rule ceiling_diff_one) (* already declared [simp] *)
+
subsection {* Versions for the natural numbers *}
@@ -1015,7 +781,7 @@
apply (unfold natfloor_def)
apply (subgoal_tac "floor x <= floor 0")
apply simp
- apply (erule floor_mono2)
+ apply (erule floor_mono)
done
lemma natfloor_mono: "x <= y ==> natfloor x <= natfloor y"
@@ -1023,7 +789,7 @@
apply (subst natfloor_def)+
apply (subst nat_le_eq_zle)
apply force
- apply (erule floor_mono2)
+ apply (erule floor_mono)
apply (subst natfloor_neg)
apply simp
apply simp
@@ -1144,7 +910,7 @@
apply (subst real_nat_eq_real)
apply (subgoal_tac "ceiling 0 <= ceiling x")
apply simp
- apply (rule ceiling_mono2)
+ apply (rule ceiling_mono)
apply simp
apply simp
done
@@ -1165,7 +931,7 @@
apply simp
apply (erule order_trans)
apply simp
- apply (erule ceiling_mono2)
+ apply (erule ceiling_mono)
apply (subst natceiling_neg)
apply simp_all
done
@@ -1215,7 +981,7 @@
apply (subst eq_nat_nat_iff)
apply (subgoal_tac "ceiling 0 <= ceiling x")
apply simp
- apply (rule ceiling_mono2)
+ apply (rule ceiling_mono)
apply force
apply force
apply (rule ceiling_eq2)
@@ -1233,7 +999,7 @@
apply (subst nat_add_distrib)
apply (subgoal_tac "0 = ceiling 0")
apply (erule ssubst)
- apply (erule ceiling_mono2)
+ apply (erule ceiling_mono)
apply simp_all
done
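
This is the largest cleanup in the changeset: the local LEAST-based definitions of floor and ceiling and their apply-style proofs disappear, and the lemmas are re-derived from the generic archimedean-field versions (floor_unique, floor_correct, le_floor_iff, ceiling_le_iff, ...); callers of floor_mono2/ceiling_mono2 switch to floor_mono/ceiling_mono. A few of the restated facts, checked numerically on rationals to avoid floating-point artefacts (Python, illustrative only):

    import math
    from fractions import Fraction

    xs = [Fraction(p, q) for p in range(-20, 21) for q in range(1, 6)]
    for x in xs:
        for a in range(-5, 6):
            assert math.floor(x + a) == math.floor(x) + a      # floor_add
            assert (a <= math.floor(x)) == (a <= x)            # le_floor_eq
            assert (math.ceil(x) <= a) == (x <= a)             # ceiling_le_eq
    print("floor/ceiling checks passed")
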
--- a/src/HOL/ROOT.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/ROOT.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,7 +1,5 @@
(* Classical Higher-order Logic -- batteries included *)
-use_thy "Main";
-share_common_data ();
use_thy "Complex_Main";
val HOL_proofs = ! Proofterm.proofs;
--- a/src/HOL/Rational.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Rational.thy Wed Mar 04 10:45:52 2009 +0100
@@ -5,7 +5,7 @@
header {* Rational numbers *}
theory Rational
-imports GCD
+imports GCD Archimedean_Field
uses ("Tools/rat_arith.ML")
begin
@@ -21,8 +21,8 @@
"(x, y) \<in> ratrel \<longleftrightarrow> snd x \<noteq> 0 \<and> snd y \<noteq> 0 \<and> fst x * snd y = fst y * snd x"
by (simp add: ratrel_def)
-lemma refl_ratrel: "refl {x. snd x \<noteq> 0} ratrel"
- by (auto simp add: refl_def ratrel_def)
+lemma refl_on_ratrel: "refl_on {x. snd x \<noteq> 0} ratrel"
+ by (auto simp add: refl_on_def ratrel_def)
lemma sym_ratrel: "sym ratrel"
by (simp add: ratrel_def sym_def)
@@ -44,7 +44,7 @@
qed
lemma equiv_ratrel: "equiv {x. snd x \<noteq> 0} ratrel"
- by (rule equiv.intro [OF refl_ratrel sym_ratrel trans_ratrel])
+ by (rule equiv.intro [OF refl_on_ratrel sym_ratrel trans_ratrel])
lemmas UN_ratrel = UN_equiv_class [OF equiv_ratrel]
lemmas UN_ratrel2 = UN_equiv_class2 [OF equiv_ratrel equiv_ratrel]
@@ -255,7 +255,6 @@
with `b \<noteq> 0` have "a \<noteq> 0" by (simp add: Zero_rat_def eq_rat)
with Fract `q = Fract a b` `b \<noteq> 0` show C by auto
qed
-
subsubsection {* The field of rational numbers *}
@@ -532,8 +531,67 @@
qed
lemma zero_less_Fract_iff:
- "0 < b ==> (0 < Fract a b) = (0 < a)"
-by (simp add: Zero_rat_def order_less_imp_not_eq2 zero_less_mult_iff)
+ "0 < b \<Longrightarrow> 0 < Fract a b \<longleftrightarrow> 0 < a"
+ by (simp add: Zero_rat_def zero_less_mult_iff)
+
+lemma Fract_less_zero_iff:
+ "0 < b \<Longrightarrow> Fract a b < 0 \<longleftrightarrow> a < 0"
+ by (simp add: Zero_rat_def mult_less_0_iff)
+
+lemma zero_le_Fract_iff:
+ "0 < b \<Longrightarrow> 0 \<le> Fract a b \<longleftrightarrow> 0 \<le> a"
+ by (simp add: Zero_rat_def zero_le_mult_iff)
+
+lemma Fract_le_zero_iff:
+ "0 < b \<Longrightarrow> Fract a b \<le> 0 \<longleftrightarrow> a \<le> 0"
+ by (simp add: Zero_rat_def mult_le_0_iff)
+
+lemma one_less_Fract_iff:
+ "0 < b \<Longrightarrow> 1 < Fract a b \<longleftrightarrow> b < a"
+ by (simp add: One_rat_def mult_less_cancel_right_disj)
+
+lemma Fract_less_one_iff:
+ "0 < b \<Longrightarrow> Fract a b < 1 \<longleftrightarrow> a < b"
+ by (simp add: One_rat_def mult_less_cancel_right_disj)
+
+lemma one_le_Fract_iff:
+ "0 < b \<Longrightarrow> 1 \<le> Fract a b \<longleftrightarrow> b \<le> a"
+ by (simp add: One_rat_def mult_le_cancel_right)
+
+lemma Fract_le_one_iff:
+ "0 < b \<Longrightarrow> Fract a b \<le> 1 \<longleftrightarrow> a \<le> b"
+ by (simp add: One_rat_def mult_le_cancel_right)
+
+
+subsubsection {* Rationals are an Archimedean field *}
+
+lemma rat_floor_lemma:
+ assumes "0 < b"
+ shows "of_int (a div b) \<le> Fract a b \<and> Fract a b < of_int (a div b + 1)"
+proof -
+ have "Fract a b = of_int (a div b) + Fract (a mod b) b"
+ using `0 < b` by (simp add: of_int_rat)
+ moreover have "0 \<le> Fract (a mod b) b \<and> Fract (a mod b) b < 1"
+ using `0 < b` by (simp add: zero_le_Fract_iff Fract_less_one_iff)
+ ultimately show ?thesis by simp
+qed
+
+instance rat :: archimedean_field
+proof
+ fix r :: rat
+ show "\<exists>z. r \<le> of_int z"
+ proof (induct r)
+ case (Fract a b)
+ then have "Fract a b \<le> of_int (a div b + 1)"
+ using rat_floor_lemma [of b a] by simp
+ then show "\<exists>z. Fract a b \<le> of_int z" ..
+ qed
+qed
+
+lemma floor_Fract:
+ assumes "0 < b" shows "floor (Fract a b) = a div b"
+ using rat_floor_lemma [OF `0 < b`, of a]
+ by (simp add: floor_unique)
subsection {* Arithmetic setup *}
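
The new order lemmas for Fract feed into rat_floor_lemma and the archimedean_field instance; the payoff is floor_Fract: for 0 < b, floor (Fract a b) = a div b. Python's // on integers rounds toward negative infinity like Isabelle's integer div, so the fact can be checked directly (illustrative only):

    import math
    from fractions import Fraction

    # floor_Fract, sampled: 0 < b ==> floor (Fract a b) = a div b.
    for b in range(1, 9):
        for a in range(-30, 31):
            assert math.floor(Fraction(a, b)) == a // b
    print("floor_Fract check passed")
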
--- a/src/HOL/RealDef.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/RealDef.thy Wed Mar 04 10:45:52 2009 +0100
@@ -94,7 +94,7 @@
by (simp add: realrel_def)
lemma equiv_realrel: "equiv UNIV realrel"
-apply (auto simp add: equiv_def refl_def sym_def trans_def realrel_def)
+apply (auto simp add: equiv_def refl_on_def sym_def trans_def realrel_def)
apply (blast dest: preal_trans_lemma)
done
@@ -655,7 +655,7 @@
real(n div d) = real n / real d"
apply (frule real_of_int_div_aux [of d n])
apply simp
- apply (simp add: zdvd_iff_zmod_eq_0)
+ apply (simp add: dvd_eq_mod_eq_0)
done
lemma real_of_int_div2:
@@ -705,6 +705,9 @@
lemma real_of_nat_zero [simp]: "real (0::nat) = 0"
by (simp add: real_of_nat_def)
+lemma real_of_nat_1 [simp]: "real (1::nat) = 1"
+by (simp add: real_of_nat_def)
+
lemma real_of_nat_one [simp]: "real (Suc 0) = (1::real)"
by (simp add: real_of_nat_def)
--- a/src/HOL/RealPow.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/RealPow.thy Wed Mar 04 10:45:52 2009 +0100
@@ -44,7 +44,8 @@
by (insert power_decreasing [of 1 "Suc n" r], simp)
lemma realpow_minus_mult [rule_format]:
- "0 < n --> (x::real) ^ (n - 1) * x = x ^ n"
+ "0 < n --> (x::real) ^ (n - 1) * x = x ^ n"
+unfolding One_nat_def
apply (simp split add: nat_diff_split)
done
--- a/src/HOL/RealVector.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/RealVector.thy Wed Mar 04 10:45:52 2009 +0100
@@ -46,8 +46,10 @@
locale vector_space =
fixes scale :: "'a::field \<Rightarrow> 'b::ab_group_add \<Rightarrow> 'b"
- assumes scale_right_distrib: "scale a (x + y) = scale a x + scale a y"
- and scale_left_distrib: "scale (a + b) x = scale a x + scale b x"
+ assumes scale_right_distrib [algebra_simps]:
+ "scale a (x + y) = scale a x + scale a y"
+ and scale_left_distrib [algebra_simps]:
+ "scale (a + b) x = scale a x + scale b x"
and scale_scale [simp]: "scale a (scale b x) = scale (a * b) x"
and scale_one [simp]: "scale 1 x = x"
begin
@@ -58,7 +60,8 @@
lemma scale_zero_left [simp]: "scale 0 x = 0"
and scale_minus_left [simp]: "scale (- a) x = - (scale a x)"
- and scale_left_diff_distrib: "scale (a - b) x = scale a x - scale b x"
+ and scale_left_diff_distrib [algebra_simps]:
+ "scale (a - b) x = scale a x - scale b x"
proof -
interpret s: additive "\<lambda>a. scale a x"
proof qed (rule scale_left_distrib)
@@ -69,7 +72,8 @@
lemma scale_zero_right [simp]: "scale a 0 = 0"
and scale_minus_right [simp]: "scale a (- x) = - (scale a x)"
- and scale_right_diff_distrib: "scale a (x - y) = scale a x - scale a y"
+ and scale_right_diff_distrib [algebra_simps]:
+ "scale a (x - y) = scale a x - scale a y"
proof -
interpret s: additive "\<lambda>x. scale a x"
proof qed (rule scale_right_distrib)
@@ -135,21 +139,11 @@
end
-instantiation real :: scaleR
-begin
-
-definition
- real_scaleR_def [simp]: "scaleR a x = a * x"
-
-instance ..
-
-end
-
class real_vector = scaleR + ab_group_add +
assumes scaleR_right_distrib: "scaleR a (x + y) = scaleR a x + scaleR a y"
and scaleR_left_distrib: "scaleR (a + b) x = scaleR a x + scaleR b x"
- and scaleR_scaleR [simp]: "scaleR a (scaleR b x) = scaleR (a * b) x"
- and scaleR_one [simp]: "scaleR 1 x = x"
+ and scaleR_scaleR: "scaleR a (scaleR b x) = scaleR (a * b) x"
+ and scaleR_one: "scaleR 1 x = x"
interpretation real_vector!:
vector_space "scaleR :: real \<Rightarrow> 'a \<Rightarrow> 'a::real_vector"
@@ -185,15 +179,16 @@
class real_field = real_div_algebra + field
-instance real :: real_field
-apply (intro_classes, unfold real_scaleR_def)
-apply (rule right_distrib)
-apply (rule left_distrib)
-apply (rule mult_assoc [symmetric])
-apply (rule mult_1_left)
-apply (rule mult_assoc)
-apply (rule mult_left_commute)
-done
+instantiation real :: real_field
+begin
+
+definition
+ real_scaleR_def [simp]: "scaleR a x = a * x"
+
+instance proof
+qed (simp_all add: algebra_simps)
+
+end
interpretation scaleR_left!: additive "(\<lambda>a. scaleR a x::'a::real_vector)"
proof qed (rule scaleR_left_distrib)
@@ -307,7 +302,7 @@
definition
Reals :: "'a::real_algebra_1 set" where
- [code del]: "Reals \<equiv> range of_real"
+ [code del]: "Reals = range of_real"
notation (xsymbols)
Reals ("\<real>")
@@ -421,16 +416,6 @@
class norm =
fixes norm :: "'a \<Rightarrow> real"
-instantiation real :: norm
-begin
-
-definition
- real_norm_def [simp]: "norm r \<equiv> \<bar>r\<bar>"
-
-instance ..
-
-end
-
class sgn_div_norm = scaleR + norm + sgn +
assumes sgn_div_norm: "sgn x = x /\<^sub>R norm x"
@@ -462,7 +447,13 @@
thus "norm (1::'a) = 1" by simp
qed
-instance real :: real_normed_field
+instantiation real :: real_normed_field
+begin
+
+definition
+ real_norm_def [simp]: "norm r = \<bar>r\<bar>"
+
+instance
apply (intro_classes, unfold real_norm_def real_scaleR_def)
apply (simp add: real_sgn_def)
apply (rule abs_ge_zero)
@@ -472,6 +463,8 @@
apply (rule abs_mult)
done
+end
+
lemma norm_zero [simp]: "norm (0::'a::real_normed_vector) = 0"
by simp
--- a/src/HOL/Relation.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Relation.thy Wed Mar 04 10:45:52 2009 +0100
@@ -34,8 +34,8 @@
"Id == {p. EX x. p = (x,x)}"
definition
- diag :: "'a set => ('a * 'a) set" where -- {* diagonal: identity over a set *}
- "diag A == \<Union>x\<in>A. {(x,x)}"
+ Id_on :: "'a set => ('a * 'a) set" where -- {* diagonal: identity over a set *}
+ "Id_on A == \<Union>x\<in>A. {(x,x)}"
definition
Domain :: "('a * 'b) set => 'a set" where
@@ -50,12 +50,12 @@
"Field r == Domain r \<union> Range r"
definition
- refl :: "['a set, ('a * 'a) set] => bool" where -- {* reflexivity over a set *}
- "refl A r == r \<subseteq> A \<times> A & (ALL x: A. (x,x) : r)"
+ refl_on :: "['a set, ('a * 'a) set] => bool" where -- {* reflexivity over a set *}
+ "refl_on A r == r \<subseteq> A \<times> A & (ALL x: A. (x,x) : r)"
abbreviation
- reflexive :: "('a * 'a) set => bool" where -- {* reflexivity over a type *}
- "reflexive == refl UNIV"
+ refl :: "('a * 'a) set => bool" where -- {* reflexivity over a type *}
+ "refl == refl_on UNIV"
definition
sym :: "('a * 'a) set => bool" where -- {* symmetry predicate *}
@@ -99,8 +99,8 @@
lemma pair_in_Id_conv [iff]: "((a, b) : Id) = (a = b)"
by (unfold Id_def) blast
-lemma reflexive_Id: "reflexive Id"
-by (simp add: refl_def)
+lemma refl_Id: "refl Id"
+by (simp add: refl_on_def)
lemma antisym_Id: "antisym Id"
-- {* A strange result, since @{text Id} is also symmetric. *}
@@ -115,24 +115,24 @@
subsection {* Diagonal: identity over a set *}
-lemma diag_empty [simp]: "diag {} = {}"
-by (simp add: diag_def)
+lemma Id_on_empty [simp]: "Id_on {} = {}"
+by (simp add: Id_on_def)
-lemma diag_eqI: "a = b ==> a : A ==> (a, b) : diag A"
-by (simp add: diag_def)
+lemma Id_on_eqI: "a = b ==> a : A ==> (a, b) : Id_on A"
+by (simp add: Id_on_def)
-lemma diagI [intro!,noatp]: "a : A ==> (a, a) : diag A"
-by (rule diag_eqI) (rule refl)
+lemma Id_onI [intro!,noatp]: "a : A ==> (a, a) : Id_on A"
+by (rule Id_on_eqI) (rule refl)
-lemma diagE [elim!]:
- "c : diag A ==> (!!x. x : A ==> c = (x, x) ==> P) ==> P"
+lemma Id_onE [elim!]:
+ "c : Id_on A ==> (!!x. x : A ==> c = (x, x) ==> P) ==> P"
-- {* The general elimination rule. *}
-by (unfold diag_def) (iprover elim!: UN_E singletonE)
+by (unfold Id_on_def) (iprover elim!: UN_E singletonE)
-lemma diag_iff: "((x, y) : diag A) = (x = y & x : A)"
+lemma Id_on_iff: "((x, y) : Id_on A) = (x = y & x : A)"
by blast
-lemma diag_subset_Times: "diag A \<subseteq> A \<times> A"
+lemma Id_on_subset_Times: "Id_on A \<subseteq> A \<times> A"
by blast
@@ -184,37 +184,37 @@
subsection {* Reflexivity *}
-lemma reflI: "r \<subseteq> A \<times> A ==> (!!x. x : A ==> (x, x) : r) ==> refl A r"
-by (unfold refl_def) (iprover intro!: ballI)
+lemma refl_onI: "r \<subseteq> A \<times> A ==> (!!x. x : A ==> (x, x) : r) ==> refl_on A r"
+by (unfold refl_on_def) (iprover intro!: ballI)
-lemma reflD: "refl A r ==> a : A ==> (a, a) : r"
-by (unfold refl_def) blast
+lemma refl_onD: "refl_on A r ==> a : A ==> (a, a) : r"
+by (unfold refl_on_def) blast
-lemma reflD1: "refl A r ==> (x, y) : r ==> x : A"
-by (unfold refl_def) blast
+lemma refl_onD1: "refl_on A r ==> (x, y) : r ==> x : A"
+by (unfold refl_on_def) blast
-lemma reflD2: "refl A r ==> (x, y) : r ==> y : A"
-by (unfold refl_def) blast
+lemma refl_onD2: "refl_on A r ==> (x, y) : r ==> y : A"
+by (unfold refl_on_def) blast
-lemma refl_Int: "refl A r ==> refl B s ==> refl (A \<inter> B) (r \<inter> s)"
-by (unfold refl_def) blast
+lemma refl_on_Int: "refl_on A r ==> refl_on B s ==> refl_on (A \<inter> B) (r \<inter> s)"
+by (unfold refl_on_def) blast
-lemma refl_Un: "refl A r ==> refl B s ==> refl (A \<union> B) (r \<union> s)"
-by (unfold refl_def) blast
+lemma refl_on_Un: "refl_on A r ==> refl_on B s ==> refl_on (A \<union> B) (r \<union> s)"
+by (unfold refl_on_def) blast
-lemma refl_INTER:
- "ALL x:S. refl (A x) (r x) ==> refl (INTER S A) (INTER S r)"
-by (unfold refl_def) fast
+lemma refl_on_INTER:
+ "ALL x:S. refl_on (A x) (r x) ==> refl_on (INTER S A) (INTER S r)"
+by (unfold refl_on_def) fast
-lemma refl_UNION:
- "ALL x:S. refl (A x) (r x) \<Longrightarrow> refl (UNION S A) (UNION S r)"
-by (unfold refl_def) blast
+lemma refl_on_UNION:
+ "ALL x:S. refl_on (A x) (r x) \<Longrightarrow> refl_on (UNION S A) (UNION S r)"
+by (unfold refl_on_def) blast
-lemma refl_empty[simp]: "refl {} {}"
-by(simp add:refl_def)
+lemma refl_on_empty[simp]: "refl_on {} {}"
+by(simp add:refl_on_def)
-lemma refl_diag: "refl A (diag A)"
-by (rule reflI [OF diag_subset_Times diagI])
+lemma refl_on_Id_on: "refl_on A (Id_on A)"
+by (rule refl_onI [OF Id_on_subset_Times Id_onI])
subsection {* Antisymmetry *}
@@ -232,7 +232,7 @@
lemma antisym_empty [simp]: "antisym {}"
by (unfold antisym_def) blast
-lemma antisym_diag [simp]: "antisym (diag A)"
+lemma antisym_Id_on [simp]: "antisym (Id_on A)"
by (unfold antisym_def) blast
@@ -256,7 +256,7 @@
lemma sym_UNION: "ALL x:S. sym (r x) ==> sym (UNION S r)"
by (fast intro: symI dest: symD)
-lemma sym_diag [simp]: "sym (diag A)"
+lemma sym_Id_on [simp]: "sym (Id_on A)"
by (rule symI) clarify
@@ -275,7 +275,7 @@
lemma trans_INTER: "ALL x:S. trans (r x) ==> trans (INTER S r)"
by (fast intro: transI elim: transD)
-lemma trans_diag [simp]: "trans (diag A)"
+lemma trans_Id_on [simp]: "trans (Id_on A)"
by (fast intro: transI elim: transD)
lemma trans_diff_Id: " trans r \<Longrightarrow> antisym r \<Longrightarrow> trans (r-Id)"
@@ -331,11 +331,11 @@
lemma converse_Id [simp]: "Id^-1 = Id"
by blast
-lemma converse_diag [simp]: "(diag A)^-1 = diag A"
+lemma converse_Id_on [simp]: "(Id_on A)^-1 = Id_on A"
by blast
-lemma refl_converse [simp]: "refl A (converse r) = refl A r"
-by (unfold refl_def) auto
+lemma refl_on_converse [simp]: "refl_on A (converse r) = refl_on A r"
+by (unfold refl_on_def) auto
lemma sym_converse [simp]: "sym (converse r) = sym r"
by (unfold sym_def) blast
@@ -382,7 +382,7 @@
lemma Domain_Id [simp]: "Domain Id = UNIV"
by blast
-lemma Domain_diag [simp]: "Domain (diag A) = A"
+lemma Domain_Id_on [simp]: "Domain (Id_on A) = A"
by blast
lemma Domain_Un_eq: "Domain(A \<union> B) = Domain(A) \<union> Domain(B)"
@@ -433,7 +433,7 @@
lemma Range_Id [simp]: "Range Id = UNIV"
by blast
-lemma Range_diag [simp]: "Range (diag A) = A"
+lemma Range_Id_on [simp]: "Range (Id_on A) = A"
by auto
lemma Range_Un_eq: "Range(A \<union> B) = Range(A) \<union> Range(B)"
@@ -506,7 +506,7 @@
lemma Image_Id [simp]: "Id `` A = A"
by blast
-lemma Image_diag [simp]: "diag A `` B = A \<inter> B"
+lemma Image_Id_on [simp]: "Id_on A `` B = A \<inter> B"
by blast
lemma Image_Int_subset: "R `` (A \<inter> B) \<subseteq> R `` A \<inter> R `` B"
@@ -571,7 +571,7 @@
lemma single_valued_Id [simp]: "single_valued Id"
by (unfold single_valued_def) blast
-lemma single_valued_diag [simp]: "single_valued (diag A)"
+lemma single_valued_Id_on [simp]: "single_valued (Id_on A)"
by (unfold single_valued_def) blast
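
The renaming makes the set-bounded notions explicit: diag becomes Id_on (identity relation on a set) and refl becomes refl_on, with plain refl now meaning refl_on UNIV. A small model of the renamed constants with relations as sets of pairs (Python, an informal sketch, not the HOL definitions):

    # Id_on A = {(x, x) | x in A};  refl_on A r = r <= A x A and (x, x) in r for all x in A.
    def Id_on(A):
        return {(x, x) for x in A}

    def refl_on(A, r):
        return r <= {(x, y) for x in A for y in A} and all((x, x) in r for x in A)

    A, B = {1, 2, 3}, {2, 3, 4}
    assert refl_on(A, Id_on(A))                            # refl_on_Id_on
    assert {y for (x, y) in Id_on(A) if x in B} == A & B   # Image_Id_on
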
--- a/src/HOL/Relation_Power.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Relation_Power.thy Wed Mar 04 10:45:52 2009 +0100
@@ -61,16 +61,16 @@
lemma funpow_swap1: "f((f^n) x) = (f^n)(f x)"
proof -
- have "f((f^n) x) = (f^(n+1)) x" by simp
+ have "f((f^n) x) = (f^(n+1)) x" unfolding One_nat_def by simp
also have "\<dots> = (f^n o f^1) x" by (simp only: funpow_add)
- also have "\<dots> = (f^n)(f x)" by simp
+ also have "\<dots> = (f^n)(f x)" unfolding One_nat_def by simp
finally show ?thesis .
qed
lemma rel_pow_1 [simp]:
fixes R :: "('a*'a)set"
shows "R^1 = R"
- by simp
+ unfolding One_nat_def by simp
lemma rel_pow_0_I: "(x,x) : R^0"
by simp
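
The Relation_Power changes only unfold One_nat_def where 1 = Suc 0 used to be implicit; the statements are unchanged, e.g. funpow_swap1: f ((f ^ n) x) = (f ^ n) (f x) for n-fold function iteration. A quick check of that identity (Python, illustrative only):

    # funpow f n x applies f to x exactly n times.
    def funpow(f, n, x):
        for _ in range(n):
            x = f(x)
        return x

    f = lambda k: 2 * k + 1
    for n in range(0, 10):
        for x in range(-3, 4):
            assert f(funpow(f, n, x)) == funpow(f, n, f(x))   # funpow_swap1
    print("funpow_swap1 check passed")
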
--- a/src/HOL/Ring_and_Field.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Ring_and_Field.thy Wed Mar 04 10:45:52 2009 +0100
@@ -147,10 +147,10 @@
lemma one_dvd [simp]: "1 dvd a"
by (auto intro!: dvdI)
-lemma dvd_mult: "a dvd c \<Longrightarrow> a dvd (b * c)"
+lemma dvd_mult[simp]: "a dvd c \<Longrightarrow> a dvd (b * c)"
by (auto intro!: mult_left_commute dvdI elim!: dvdE)
-lemma dvd_mult2: "a dvd b \<Longrightarrow> a dvd (b * c)"
+lemma dvd_mult2[simp]: "a dvd b \<Longrightarrow> a dvd (b * c)"
apply (subst mult_commute)
apply (erule dvd_mult)
done
@@ -162,12 +162,12 @@
by (rule dvd_mult2) (rule dvd_refl)
lemma mult_dvd_mono:
- assumes ab: "a dvd b"
- and "cd": "c dvd d"
+ assumes "a dvd b"
+ and "c dvd d"
shows "a * c dvd b * d"
proof -
- from ab obtain b' where "b = a * b'" ..
- moreover from "cd" obtain d' where "d = c * d'" ..
+ from `a dvd b` obtain b' where "b = a * b'" ..
+ moreover from `c dvd d` obtain d' where "d = c * d'" ..
ultimately have "b * d = (a * c) * (b' * d')" by (simp add: mult_ac)
then show ?thesis ..
qed
@@ -310,8 +310,8 @@
then show "- x dvd y" ..
qed
-lemma dvd_diff: "x dvd y \<Longrightarrow> x dvd z \<Longrightarrow> x dvd (y - z)"
-by (simp add: diff_minus dvd_add dvd_minus_iff)
+lemma dvd_diff[simp]: "x dvd y \<Longrightarrow> x dvd z \<Longrightarrow> x dvd (y - z)"
+by (simp add: diff_minus dvd_minus_iff)
end
@@ -384,6 +384,26 @@
then show "a * a = b * b" by auto
qed
+lemma dvd_mult_cancel_right [simp]:
+ "a * c dvd b * c \<longleftrightarrow> c = 0 \<or> a dvd b"
+proof -
+ have "a * c dvd b * c \<longleftrightarrow> (\<exists>k. b * c = (a * k) * c)"
+ unfolding dvd_def by (simp add: mult_ac)
+ also have "(\<exists>k. b * c = (a * k) * c) \<longleftrightarrow> c = 0 \<or> a dvd b"
+ unfolding dvd_def by simp
+ finally show ?thesis .
+qed
+
+lemma dvd_mult_cancel_left [simp]:
+ "c * a dvd c * b \<longleftrightarrow> c = 0 \<or> a dvd b"
+proof -
+ have "c * a dvd c * b \<longleftrightarrow> (\<exists>k. b * c = (a * k) * c)"
+ unfolding dvd_def by (simp add: mult_ac)
+ also have "(\<exists>k. b * c = (a * k) * c) \<longleftrightarrow> c = 0 \<or> a dvd b"
+ unfolding dvd_def by simp
+ finally show ?thesis .
+qed
+
end
class division_ring = ring_1 + inverse +
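
Besides promoting dvd_mult, dvd_mult2 and dvd_diff to simp rules, the hunk adds the cancellation lemmas dvd_mult_cancel_right/left: a * c dvd b * c iff c = 0 or a dvd b. Checked over the integers, which satisfy the cancellation assumptions of the surrounding context (Python, illustrative only):

    # dvd_mult_cancel_right, sampled:  a*c dvd b*c  <->  c = 0 or a dvd b.
    def dvd(a, b):
        return any(b == a * k for k in range(-60, 61))

    rng = range(-6, 7)
    for a in rng:
        for b in rng:
            for c in rng:
                assert dvd(a * c, b * c) == (c == 0 or dvd(a, b))
    print("dvd_mult_cancel_right check passed")
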
--- a/src/HOL/SEQ.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/SEQ.thy Wed Mar 04 10:45:52 2009 +0100
@@ -338,10 +338,10 @@
done
lemma LIMSEQ_Suc: "f ----> l \<Longrightarrow> (\<lambda>n. f (Suc n)) ----> l"
-by (drule_tac k="1" in LIMSEQ_ignore_initial_segment, simp)
+by (drule_tac k="Suc 0" in LIMSEQ_ignore_initial_segment, simp)
lemma LIMSEQ_imp_Suc: "(\<lambda>n. f (Suc n)) ----> l \<Longrightarrow> f ----> l"
-by (rule_tac k="1" in LIMSEQ_offset, simp)
+by (rule_tac k="Suc 0" in LIMSEQ_offset, simp)
lemma LIMSEQ_Suc_iff: "(\<lambda>n. f (Suc n)) ----> l = f ----> l"
by (blast intro: LIMSEQ_imp_Suc LIMSEQ_Suc)
@@ -646,8 +646,21 @@
apply (drule LIMSEQ_minus, auto)
done
+text{* Given a binary function @{text "f :: nat \<Rightarrow> 'a \<Rightarrow> 'a"}, a function g satisfying the recursion equations below is uniquely determined *}
-subsection {* Bounded Monotonic Sequences *}
+lemma nat_function_unique: "EX! g. g 0 = e \<and> (\<forall>n. g (Suc n) = f n (g n))"
+ unfolding Ex1_def
+ apply (rule_tac x="nat_rec e f" in exI)
+ apply (rule conjI)+
+apply (rule def_nat_rec_0, simp)
+apply (rule allI, rule def_nat_rec_Suc, simp)
+apply (rule allI, rule impI, rule ext)
+apply (erule conjE)
+apply (induct_tac x)
+apply (simp add: nat_rec_0)
+apply (erule_tac x="n" in allE)
+apply (simp)
+done
text{*Subsequence (alternative definition, (e.g. Hoskins)*}
@@ -746,6 +759,136 @@
qed auto
qed
+text{* for any sequence, there is a monotonic subsequence *}
+lemma seq_monosub: "\<exists>f. subseq f \<and> monoseq (\<lambda> n. (s (f n)))"
+proof-
+ {assume H: "\<forall>n. \<exists>p >n. \<forall> m\<ge>p. s m \<le> s p"
+ let ?P = "\<lambda> p n. p > n \<and> (\<forall>m \<ge> p. s m \<le> s p)"
+ from nat_function_unique[of "SOME p. ?P p 0" "\<lambda>p n. SOME p. ?P p n"]
+ obtain f where f: "f 0 = (SOME p. ?P p 0)" "\<forall>n. f (Suc n) = (SOME p. ?P p (f n))" by blast
+ have "?P (f 0) 0" unfolding f(1) some_eq_ex[of "\<lambda>p. ?P p 0"]
+ using H apply -
+ apply (erule allE[where x=0], erule exE, rule_tac x="p" in exI)
+ unfolding order_le_less by blast
+ hence f0: "f 0 > 0" "\<forall>m \<ge> f 0. s m \<le> s (f 0)" by blast+
+ {fix n
+ have "?P (f (Suc n)) (f n)"
+ unfolding f(2)[rule_format, of n] some_eq_ex[of "\<lambda>p. ?P p (f n)"]
+ using H apply -
+ apply (erule allE[where x="f n"], erule exE, rule_tac x="p" in exI)
+ unfolding order_le_less by blast
+ hence "f (Suc n) > f n" "\<forall>m \<ge> f (Suc n). s m \<le> s (f (Suc n))" by blast+}
+ note fSuc = this
+ {fix p q assume pq: "p \<ge> f q"
+ have "s p \<le> s(f(q))" using f0(2)[rule_format, of p] pq fSuc
+ by (cases q, simp_all) }
+ note pqth = this
+ {fix q
+ have "f (Suc q) > f q" apply (induct q)
+ using f0(1) fSuc(1)[of 0] apply simp by (rule fSuc(1))}
+ note fss = this
+ from fss have th1: "subseq f" unfolding subseq_Suc_iff ..
+ {fix a b
+ have "f a \<le> f (a + b)"
+ proof(induct b)
+ case 0 thus ?case by simp
+ next
+ case (Suc b)
+ from fSuc(1)[of "a + b"] Suc.hyps show ?case by simp
+ qed}
+ note fmon0 = this
+ have "monoseq (\<lambda>n. s (f n))"
+ proof-
+ {fix n
+ have "s (f n) \<ge> s (f (Suc n))"
+ proof(cases n)
+ case 0
+ assume n0: "n = 0"
+ from fSuc(1)[of 0] have th0: "f 0 \<le> f (Suc 0)" by simp
+ from f0(2)[rule_format, OF th0] show ?thesis using n0 by simp
+ next
+ case (Suc m)
+ assume m: "n = Suc m"
+ from fSuc(1)[of n] m have th0: "f (Suc m) \<le> f (Suc (Suc m))" by simp
+ from m fSuc(2)[rule_format, OF th0] show ?thesis by simp
+ qed}
+ thus "monoseq (\<lambda>n. s (f n))" unfolding monoseq_Suc by blast
+ qed
+ with th1 have ?thesis by blast}
+ moreover
+ {fix N assume N: "\<forall>p >N. \<exists> m\<ge>p. s m > s p"
+ {fix p assume p: "p \<ge> Suc N"
+ hence pN: "p > N" by arith with N obtain m where m: "m \<ge> p" "s m > s p" by blast
+ have "m \<noteq> p" using m(2) by auto
+ with m have "\<exists>m>p. s p < s m" by - (rule exI[where x=m], auto)}
+ note th0 = this
+ let ?P = "\<lambda>m x. m > x \<and> s x < s m"
+ from nat_function_unique[of "SOME x. ?P x (Suc N)" "\<lambda>m x. SOME y. ?P y x"]
+ obtain f where f: "f 0 = (SOME x. ?P x (Suc N))"
+ "\<forall>n. f (Suc n) = (SOME m. ?P m (f n))" by blast
+ have "?P (f 0) (Suc N)" unfolding f(1) some_eq_ex[of "\<lambda>p. ?P p (Suc N)"]
+ using N apply -
+ apply (erule allE[where x="Suc N"], clarsimp)
+ apply (rule_tac x="m" in exI)
+ apply auto
+ apply (subgoal_tac "Suc N \<noteq> m")
+ apply simp
+ apply (rule ccontr, simp)
+ done
+ hence f0: "f 0 > Suc N" "s (Suc N) < s (f 0)" by blast+
+ {fix n
+ have "f n > N \<and> ?P (f (Suc n)) (f n)"
+ unfolding f(2)[rule_format, of n] some_eq_ex[of "\<lambda>p. ?P p (f n)"]
+ proof (induct n)
+ case 0 thus ?case
+ using f0 N apply auto
+ apply (erule allE[where x="f 0"], clarsimp)
+ apply (rule_tac x="m" in exI, simp)
+ by (subgoal_tac "f 0 \<noteq> m", auto)
+ next
+ case (Suc n)
+ from Suc.hyps have Nfn: "N < f n" by blast
+ from Suc.hyps obtain m where m: "m > f n" "s (f n) < s m" by blast
+ with Nfn have mN: "m > N" by arith
+ note key = Suc.hyps[unfolded some_eq_ex[of "\<lambda>p. ?P p (f n)", symmetric] f(2)[rule_format, of n, symmetric]]
+
+ from key have th0: "f (Suc n) > N" by simp
+ from N[rule_format, OF th0]
+ obtain m' where m': "m' \<ge> f (Suc n)" "s (f (Suc n)) < s m'" by blast
+ have "m' \<noteq> f (Suc (n))" apply (rule ccontr) using m'(2) by auto
+ hence "m' > f (Suc n)" using m'(1) by simp
+ with key m'(2) show ?case by auto
+ qed}
+ note fSuc = this
+ {fix n
+ have "f n \<ge> Suc N \<and> f(Suc n) > f n \<and> s(f n) < s(f(Suc n))" using fSuc[of n] by auto
+ hence "f n \<ge> Suc N" "f(Suc n) > f n" "s(f n) < s(f(Suc n))" by blast+}
+ note thf = this
+ have sqf: "subseq f" unfolding subseq_Suc_iff using thf by simp
+ have "monoseq (\<lambda>n. s (f n))" unfolding monoseq_Suc using thf
+ apply -
+ apply (rule disjI1)
+ apply auto
+ apply (rule order_less_imp_le)
+ apply blast
+ done
+ then have ?thesis using sqf by blast}
+ ultimately show ?thesis unfolding linorder_not_less[symmetric] by blast
+qed
+
+lemma seq_suble: assumes sf: "subseq f" shows "n \<le> f n"
+proof(induct n)
+ case 0 thus ?case by simp
+next
+ case (Suc n)
+ from sf[unfolded subseq_Suc_iff, rule_format, of n] Suc.hyps
+ have "n < f (Suc n)" by arith
+ thus ?case by arith
+qed
+
+subsection {* Bounded Monotonic Sequences *}
+
+
text{*Bounded Sequence*}
lemma BseqD: "Bseq X ==> \<exists>K. 0 < K & (\<forall>n. norm (X n) \<le> K)"
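
The new SEQ material proves seq_monosub (every sequence has a monotone subsequence, via the dichotomy argument above) and seq_suble (a subsequence map grows at least linearly: n \<le> f n). The latter follows by induction from subseq_Suc_iff, i.e. f being strictly increasing; a quick randomized check (Python, illustrative only):

    import random

    # seq_suble, sampled: if f (Suc n) > f n for all n (subseq), then n <= f n.
    random.seed(0)
    f = [random.randrange(0, 3)]                       # arbitrary start value f 0 >= 0
    for _ in range(200):
        f.append(f[-1] + random.randrange(1, 4))       # strictly increasing steps
    assert all(n <= f[n] for n in range(len(f)))
    print("seq_suble check passed")
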
--- a/src/HOL/Series.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Series.thy Wed Mar 04 10:45:52 2009 +0100
@@ -312,6 +312,7 @@
shows "\<lbrakk>summable f;
\<forall>d. 0 < f (k + (Suc(Suc 0) * d)) + f (k + ((Suc(Suc 0) * d) + 1))\<rbrakk>
\<Longrightarrow> setsum f {0..<k} < suminf f"
+unfolding One_nat_def
apply (subst suminf_split_initial_segment [where k="k"])
apply assumption
apply simp
@@ -537,7 +538,7 @@
apply (safe, subgoal_tac "\<forall>n. N < n --> f (n) = 0")
prefer 2
apply clarify
- apply(erule_tac x = "n - 1" in allE)
+ apply(erule_tac x = "n - Suc 0" in allE)
apply (simp add:diff_Suc split:nat.splits)
apply (blast intro: norm_ratiotest_lemma)
apply (rule_tac x = "Suc N" in exI, clarify)
--- a/src/HOL/SetInterval.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/SetInterval.thy Wed Mar 04 10:45:52 2009 +0100
@@ -66,10 +66,10 @@
"@INTER_less" :: "nat => nat => 'b set => 'b set" ("(3\<Inter> _<_./ _)" 10)
syntax (xsymbols)
- "@UNION_le" :: "nat \<Rightarrow> nat => 'b set => 'b set" ("(3\<Union>(00\<^bsub>_ \<le> _\<^esub>)/ _)" 10)
- "@UNION_less" :: "nat \<Rightarrow> nat => 'b set => 'b set" ("(3\<Union>(00\<^bsub>_ < _\<^esub>)/ _)" 10)
- "@INTER_le" :: "nat \<Rightarrow> nat => 'b set => 'b set" ("(3\<Inter>(00\<^bsub>_ \<le> _\<^esub>)/ _)" 10)
- "@INTER_less" :: "nat \<Rightarrow> nat => 'b set => 'b set" ("(3\<Inter>(00\<^bsub>_ < _\<^esub>)/ _)" 10)
+ "@UNION_le" :: "nat \<Rightarrow> nat => 'b set => 'b set" ("(3\<Union>(00_ \<le> _)/ _)" 10)
+ "@UNION_less" :: "nat \<Rightarrow> nat => 'b set => 'b set" ("(3\<Union>(00_ < _)/ _)" 10)
+ "@INTER_le" :: "nat \<Rightarrow> nat => 'b set => 'b set" ("(3\<Inter>(00_ \<le> _)/ _)" 10)
+ "@INTER_less" :: "nat \<Rightarrow> nat => 'b set => 'b set" ("(3\<Inter>(00_ < _)/ _)" 10)
translations
"UN i<=n. A" == "UN i:{..n}. A"
@@ -352,11 +352,11 @@
corollary image_Suc_atLeastAtMost[simp]:
"Suc ` {i..j} = {Suc i..Suc j}"
-using image_add_atLeastAtMost[where k=1] by simp
+using image_add_atLeastAtMost[where k="Suc 0"] by simp
corollary image_Suc_atLeastLessThan[simp]:
"Suc ` {i..<j} = {Suc i..<Suc j}"
-using image_add_atLeastLessThan[where k=1] by simp
+using image_add_atLeastLessThan[where k="Suc 0"] by simp
lemma image_add_int_atLeastLessThan:
"(%x. x + (l::int)) ` {0..<u-l} = {l..<u}"
@@ -556,7 +556,7 @@
qed
lemma card_less_Suc2: "0 \<notin> M \<Longrightarrow> card {k. Suc k \<in> M \<and> k < i} = card {k \<in> M. k < Suc i}"
-apply (rule card_bij_eq [of "Suc" _ _ "\<lambda>x. x - 1"])
+apply (rule card_bij_eq [of "Suc" _ _ "\<lambda>x. x - Suc 0"])
apply simp
apply fastsimp
apply auto
@@ -803,7 +803,7 @@
lemma setsum_head_upt_Suc:
"m < n \<Longrightarrow> setsum f {m..<n} = f m + setsum f {Suc m..<n}"
-apply(insert setsum_head_Suc[of m "n - 1" f])
+apply(insert setsum_head_Suc[of m "n - Suc 0" f])
apply (simp add: atLeastLessThanSuc_atLeastAtMost[symmetric] algebra_simps)
done
@@ -835,11 +835,11 @@
corollary setsum_shift_bounds_cl_Suc_ivl:
"setsum f {Suc m..Suc n} = setsum (%i. f(Suc i)){m..n}"
-by (simp add:setsum_shift_bounds_cl_nat_ivl[where k=1,simplified])
+by (simp add:setsum_shift_bounds_cl_nat_ivl[where k="Suc 0", simplified])
corollary setsum_shift_bounds_Suc_ivl:
"setsum f {Suc m..<Suc n} = setsum (%i. f(Suc i)){m..<n}"
-by (simp add:setsum_shift_bounds_nat_ivl[where k=1,simplified])
+by (simp add:setsum_shift_bounds_nat_ivl[where k="Suc 0", simplified])
lemma setsum_shift_lb_Suc0_0:
"f(0::nat) = (0::nat) \<Longrightarrow> setsum f {Suc 0..k} = setsum f {0..k}"
@@ -883,6 +883,7 @@
by (rule setsum_addf)
also from ngt1 have "\<dots> = ?n*a + (\<Sum>i\<in>{..<n}. ?I i*d)" by simp
also from ngt1 have "\<dots> = (?n*a + d*(\<Sum>i\<in>{1..<n}. ?I i))"
+ unfolding One_nat_def
by (simp add: setsum_right_distrib atLeast0LessThan[symmetric] setsum_shift_lb_Suc0_0_upt mult_ac)
also have "(1+1)*\<dots> = (1+1)*?n*a + d*(1+1)*(\<Sum>i\<in>{1..<n}. ?I i)"
by (simp add: left_distrib right_distrib)
@@ -890,7 +891,7 @@
by (cases n) (auto simp: atLeastLessThanSuc_atLeastAtMost)
also from ngt1
have "(1+1)*?n*a + d*(1+1)*(\<Sum>i\<in>{1..n - 1}. ?I i) = ((1+1)*?n*a + d*?I (n - 1)*?I n)"
- by (simp only: mult_ac gauss_sum [of "n - 1"])
+ by (simp only: mult_ac gauss_sum [of "n - 1"], unfold One_nat_def)
(simp add: mult_ac trans [OF add_commute of_nat_Suc [symmetric]])
finally show ?thesis by (simp add: algebra_simps)
next
@@ -906,7 +907,8 @@
"((1::nat) + 1) * (\<Sum>i\<in>{..<n::nat}. a + of_nat(i)*d) =
of_nat(n) * (a + (a + of_nat(n - 1)*d))"
by (rule arith_series_general)
- thus ?thesis by (auto simp add: of_nat_id)
+ thus ?thesis
+ unfolding One_nat_def by (auto simp add: of_nat_id)
qed
lemma arith_series_int:
@@ -946,4 +948,37 @@
show ?case by simp
qed
+subsection {* Products indexed over intervals *}
+
+syntax
+ "_from_to_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b" ("(PROD _ = _.._./ _)" [0,0,0,10] 10)
+ "_from_upto_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b" ("(PROD _ = _..<_./ _)" [0,0,0,10] 10)
+ "_upt_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b" ("(PROD _<_./ _)" [0,0,10] 10)
+ "_upto_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b" ("(PROD _<=_./ _)" [0,0,10] 10)
+syntax (xsymbols)
+ "_from_to_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b" ("(3\<Prod>_ = _.._./ _)" [0,0,0,10] 10)
+ "_from_upto_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b" ("(3\<Prod>_ = _..<_./ _)" [0,0,0,10] 10)
+ "_upt_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b" ("(3\<Prod>_<_./ _)" [0,0,10] 10)
+ "_upto_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b" ("(3\<Prod>_\<le>_./ _)" [0,0,10] 10)
+syntax (HTML output)
+ "_from_to_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b" ("(3\<Prod>_ = _.._./ _)" [0,0,0,10] 10)
+ "_from_upto_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b" ("(3\<Prod>_ = _..<_./ _)" [0,0,0,10] 10)
+ "_upt_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b" ("(3\<Prod>_<_./ _)" [0,0,10] 10)
+ "_upto_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b" ("(3\<Prod>_\<le>_./ _)" [0,0,10] 10)
+syntax (latex_prod output)
+ "_from_to_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b"
+ ("(3\<^raw:$\prod_{>_ = _\<^raw:}^{>_\<^raw:}$> _)" [0,0,0,10] 10)
+ "_from_upto_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b"
+ ("(3\<^raw:$\prod_{>_ = _\<^raw:}^{<>_\<^raw:}$> _)" [0,0,0,10] 10)
+ "_upt_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b"
+ ("(3\<^raw:$\prod_{>_ < _\<^raw:}$> _)" [0,0,10] 10)
+ "_upto_setprod" :: "idt \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b"
+ ("(3\<^raw:$\prod_{>_ \<le> _\<^raw:}$> _)" [0,0,10] 10)
+
+translations
+ "\<Prod>x=a..b. t" == "CONST setprod (%x. t) {a..b}"
+ "\<Prod>x=a..<b. t" == "CONST setprod (%x. t) {a..<b}"
+ "\<Prod>i\<le>n. t" == "CONST setprod (\<lambda>i. t) {..n}"
+ "\<Prod>i<n. t" == "CONST setprod (\<lambda>i. t) {..<n}"
+
end
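
Aside on the arith_series hunks above: the statement shown at arith_series_nat is the closed form of an arithmetic series, with the factor 2 written as 1 + 1 and with no division by 2 anywhere; nothing below goes beyond the lemma text.

  \[
    (1 + 1)\,\sum_{i < n} \bigl(a + i\,d\bigr) \;=\; n\,\bigl(a + (a + (n - 1)\,d)\bigr)
  \]

Pairing the summand for \(i\) with the one for \(n - 1 - i\) gives \(n\) copies of \(a + (a + (n - 1)d)\), which is why the proof can get by with commutativity lemmas and gauss_sum instead of dividing by 2.
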
--- a/src/HOL/Tools/Qelim/langford.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/Qelim/langford.ML Wed Mar 04 10:45:52 2009 +0100
@@ -113,11 +113,6 @@
val eqI = instantiate' [] [SOME ll, SOME rr] @{thm iffI}
in implies_elim (implies_elim eqI thl) thr |> mk_meta_eq end;
-fun partition f [] = ([],[])
- | partition f (x::xs) =
- let val (yes,no) = partition f xs
- in if f x then (x::yes,no) else (yes, x::no) end;
-
fun contains x ct = member (op aconv) (OldTerm.term_frees (term_of ct)) (term_of x);
fun is_eqx x eq = case term_of eq of
@@ -132,11 +127,11 @@
val e = Thm.dest_fun ct
val (x,p) = Thm.dest_abs (SOME xn) (Thm.dest_arg ct)
val Pp = Thm.capply @{cterm "Trueprop"} p
- val (eqs,neqs) = partition (is_eqx x) (all_conjuncts p)
+ val (eqs,neqs) = List.partition (is_eqx x) (all_conjuncts p)
in case eqs of
[] =>
let
- val (dx,ndx) = partition (contains x) neqs
+ val (dx,ndx) = List.partition (contains x) neqs
in case ndx of [] => NONE
| _ =>
conj_aci_rule (Thm.mk_binop @{cterm "op == :: prop => _"} Pp
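
The langford.ML hunk above drops a locally defined partition in favour of List.partition from the SML Basis Library, which splits a list by a predicate while preserving the original order, i.e. the same behaviour as the removed helper. A minimal standalone illustration (example values chosen arbitrarily):

  (* List.partition returns the elements satisfying the predicate first,
     then the remaining elements, each list in its original order *)
  val (evens, odds) = List.partition (fn n => n mod 2 = 0) [1, 2, 3, 4, 5];
  (* evens = [2, 4], odds = [1, 3, 5] *)
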
--- a/src/HOL/Tools/Qelim/presburger.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/Qelim/presburger.ML Wed Mar 04 10:45:52 2009 +0100
@@ -122,14 +122,13 @@
addcongs [@{thm "conj_le_cong"}, @{thm "imp_le_cong"}]
val div_mod_ss = HOL_basic_ss addsimps simp_thms
@ map (symmetric o mk_meta_eq)
- [@{thm "dvd_eq_mod_eq_0"}, @{thm "zdvd_iff_zmod_eq_0"}, @{thm "mod_add1_eq"},
+ [@{thm "dvd_eq_mod_eq_0"},
@{thm "mod_add_left_eq"}, @{thm "mod_add_right_eq"},
- @{thm "mod_add_eq"}, @{thm "zmod_zadd_left_eq"},
- @{thm "zmod_zadd_right_eq"}, @{thm "div_add1_eq"}, @{thm "zdiv_zadd1_eq"}]
+ @{thm "mod_add_eq"}, @{thm "div_add1_eq"}, @{thm "zdiv_zadd1_eq"}]
@ [@{thm "mod_self"}, @{thm "zmod_self"}, @{thm "mod_by_0"},
@{thm "div_by_0"}, @{thm "DIVISION_BY_ZERO"} RS conjunct1,
@{thm "DIVISION_BY_ZERO"} RS conjunct2, @{thm "zdiv_zero"}, @{thm "zmod_zero"},
- @{thm "div_0"}, @{thm "mod_0"}, @{thm "zdiv_1"}, @{thm "zmod_1"}, @{thm "div_1"},
+ @{thm "div_0"}, @{thm "mod_0"}, @{thm "div_by_1"}, @{thm "mod_by_1"}, @{thm "div_1"},
@{thm "mod_1"}, @{thm "Suc_plus1"}]
@ @{thms add_ac}
addsimprocs [cancel_div_mod_proc]
@@ -170,14 +169,14 @@
THEN_ALL_NEW simp_tac ss
THEN_ALL_NEW (TRY o generalize_tac (int_nat_terms ctxt))
THEN_ALL_NEW ObjectLogic.full_atomize_tac
- THEN_ALL_NEW (TRY o thin_prems_tac (is_relevant ctxt))
+ THEN_ALL_NEW (thin_prems_tac (is_relevant ctxt))
THEN_ALL_NEW ObjectLogic.full_atomize_tac
THEN_ALL_NEW div_mod_tac ctxt
THEN_ALL_NEW splits_tac ctxt
THEN_ALL_NEW simp_tac ss
THEN_ALL_NEW CONVERSION Thm.eta_long_conversion
THEN_ALL_NEW nat_to_int_tac ctxt
- THEN_ALL_NEW core_cooper_tac ctxt
+ THEN_ALL_NEW (core_cooper_tac ctxt)
THEN_ALL_NEW finish_tac elim
end;
--- a/src/HOL/Tools/TFL/post.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/TFL/post.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: HOL/Tools/TFL/post.ML
- ID: $Id$
Author: Konrad Slind, Cambridge University Computer Laboratory
Copyright 1997 University of Cambridge
@@ -31,7 +30,7 @@
*--------------------------------------------------------------------------*)
fun termination_goals rules =
map (Type.freeze o HOLogic.dest_Trueprop)
- (foldr (fn (th,A) => gen_union (op aconv) (prems_of th, A)) [] rules);
+ (List.foldr (fn (th,A) => gen_union (op aconv) (prems_of th, A)) [] rules);
(*---------------------------------------------------------------------------
* Finds the termination conditions in (highly massaged) definition and
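
Much of this changeset, starting with the termination_goals change above, merely qualifies foldr and foldl as List.foldr and List.foldl; the call sites are otherwise untouched, which suggests the unqualified versions already had the Basis-Library interface. As a reminder of that interface (operator, then start value, then list):

  val total     = List.foldl (op +) 0 [1, 2, 3, 4];          (* 10 *)
  val flattened = List.foldr (op @) [] [[1], [2, 3], []];    (* [1, 2, 3] *)
  (* both have type ('a * 'b -> 'b) -> 'b -> 'a list -> 'b,
     differing only in the direction of association *)
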
--- a/src/HOL/Tools/TFL/rules.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/TFL/rules.ML Wed Mar 04 10:45:52 2009 +0100
@@ -131,7 +131,7 @@
fun FILTER_DISCH_ALL P thm =
let fun check tm = P (#t (Thm.rep_cterm tm))
- in foldr (fn (tm,th) => if check tm then DISCH tm th else th)
+ in List.foldr (fn (tm,th) => if check tm then DISCH tm th else th)
thm (chyps thm)
end;
--- a/src/HOL/Tools/TFL/tfl.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/TFL/tfl.ML Wed Mar 04 10:45:52 2009 +0100
@@ -330,7 +330,7 @@
val dummy = map (no_repeat_vars thy) pats
val rows = ListPair.zip (map (fn x => ([]:term list,[x])) pats,
map (fn (t,i) => (t,(i,true))) (enumerate R))
- val names = foldr OldTerm.add_term_names [] R
+ val names = List.foldr OldTerm.add_term_names [] R
val atype = type_of(hd pats)
and aname = Name.variant names "a"
val a = Free(aname,atype)
@@ -492,7 +492,7 @@
val tych = Thry.typecheck thy
val WFREC_THM0 = R.ISPEC (tych functional) Thms.WFREC_COROLLARY
val Const("All",_) $ Abs(Rname,Rtype,_) = concl WFREC_THM0
- val R = Free (Name.variant (foldr OldTerm.add_term_names [] eqns) Rname,
+ val R = Free (Name.variant (List.foldr OldTerm.add_term_names [] eqns) Rname,
Rtype)
val WFREC_THM = R.ISPECL [tych R, tych g] WFREC_THM0
val ([proto_def, WFR],_) = S.strip_imp(concl WFREC_THM)
@@ -533,7 +533,7 @@
Display.prths extractants;
())
else ()
- val TCs = foldr (gen_union (op aconv)) [] TCl
+ val TCs = List.foldr (gen_union (op aconv)) [] TCl
val full_rqt = WFR::TCs
val R' = S.mk_select{Bvar=R1, Body=S.list_mk_conj full_rqt}
val R'abs = S.rand R'
@@ -690,7 +690,7 @@
let val tych = Thry.typecheck thy
val ty_info = Thry.induct_info thy
in fn pats =>
- let val names = foldr OldTerm.add_term_names [] pats
+ let val names = List.foldr OldTerm.add_term_names [] pats
val T = type_of (hd pats)
val aname = Name.variant names "a"
val vname = Name.variant (aname::names) "v"
@@ -843,7 +843,7 @@
val (pats,TCsl) = ListPair.unzip pat_TCs_list
val case_thm = complete_cases thy pats
val domain = (type_of o hd) pats
- val Pname = Name.variant (foldr (Library.foldr OldTerm.add_term_names)
+ val Pname = Name.variant (List.foldr (Library.foldr OldTerm.add_term_names)
[] (pats::TCsl)) "P"
val P = Free(Pname, domain --> HOLogic.boolT)
val Sinduct = R.SPEC (tych P) Sinduction
@@ -854,7 +854,7 @@
val cases = map (fn pat => Term.betapply (Sinduct_assumf, pat)) pats
val tasks = U.zip3 cases TCl' (R.CONJUNCTS Rinduct_assum)
val proved_cases = map (prove_case fconst thy) tasks
- val v = Free (Name.variant (foldr OldTerm.add_term_names [] (map concl proved_cases))
+ val v = Free (Name.variant (List.foldr OldTerm.add_term_names [] (map concl proved_cases))
"v",
domain)
val vtyped = tych v
--- a/src/HOL/Tools/atp_wrapper.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/atp_wrapper.ML Wed Mar 04 10:45:52 2009 +0100
@@ -78,10 +78,14 @@
val failure = find_failure proof
val success = rc = 0 andalso is_none failure
val message =
- if isSome failure then "Could not prove: " ^ the failure
- else if rc <> 0
- then "Exited with return code " ^ string_of_int rc ^ ": " ^ proof
- else "Try this command: " ^ produce_answer (proof, thm_names, ctxt, goal, subgoalno)
+ if success then "Try this command: " ^ produce_answer (proof, thm_names, ctxt, goal, subgoalno)
+ else "Could not prove goal."
+ val _ = if isSome failure
+ then Output.debug (fn () => "Sledgehammer failure: " ^ the failure ^ "\nOutput: " ^ proof)
+ else ()
+ val _ = if rc <> 0
+ then Output.debug (fn () => "Sledgehammer exited with return code " ^ string_of_int rc ^ ":\n" ^ proof)
+ else ()
in (success, message) end;
@@ -92,7 +96,7 @@
fun tptp_prover_opts_full max_new theory_const full command =
external_prover
- (ResAtp.write_problem_files ResHolClause.tptp_write_file max_new theory_const)
+ (ResAtp.write_problem_files false max_new theory_const)
command
ResReconstruct.find_failure_e_vamp_spass
(if full then ResReconstruct.structured_proof else ResReconstruct.lemma_list_tstp);
@@ -149,7 +153,7 @@
(* SPASS *)
fun spass_opts max_new theory_const = external_prover
- (ResAtp.write_problem_files ResHolClause.dfg_write_file max_new theory_const)
+ (ResAtp.write_problem_files true max_new theory_const)
(Path.explode "$SPASS_HOME/SPASS", "-Auto -SOS=1 -PGiven=0 -PProblem=0 -Splits=0 -FullRed=0 -DocProof")
ResReconstruct.find_failure_e_vamp_spass
ResReconstruct.lemma_list_dfg;
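
The atp_wrapper.ML hunks above, together with the res_atp.ML hunks further down, replace the higher-order writer argument of write_problem_files by a boolean dfg flag; the callee now selects ResHolClause.dfg_write_file or ResHolClause.tptp_write_file itself. A sketch of that refactoring shape, with placeholder functions standing in for the real writers:

  (* before: the caller passed the writer function directly;
     after: the caller passes a flag and the callee picks the writer *)
  fun tptp_writer name = "TPTP problem " ^ name;   (* placeholder *)
  fun dfg_writer name  = "DFG problem " ^ name;    (* placeholder *)

  fun write_problem dfg name =
    let val writer = if dfg then dfg_writer else tptp_writer
    in writer name end;

  val spass_style = write_problem true "prob_1";   (* "DFG problem prob_1" *)
  val tptp_style  = write_problem false "prob_1";  (* "TPTP problem prob_1" *)
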
--- a/src/HOL/Tools/datatype_abs_proofs.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/datatype_abs_proofs.ML Wed Mar 04 10:45:52 2009 +0100
@@ -96,7 +96,7 @@
val descr' = List.concat descr;
val recTs = get_rec_types descr' sorts;
- val used = foldr OldTerm.add_typ_tfree_names [] recTs;
+ val used = List.foldr OldTerm.add_typ_tfree_names [] recTs;
val newTs = Library.take (length (hd descr), recTs);
val induct_Ps = map head_of (HOLogic.dest_conj (HOLogic.dest_Trueprop (concl_of induct)));
@@ -140,7 +140,7 @@
end;
val Ts = map (typ_of_dtyp descr' sorts) cargs;
- val (_, _, prems, t1s, t2s) = foldr mk_prem (1, 1, [], [], []) (cargs ~~ Ts)
+ val (_, _, prems, t1s, t2s) = List.foldr mk_prem (1, 1, [], [], []) (cargs ~~ Ts)
in (rec_intr_ts @ [Logic.list_implies (prems, HOLogic.mk_Trueprop
(rec_set $ list_comb (Const (cname, Ts ---> T), t1s) $
@@ -280,7 +280,7 @@
val descr' = List.concat descr;
val recTs = get_rec_types descr' sorts;
- val used = foldr OldTerm.add_typ_tfree_names [] recTs;
+ val used = List.foldr OldTerm.add_typ_tfree_names [] recTs;
val newTs = Library.take (length (hd descr), recTs);
val T' = TFree (Name.variant used "'t", HOLogic.typeS);
--- a/src/HOL/Tools/datatype_aux.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/datatype_aux.ML Wed Mar 04 10:45:52 2009 +0100
@@ -155,7 +155,7 @@
val prem' = hd (prems_of exhaustion);
val _ $ (_ $ lhs $ _) = hd (rev (Logic.strip_assums_hyp prem'));
val exhaustion' = cterm_instantiate [(cterm_of thy (head_of lhs),
- cterm_of thy (foldr (fn ((_, T), t) => Abs ("z", T, t))
+ cterm_of thy (List.foldr (fn ((_, T), t) => Abs ("z", T, t))
(Bound 0) params))] exhaustion
in compose_tac (false, exhaustion', nprems_of exhaustion) i state
end;
--- a/src/HOL/Tools/datatype_codegen.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/datatype_codegen.ML Wed Mar 04 10:45:52 2009 +0100
@@ -6,8 +6,8 @@
signature DATATYPE_CODEGEN =
sig
- val get_eq: theory -> string -> thm list
- val get_case_cert: theory -> string -> thm
+ val mk_eq: theory -> string -> thm list
+ val mk_case_cert: theory -> string -> thm
val setup: theory -> theory
end;
@@ -85,7 +85,7 @@
val dts' = map (DatatypeAux.typ_of_dtyp descr sorts) dts;
val T = Type (tname, dts');
val rest = mk_term_of_def gr "and " xs;
- val (_, eqs) = foldl_map (fn (prfx, (cname, Ts)) =>
+ val (_, eqs) = Library.foldl_map (fn (prfx, (cname, Ts)) =>
let val args = map (fn i =>
str ("x" ^ string_of_int i)) (1 upto length Ts)
in (" | ", Pretty.blk (4,
@@ -216,8 +216,8 @@
let
val ts1 = Library.take (i, ts);
val t :: ts2 = Library.drop (i, ts);
- val names = foldr OldTerm.add_term_names
- (map (fst o fst o dest_Var) (foldr OldTerm.add_term_vars [] ts1)) ts1;
+ val names = List.foldr OldTerm.add_term_names
+ (map (fst o fst o dest_Var) (List.foldr OldTerm.add_term_vars [] ts1)) ts1;
val (Ts, dT) = split_last (Library.take (i+1, fst (strip_type T)));
fun pcase [] [] [] gr = ([], gr)
@@ -323,7 +323,7 @@
(* case certificates *)
-fun get_case_cert thy tyco =
+fun mk_case_cert thy tyco =
let
val raw_thms =
(#case_rewrites o DatatypePackage.the_datatype thy) tyco;
@@ -357,10 +357,13 @@
fun add_datatype_cases dtco thy =
let
val {case_rewrites, ...} = DatatypePackage.the_datatype thy dtco;
- val certs = get_case_cert thy dtco;
+ val cert = mk_case_cert thy dtco;
+ fun add_case_liberal thy = thy
+ |> try (Code.add_case cert)
+ |> the_default thy;
in
thy
- |> Code.add_case certs
+ |> add_case_liberal
|> fold_rev Code.add_default_eqn case_rewrites
end;
@@ -369,10 +372,10 @@
local
-val not_sym = thm "HOL.not_sym";
-val not_false_true = iffD2 OF [nth (thms "HOL.simp_thms") 7, TrueI];
-val refl = thm "refl";
-val eqTrueI = thm "eqTrueI";
+val not_sym = @{thm HOL.not_sym};
+val not_false_true = iffD2 OF [nth @{thms HOL.simp_thms} 7, TrueI];
+val refl = @{thm refl};
+val eqTrueI = @{thm eqTrueI};
fun mk_distinct cos =
let
@@ -397,7 +400,7 @@
in
-fun get_eq thy dtco =
+fun mk_eq thy dtco =
let
val (vs, cs) = DatatypePackage.the_datatype_spec thy dtco;
fun mk_triv_inject co =
@@ -445,7 +448,7 @@
in (thm', lthy') end;
fun tac thms = Class.intro_classes_tac []
THEN ALLGOALS (ProofContext.fact_tac thms);
- fun get_eq' thy dtco = get_eq thy dtco
+ fun mk_eq' thy dtco = mk_eq thy dtco
|> map (Code_Unit.constrain_thm thy [HOLogic.class_eq])
|> map Simpdata.mk_eq
|> map (MetaSimplifier.rewrite_rule [Thm.transfer thy @{thm equals_eq}])
@@ -460,10 +463,10 @@
([pairself (Thm.ctyp_of thy) (TVar (("'a", 0), @{sort eq}), Logic.varifyT ty)], [])
|> Simpdata.mk_eq
|> AxClass.unoverload thy;
- fun get_thms () = (eq_refl, false)
- :: rev (map (rpair true) (get_eq' (Theory.deref thy_ref) dtco));
+ fun mk_thms () = (eq_refl, false)
+ :: rev (map (rpair true) (mk_eq' (Theory.deref thy_ref) dtco));
in
- Code.add_eqnl (const, Lazy.lazy get_thms) thy
+ Code.add_eqnl (const, Lazy.lazy mk_thms) thy
end;
in
thy
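
The add_datatype_cases hunk above wraps Code.add_case in a "liberal" update: the case certificate is added when possible, and the theory is left unchanged when the addition fails. A self-contained sketch of that pattern — try and the_default below only mimic the Isabelle combinators of the same names and are not the originals:

  infix |> ;
  fun x |> f = f x;
  fun try f x = SOME (f x) handle _ => NONE;
  fun the_default d NONE = d
    | the_default _ (SOME y) = y;

  (* apply an update, but fall back to the unchanged value on failure *)
  fun update_liberal upd thy = thy |> try upd |> the_default thy;

  val ok   = update_liberal (fn n => n + 1) 41;           (* 42 *)
  val kept = update_liberal (fn _ => raise Fail "no") 41; (* 41 *)
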
--- a/src/HOL/Tools/datatype_package.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/datatype_package.ML Wed Mar 04 10:45:52 2009 +0100
@@ -631,8 +631,8 @@
local
-val sym_datatype = Pretty.str "\\isacommand{datatype}";
-val sym_binder = Pretty.str "\\ {\\isacharequal}";
+val sym_datatype = Pretty.command "datatype";
+val sym_binder = Pretty.str "\\ {\\isacharequal}"; (*FIXME use proper symbol*)
val sym_sep = Pretty.str "{\\isacharbar}\\ ";
in
@@ -659,7 +659,7 @@
| pretty_constr (co, [ty']) =
(Pretty.block o Pretty.breaks)
[Syntax.pretty_term ctxt (Const (co, ty' --> ty)),
- Syntax.pretty_typ ctxt ty']
+ pretty_typ_br ty']
| pretty_constr (co, tys) =
(Pretty.block o Pretty.breaks)
(Syntax.pretty_term ctxt (Const (co, tys ---> ty)) ::
--- a/src/HOL/Tools/datatype_prop.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/datatype_prop.ML Wed Mar 04 10:45:52 2009 +0100
@@ -205,7 +205,7 @@
let
val descr' = List.concat descr;
val recTs = get_rec_types descr' sorts;
- val used = foldr OldTerm.add_typ_tfree_names [] recTs;
+ val used = List.foldr OldTerm.add_typ_tfree_names [] recTs;
val (rec_result_Ts, reccomb_fn_Ts) = make_primrec_Ts descr sorts used;
@@ -255,7 +255,7 @@
let
val descr' = List.concat descr;
val recTs = get_rec_types descr' sorts;
- val used = foldr OldTerm.add_typ_tfree_names [] recTs;
+ val used = List.foldr OldTerm.add_typ_tfree_names [] recTs;
val newTs = Library.take (length (hd descr), recTs);
val T' = TFree (Name.variant used "'t", HOLogic.typeS);
@@ -302,7 +302,7 @@
let
val descr' = List.concat descr;
val recTs = get_rec_types descr' sorts;
- val used' = foldr OldTerm.add_typ_tfree_names [] recTs;
+ val used' = List.foldr OldTerm.add_typ_tfree_names [] recTs;
val newTs = Library.take (length (hd descr), recTs);
val T' = TFree (Name.variant used' "'t", HOLogic.typeS);
val P = Free ("P", T' --> HOLogic.boolT);
@@ -319,13 +319,13 @@
val eqn = HOLogic.mk_eq (Free ("x", T),
list_comb (Const (cname, Ts ---> T), frees));
val P' = P $ list_comb (f, frees)
- in ((foldr (fn (Free (s, T), t) => HOLogic.mk_all (s, T, t))
+ in ((List.foldr (fn (Free (s, T), t) => HOLogic.mk_all (s, T, t))
(HOLogic.imp $ eqn $ P') frees)::t1s,
- (foldr (fn (Free (s, T), t) => HOLogic.mk_exists (s, T, t))
+ (List.foldr (fn (Free (s, T), t) => HOLogic.mk_exists (s, T, t))
(HOLogic.conj $ eqn $ (HOLogic.Not $ P')) frees)::t2s)
end;
- val (t1s, t2s) = foldr process_constr ([], []) (constrs ~~ fs);
+ val (t1s, t2s) = List.foldr process_constr ([], []) (constrs ~~ fs);
val lhs = P $ (comb_t $ Free ("x", T))
in
(HOLogic.mk_Trueprop (HOLogic.mk_eq (lhs, mk_conj t1s)),
@@ -422,7 +422,7 @@
val tnames = Name.variant_list ["v"] (make_tnames Ts);
val frees = tnames ~~ Ts
in
- foldr (fn ((s, T'), t) => HOLogic.mk_exists (s, T', t))
+ List.foldr (fn ((s, T'), t) => HOLogic.mk_exists (s, T', t))
(HOLogic.mk_eq (Free ("v", T),
list_comb (Const (cname, Ts ---> T), map Free frees))) frees
end
--- a/src/HOL/Tools/datatype_realizer.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/datatype_realizer.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: HOL/Tools/datatype_realizer.ML
- ID: $Id$
Author: Stefan Berghofer, TU Muenchen
Program extraction from proofs involving datatypes:
@@ -57,8 +56,8 @@
fun mk_all i s T t =
if i mem is then list_all_free ([(s, T)], t) else t;
- val (prems, rec_fns) = split_list (List.concat (snd (foldl_map
- (fn (j, ((i, (_, _, constrs)), T)) => foldl_map (fn (j, (cname, cargs)) =>
+ val (prems, rec_fns) = split_list (List.concat (snd (Library.foldl_map
+ (fn (j, ((i, (_, _, constrs)), T)) => Library.foldl_map (fn (j, (cname, cargs)) =>
let
val Ts = map (typ_of_dtyp descr sorts) cargs;
val tnames = Name.variant_list pnames (DatatypeProp.make_tnames Ts);
@@ -139,8 +138,8 @@
tname_of (body_type T) mem ["set", "bool"]) ivs);
val ivs2 = map (fn (ixn, _) => Var (ixn, valOf (AList.lookup (op =) rvs ixn))) ivs;
- val prf = foldr forall_intr_prf
- (foldr (fn ((f, p), prf) =>
+ val prf = List.foldr forall_intr_prf
+ (List.foldr (fn ((f, p), prf) =>
(case head_of (strip_abs_body f) of
Free (s, T) =>
let val T' = Logic.varifyT T
@@ -151,7 +150,7 @@
(Proofterm.proof_combP
(prf_of thm', map PBound (length prems - 1 downto 0))) (rec_fns ~~ prems_of thm)) ivs2;
- val r' = if null is then r else Logic.varify (foldr (uncurry lambda)
+ val r' = if null is then r else Logic.varify (List.foldr (uncurry lambda)
r (map Logic.unvarify ivs1 @ filter_out is_unit
(map (head_of o strip_abs_body) rec_fns)));
@@ -201,7 +200,7 @@
val P = Var (("P", 0), rT' --> HOLogic.boolT);
val prf = forall_intr_prf (y, forall_intr_prf (P,
- foldr (fn ((p, r), prf) =>
+ List.foldr (fn ((p, r), prf) =>
forall_intr_prf (Logic.legacy_varify r, AbsP ("H", SOME (Logic.varify p),
prf))) (Proofterm.proof_combP (prf_of thm',
map PBound (length prems - 1 downto 0))) (prems ~~ rs)));
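
Several files in this changeset — datatype_codegen.ML and datatype_realizer.ML above, and inductive_codegen.ML, inductive_realizer.ML and refute.ML below — qualify foldl_map as Library.foldl_map. The combinator threads a state through a list while producing one result per element, in the style of a map-with-state; the re-implementation below is only illustrative, not the Isabelle original:

  (* thread a state left-to-right while mapping each element *)
  fun foldl_map f (acc, []) = (acc, [])
    | foldl_map f (acc, x :: xs) =
        let
          val (acc', y) = f (acc, x)
          val (acc'', ys) = foldl_map f (acc', xs)
        in (acc'', y :: ys) end;

  (* number the elements of a list, using the counter as the state *)
  val (next, numbered) =
    foldl_map (fn (i, x) => (i + 1, (i, x))) (0, ["a", "b", "c"]);
  (* next = 3, numbered = [(0, "a"), (1, "b"), (2, "c")] *)
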
--- a/src/HOL/Tools/datatype_rep_proofs.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/datatype_rep_proofs.ML Wed Mar 04 10:45:52 2009 +0100
@@ -83,7 +83,7 @@
val branchT = if null branchTs then HOLogic.unitT
else BalancedTree.make (fn (T, U) => Type ("+", [T, U])) branchTs;
val arities = get_arities descr' \ 0;
- val unneeded_vars = hd tyvars \\ foldr OldTerm.add_typ_tfree_names [] (leafTs' @ branchTs);
+ val unneeded_vars = hd tyvars \\ List.foldr OldTerm.add_typ_tfree_names [] (leafTs' @ branchTs);
val leafTs = leafTs' @ (map (fn n => TFree (n, (the o AList.lookup (op =) sorts) n)) unneeded_vars);
val recTs = get_rec_types descr' sorts;
val newTs = Library.take (length (hd descr), recTs);
@@ -143,7 +143,7 @@
in mk_inj branchT (length branchTs) (1 + find_index_eq T' branchTs)
end;
- val mk_lim = foldr (fn (T, t) => Lim $ mk_fun_inj T (Abs ("x", T, t)));
+ val mk_lim = List.foldr (fn (T, t) => Lim $ mk_fun_inj T (Abs ("x", T, t)));
(************** generate introduction rules for representing set **********)
@@ -169,7 +169,7 @@
in (j + 1, prems, (Leaf $ mk_inj T (mk_Free "x" T j))::ts)
end);
- val (_, prems, ts) = foldr mk_prem (1, [], []) cargs;
+ val (_, prems, ts) = List.foldr mk_prem (1, [], []) cargs;
val concl = HOLogic.mk_Trueprop
(Free (s, UnivT') $ mk_univ_inj ts n i)
in Logic.list_implies (prems, concl)
@@ -229,7 +229,7 @@
| _ => (j + 1, free_t::l_args, (Leaf $ mk_inj T free_t)::r_args))
end;
- val (_, l_args, r_args) = foldr constr_arg (1, [], []) cargs;
+ val (_, l_args, r_args) = List.foldr constr_arg (1, [], []) cargs;
val constrT = (map (typ_of_dtyp descr' sorts) cargs) ---> T;
val abs_name = Sign.intern_const thy ("Abs_" ^ tname);
val rep_name = Sign.intern_const thy ("Rep_" ^ tname);
@@ -357,7 +357,7 @@
in (thy', char_thms' @ char_thms) end;
- val (thy5, iso_char_thms) = apfst Theory.checkpoint (foldr make_iso_defs
+ val (thy5, iso_char_thms) = apfst Theory.checkpoint (List.foldr make_iso_defs
(add_path flat_names big_name thy4, []) (tl descr));
(* prove isomorphism properties *)
@@ -447,7 +447,7 @@
in (inj_thms'' @ inj_thms, elem_thms @ (split_conj_thm elem_thm))
end;
- val (iso_inj_thms_unfolded, iso_elem_thms) = foldr prove_iso_thms
+ val (iso_inj_thms_unfolded, iso_elem_thms) = List.foldr prove_iso_thms
([], map #3 newT_iso_axms) (tl descr);
val iso_inj_thms = map snd newT_iso_inj_thms @
map (fn r => r RS @{thm injD}) iso_inj_thms_unfolded;
--- a/src/HOL/Tools/function_package/fundef_common.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/function_package/fundef_common.ML Wed Mar 04 10:45:52 2009 +0100
@@ -82,7 +82,7 @@
psimps, pinducts, termination, defname}) phi =
let
val term = Morphism.term phi val thm = Morphism.thm phi val fact = Morphism.fact phi
- val name = Binding.base_name o Morphism.binding phi o Binding.name
+ val name = Binding.name_of o Morphism.binding phi o Binding.name
in
FundefCtxData { add_simps = add_simps, case_names = case_names,
fs = map term fs, R = term R, psimps = fact psimps,
--- a/src/HOL/Tools/function_package/fundef_package.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/function_package/fundef_package.ML Wed Mar 04 10:45:52 2009 +0100
@@ -99,8 +99,8 @@
val constrn_fxs = map (fn (b, T, mx) => (b, SOME (the_default default_constraint T), mx))
val ((fixes0, spec0), ctxt') =
prep (constrn_fxs fixspec) (map (single o apsnd single) eqnss) lthy
- val fixes = map (apfst (apfst Binding.base_name)) fixes0;
- val spec = map (apfst (apfst Binding.base_name)) spec0;
+ val fixes = map (apfst (apfst Binding.name_of)) fixes0;
+ val spec = map (apfst (apfst Binding.name_of)) spec0;
val (eqs, post, sort_cont, cnames) = FundefCommon.get_preproc lthy config flags ctxt' fixes spec
val defname = mk_defname fixes
--- a/src/HOL/Tools/function_package/scnp_solve.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/function_package/scnp_solve.ML Wed Mar 04 10:45:52 2009 +0100
@@ -46,7 +46,7 @@
fun num_prog_pts (GP (arities, _)) = length arities ;
fun num_graphs (GP (_, gs)) = length gs ;
fun arity (GP (arities, gl)) i = nth arities i ;
-fun ndigits (GP (arities, _)) = IntInf.log2 (foldl (op +) 0 arities) + 1
+fun ndigits (GP (arities, _)) = IntInf.log2 (List.foldl (op +) 0 arities) + 1
(** Propositional formulas **)
@@ -79,7 +79,7 @@
fun var_constrs (gp as GP (arities, gl)) =
let
val n = Int.max (num_graphs gp, num_prog_pts gp)
- val k = foldl Int.max 1 arities
+ val k = List.foldl Int.max 1 arities
(* Injective, provided a < 8, x < n, and i < k. *)
fun prod a x i j = ((j * k + i) * n + x) * 8 + a + 1
--- a/src/HOL/Tools/function_package/size.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/function_package/size.ML Wed Mar 04 10:45:52 2009 +0100
@@ -115,7 +115,7 @@
then HOLogic.zero
else foldl1 plus (ts @ [HOLogic.Suc_zero])
in
- foldr (fn (T, t') => Abs ("x", T, t')) t (Ts @ replicate k HOLogic.natT)
+ List.foldr (fn (T, t') => Abs ("x", T, t')) t (Ts @ replicate k HOLogic.natT)
end;
val fs = maps (fn (_, (name, _, constrs)) =>
--- a/src/HOL/Tools/inductive_codegen.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/inductive_codegen.ML Wed Mar 04 10:45:52 2009 +0100
@@ -71,7 +71,7 @@
{intros = intros |>
Symtab.update (s, (AList.update Thm.eq_thm_prop
(thm, (thyname_of s, nparms)) rules)),
- graph = foldr (uncurry (Graph.add_edge o pair s))
+ graph = List.foldr (uncurry (Graph.add_edge o pair s))
(Library.foldl add_node (graph, s :: cs)) cs,
eqns = eqns} thy
end
@@ -152,7 +152,7 @@
fun cprod ([], ys) = []
| cprod (x :: xs, ys) = map (pair x) ys @ cprod (xs, ys);
-fun cprods xss = foldr (map op :: o cprod) [[]] xss;
+fun cprods xss = List.foldr (map op :: o cprod) [[]] xss;
datatype mode = Mode of (int list option list * int list) * int list * mode option list;
@@ -357,7 +357,7 @@
val (in_ts, out_ts) = get_args is 1 ts;
val ((all_vs', eqs), in_ts') =
- foldl_map check_constrt ((all_vs, []), in_ts);
+ Library.foldl_map check_constrt ((all_vs, []), in_ts);
fun compile_prems out_ts' vs names [] gr =
let
@@ -365,8 +365,8 @@
(invoke_codegen thy defs dep module false) out_ts gr;
val (eq_ps, gr3) = fold_map compile_eq eqs gr2;
val ((names', eqs'), out_ts'') =
- foldl_map check_constrt ((names, []), out_ts');
- val (nvs, out_ts''') = foldl_map distinct_v
+ Library.foldl_map check_constrt ((names, []), out_ts');
+ val (nvs, out_ts''') = Library.foldl_map distinct_v
((names', map (fn x => (x, [x])) vs), out_ts'');
val (out_ps', gr4) = fold_map
(invoke_codegen thy defs dep module false) (out_ts''') gr3;
@@ -383,8 +383,8 @@
select_mode_prem thy modes' vs' ps;
val ps' = filter_out (equal p) ps;
val ((names', eqs), out_ts') =
- foldl_map check_constrt ((names, []), out_ts);
- val (nvs, out_ts'') = foldl_map distinct_v
+ Library.foldl_map check_constrt ((names, []), out_ts);
+ val (nvs, out_ts'') = Library.foldl_map distinct_v
((names', map (fn x => (x, [x])) vs), out_ts');
val (out_ps, gr0) = fold_map
(invoke_codegen thy defs dep module false) out_ts'' gr;
--- a/src/HOL/Tools/inductive_package.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/inductive_package.ML Wed Mar 04 10:45:52 2009 +0100
@@ -260,7 +260,7 @@
fun check_rule ctxt cs params ((binding, att), rule) =
let
- val err_name = Binding.display binding;
+ val err_name = Binding.str_of binding;
val params' = Term.variant_frees rule (Logic.strip_params rule);
val frees = rev (map Free params');
val concl = subst_bounds (frees, Logic.strip_assums_concl rule);
@@ -517,7 +517,7 @@
(HOLogic.dest_Trueprop (Logic.strip_assums_concl r))
in list_all_free (Logic.strip_params r,
- Logic.list_implies (map HOLogic.mk_Trueprop (foldr mk_prem
+ Logic.list_implies (map HOLogic.mk_Trueprop (List.foldr mk_prem
[] (map HOLogic.dest_Trueprop (Logic.strip_assums_hyp r))),
HOLogic.mk_Trueprop (list_comb (List.nth (preds, i), ys))))
end;
@@ -541,7 +541,7 @@
(* make predicate for instantiation of abstract induction rule *)
val ind_pred = fold_rev lambda (bs @ xs) (foldr1 HOLogic.mk_conj
- (map_index (fn (i, P) => foldr HOLogic.mk_imp
+ (map_index (fn (i, P) => List.foldr HOLogic.mk_imp
(list_comb (P, make_args' argTs xs (binder_types (fastype_of P))))
(make_bool_args HOLogic.mk_not I bs i)) preds));
@@ -624,7 +624,7 @@
map HOLogic.mk_eq (make_args' argTs xs Ts ~~ ts) @
map (subst o HOLogic.dest_Trueprop)
(Logic.strip_assums_hyp r)
- in foldr (fn ((x, T), P) => HOLogic.exists_const T $ (Abs (x, T, P)))
+ in List.foldr (fn ((x, T), P) => HOLogic.exists_const T $ (Abs (x, T, P)))
(if null ps then HOLogic.true_const else foldr1 HOLogic.mk_conj ps)
(Logic.strip_params r)
end
@@ -639,7 +639,7 @@
val rec_name =
if Binding.is_empty alt_name then
- Binding.name (space_implode "_" (map (Binding.base_name o fst) cnames_syn))
+ Binding.name (space_implode "_" (map (Binding.name_of o fst) cnames_syn))
else alt_name;
val ((rec_const, (_, fp_def)), ctxt') = ctxt |>
@@ -674,9 +674,9 @@
fun declare_rules kind rec_binding coind no_ind cnames intrs intr_bindings intr_atts
elims raw_induct ctxt =
let
- val rec_name = Binding.base_name rec_binding;
- val rec_qualified = Binding.qualify rec_name;
- val intr_names = map Binding.base_name intr_bindings;
+ val rec_name = Binding.name_of rec_binding;
+ val rec_qualified = Binding.qualify false rec_name;
+ val intr_names = map Binding.name_of intr_bindings;
val ind_case_names = RuleCases.case_names intr_names;
val induct =
if coind then
@@ -734,11 +734,11 @@
cs intros monos params cnames_syn ctxt =
let
val _ = null cnames_syn andalso error "No inductive predicates given";
- val names = map (Binding.base_name o fst) cnames_syn;
+ val names = map (Binding.name_of o fst) cnames_syn;
val _ = message (quiet_mode andalso not verbose)
("Proofs for " ^ coind_prefix coind ^ "inductive predicate(s) " ^ commas_quote names);
- val cnames = map (Sign.full_name (ProofContext.theory_of ctxt) o #1) cnames_syn; (* FIXME *)
+ val cnames = map (LocalTheory.full_name ctxt o #1) cnames_syn; (* FIXME *)
val ((intr_names, intr_atts), intr_ts) =
apfst split_list (split_list (map (check_rule ctxt cs params) intros));
@@ -749,7 +749,7 @@
val (intrs, unfold) = prove_intrs quiet_mode coind mono fp_def (length bs + length xs)
params intr_ts rec_preds_defs ctxt1;
val elims = if no_elim then [] else
- prove_elims quiet_mode cs params intr_ts (map Binding.base_name intr_names)
+ prove_elims quiet_mode cs params intr_ts (map Binding.name_of intr_names)
unfold rec_preds_defs ctxt1;
val raw_induct = zero_var_indexes
(if no_ind then Drule.asm_rl else
@@ -793,7 +793,7 @@
(* abbrevs *)
- val (_, ctxt1) = Variable.add_fixes (map (Binding.base_name o fst o fst) cnames_syn) lthy;
+ val (_, ctxt1) = Variable.add_fixes (map (Binding.name_of o fst o fst) cnames_syn) lthy;
fun get_abbrev ((name, atts), t) =
if can (Logic.strip_assums_concl #> Logic.dest_equals) t then
@@ -802,7 +802,7 @@
error "Abbreviations may not have names or attributes";
val ((x, T), rhs) = LocalDefs.abs_def (snd (LocalDefs.cert_def ctxt1 t));
val var =
- (case find_first (fn ((c, _), _) => Binding.base_name c = x) cnames_syn of
+ (case find_first (fn ((c, _), _) => Binding.name_of c = x) cnames_syn of
NONE => error ("Undeclared head of abbreviation " ^ quote x)
| SOME ((b, T'), mx) =>
if T <> T' then error ("Bad type specification for abbreviation " ^ quote x)
@@ -811,17 +811,17 @@
else NONE;
val abbrevs = map_filter get_abbrev spec;
- val bs = map (Binding.base_name o fst o fst) abbrevs;
+ val bs = map (Binding.name_of o fst o fst) abbrevs;
(* predicates *)
val pre_intros = filter_out (is_some o get_abbrev) spec;
- val cnames_syn' = filter_out (member (op =) bs o Binding.base_name o fst o fst) cnames_syn;
- val cs = map (Free o apfst Binding.base_name o fst) cnames_syn';
+ val cnames_syn' = filter_out (member (op =) bs o Binding.name_of o fst o fst) cnames_syn;
+ val cs = map (Free o apfst Binding.name_of o fst) cnames_syn';
val ps = map Free pnames;
- val (_, ctxt2) = lthy |> Variable.add_fixes (map (Binding.base_name o fst o fst) cnames_syn');
+ val (_, ctxt2) = lthy |> Variable.add_fixes (map (Binding.name_of o fst o fst) cnames_syn');
val _ = map (fn abbr => LocalDefs.fixed_abbrev abbr ctxt2) abbrevs;
val ctxt3 = ctxt2 |> fold (snd oo LocalDefs.fixed_abbrev) abbrevs;
val expand = Assumption.export_term ctxt3 lthy #> ProofContext.cert_term lthy;
@@ -854,7 +854,7 @@
in
lthy
|> LocalTheory.set_group (serial_string ())
- |> gen_add_inductive_i mk_def flags cs (map (apfst Binding.base_name o fst) ps) intrs monos
+ |> gen_add_inductive_i mk_def flags cs (map (apfst Binding.name_of o fst) ps) intrs monos
end;
val add_inductive_i = gen_add_inductive_i add_ind_def;
@@ -954,7 +954,7 @@
else if Binding.is_empty b then ((a, atts), B)
else error "Illegal nested case names"
| ((b, _), _) => error "Illegal simultaneous specification")
- | (a, _) => error ("Illegal local specification parameters for " ^ quote (Binding.base_name a)));
+ | (a, _) => error ("Illegal local specification parameters for " ^ quote (Binding.str_of a)));
fun gen_ind_decl mk_def coind =
P.fixes -- P.for_fixes --
--- a/src/HOL/Tools/inductive_realizer.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/inductive_realizer.ML Wed Mar 04 10:45:52 2009 +0100
@@ -55,7 +55,7 @@
(subst_bound (Free (name, T), P), subst_bound (Free (name, T), Q))
| strip_one _ (Const ("==>", _) $ P $ Q) = (P, Q);
-fun relevant_vars prop = foldr (fn
+fun relevant_vars prop = List.foldr (fn
(Var ((a, i), T), vs) => (case strip_type T of
(_, Type (s, _)) => if s mem ["bool"] then (a, T) :: vs else vs
| _ => vs)
@@ -264,7 +264,7 @@
val rlz'' = fold_rev Logic.all vs2 rlz
in (name, (vs,
if rt = Extraction.nullt then rt else
- foldr (uncurry lambda) rt vs1,
+ List.foldr (uncurry lambda) rt vs1,
ProofRewriteRules.un_hhf_proof rlz' rlz''
(fold_rev forall_intr_prf (vs2 @ rs) (prf_of rrule))))
end;
@@ -315,7 +315,7 @@
fun get f = (these oo Option.map) f;
val rec_names = distinct (op =) (map (fst o dest_Const o head_of o fst o
HOLogic.dest_eq o HOLogic.dest_Trueprop o prop_of) (get #rec_thms dt_info));
- val (_, constrss) = foldl_map (fn ((recs, dummies), (s, rs)) =>
+ val (_, constrss) = Library.foldl_map (fn ((recs, dummies), (s, rs)) =>
if s mem rsets then
let
val (d :: dummies') = dummies;
--- a/src/HOL/Tools/inductive_set_package.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/inductive_set_package.ML Wed Mar 04 10:45:52 2009 +0100
@@ -464,7 +464,7 @@
| NONE => u)) |>
Pattern.rewrite_term thy [] [to_pred_proc thy eqns'] |>
eta_contract (member op = cs' orf is_pred pred_arities))) intros;
- val cnames_syn' = map (fn (b, _) => (Binding.map_base (suffix "p") b, NoSyn)) cnames_syn;
+ val cnames_syn' = map (fn (b, _) => (Binding.map_name (suffix "p") b, NoSyn)) cnames_syn;
val monos' = map (to_pred [] (Context.Proof ctxt)) monos;
val ({preds, intrs, elims, raw_induct, ...}, ctxt1) =
InductivePackage.add_ind_def
@@ -501,9 +501,9 @@
(* convert theorems to set notation *)
val rec_name =
if Binding.is_empty alt_name then
- Binding.name (space_implode "_" (map (Binding.base_name o fst) cnames_syn))
+ Binding.name (space_implode "_" (map (Binding.name_of o fst) cnames_syn))
else alt_name;
- val cnames = map (Sign.full_name (ProofContext.theory_of ctxt3) o #1) cnames_syn; (* FIXME *)
+ val cnames = map (LocalTheory.full_name ctxt3 o #1) cnames_syn; (* FIXME *)
val (intr_names, intr_atts) = split_list (map fst intros);
val raw_induct' = to_set [] (Context.Proof ctxt3) raw_induct;
val (intrs', elims', induct, ctxt4) =
--- a/src/HOL/Tools/int_factor_simprocs.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/int_factor_simprocs.ML Wed Mar 04 10:45:52 2009 +0100
@@ -216,7 +216,7 @@
(** Final simplification for the CancelFactor simprocs **)
val simplify_one = Int_Numeral_Simprocs.simplify_meta_eq
- [@{thm mult_1_left}, @{thm mult_1_right}, @{thm zdiv_1}, @{thm numeral_1_eq_1}];
+ [@{thm mult_1_left}, @{thm mult_1_right}, @{thm div_by_1}, @{thm numeral_1_eq_1}];
fun cancel_simplify_meta_eq cancel_th ss th =
simplify_one ss (([th, cancel_th]) MRS trans);
@@ -263,8 +263,8 @@
(open CancelFactorCommon
val prove_conv = Int_Numeral_Base_Simprocs.prove_conv
val mk_bal = HOLogic.mk_binrel @{const_name Ring_and_Field.dvd}
- val dest_bal = HOLogic.dest_bin @{const_name Ring_and_Field.dvd} HOLogic.intT
- val simplify_meta_eq = cancel_simplify_meta_eq @{thm zdvd_zmult_cancel_disj}
+ val dest_bal = HOLogic.dest_bin @{const_name Ring_and_Field.dvd} Term.dummyT
+ val simplify_meta_eq = cancel_simplify_meta_eq @{thm dvd_mult_cancel_left}
);
(*Version for all fields, including unordered ones (type complex).*)
@@ -288,8 +288,8 @@
("int_mod_cancel_factor",
["((l::int) * m) mod n", "(l::int) mod (m * n)"],
K IntModCancelFactor.proc),
- ("int_dvd_cancel_factor",
- ["((l::int) * m) dvd n", "(l::int) dvd (m * n)"],
+ ("dvd_cancel_factor",
+ ["((l::'a::idom) * m) dvd n", "(l::'a::idom) dvd (m * n)"],
K IntDvdCancelFactor.proc),
("divide_cancel_factor",
["((l::'a::{division_by_zero,field}) * m) / n",
--- a/src/HOL/Tools/lin_arith.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/lin_arith.ML Wed Mar 04 10:45:52 2009 +0100
@@ -672,7 +672,7 @@
let
fun filter_prems (t, (left, right)) =
if p t then (left, right @ [t]) else (left @ right, [])
- val (left, right) = foldl filter_prems ([], []) terms
+ val (left, right) = List.foldl filter_prems ([], []) terms
in
right @ left
end;
--- a/src/HOL/Tools/meson.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/meson.ML Wed Mar 04 10:45:52 2009 +0100
@@ -92,7 +92,7 @@
| pairs =>
let val thy = theory_of_thm th
val (tyenv,tenv) =
- foldl (uncurry (Pattern.first_order_match thy)) (tyenv0,tenv0) pairs
+ List.foldl (uncurry (Pattern.first_order_match thy)) (tyenv0,tenv0) pairs
val t_pairs = map term_pair_of (Vartab.dest tenv)
val th' = Thm.instantiate ([], map (pairself (cterm_of thy)) t_pairs) th
in th' end
@@ -428,7 +428,7 @@
fun name_thms label =
let fun name1 (th, (k,ths)) =
(k-1, Thm.put_name_hint (label ^ string_of_int k) th :: ths)
- in fn ths => #2 (foldr name1 (length ths, []) ths) end;
+ in fn ths => #2 (List.foldr name1 (length ths, []) ths) end;
(*Is the given disjunction an all-negative support clause?*)
fun is_negative th = forall (not o #1) (literals (prop_of th));
@@ -477,7 +477,7 @@
(*Sums the sizes of the subgoals, ignoring hypotheses (ancestors)*)
fun addconcl(prem,sz) = size_of_term(Logic.strip_assums_concl prem) + sz
-fun size_of_subgoals st = foldr addconcl 0 (prems_of st);
+fun size_of_subgoals st = List.foldr addconcl 0 (prems_of st);
(*Negation Normal Form*)
@@ -544,12 +544,12 @@
(*Make clauses from a list of theorems, previously Skolemized and put into nnf.
The resulting clauses are HOL disjunctions.*)
-fun make_clauses ths = sort_clauses (foldr add_clauses [] ths);
+fun make_clauses ths = sort_clauses (List.foldr add_clauses [] ths);
(*Convert a list of clauses (disjunctions) to Horn clauses (contrapositives)*)
fun make_horns ths =
name_thms "Horn#"
- (distinct Thm.eq_thm_prop (foldr (add_contras clause_rules) [] ths));
+ (distinct Thm.eq_thm_prop (List.foldr (add_contras clause_rules) [] ths));
(*Could simply use nprems_of, which would count remaining subgoals -- no
discrimination as to their size! With BEST_FIRST, fails for problem 41.*)
--- a/src/HOL/Tools/metis_tools.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/metis_tools.ML Wed Mar 04 10:45:52 2009 +0100
@@ -543,9 +543,9 @@
val all_thms_FO = forall (Meson.is_fol_term thy o prop_of)
val isFO = (mode = ResAtp.Fol) orelse
(mode <> ResAtp.Hol andalso all_thms_FO (cls @ ths))
- val lmap0 = foldl (add_thm true ctxt)
+ val lmap0 = List.foldl (add_thm true ctxt)
{isFO = isFO, axioms = [], tfrees = init_tfrees ctxt} cls
- val lmap = foldl (add_thm false ctxt) (add_tfrees lmap0) ths
+ val lmap = List.foldl (add_thm false ctxt) (add_tfrees lmap0) ths
val clause_lists = map (Metis.Thm.clause o #1) (#axioms lmap)
fun used c = exists (Metis.LiteralSet.exists (const_in_metis c)) clause_lists
(*Now check for the existence of certain combinators*)
@@ -556,7 +556,7 @@
val thS = if used "c_COMBS" then [comb_S] else []
val thEQ = if used "c_fequal" then [fequal_imp_equal', equal_imp_fequal'] else []
val lmap' = if isFO then lmap
- else foldl (add_thm false ctxt) lmap (thEQ @ thS @ thC @ thB @ thK @ thI)
+ else List.foldl (add_thm false ctxt) lmap (thEQ @ thS @ thC @ thB @ thK @ thI)
in
add_type_thm (type_ext thy (map prop_of (cls @ ths))) lmap'
end;
--- a/src/HOL/Tools/old_primrec_package.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/old_primrec_package.ML Wed Mar 04 10:45:52 2009 +0100
@@ -37,8 +37,8 @@
fun varify (t, (i, ts)) =
let val t' = map_types (Logic.incr_tvar (i + 1)) (snd (Type.varify [] t))
in (maxidx_of_term t', t'::ts) end;
- val (i, cs') = foldr varify (~1, []) cs;
- val (i', intr_ts') = foldr varify (i, []) intr_ts;
+ val (i, cs') = List.foldr varify (~1, []) cs;
+ val (i', intr_ts') = List.foldr varify (i, []) intr_ts;
val rec_consts = fold Term.add_consts cs' [];
val intr_consts = fold Term.add_consts intr_ts' [];
fun unify (cname, cT) =
--- a/src/HOL/Tools/primrec_package.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/primrec_package.ML Wed Mar 04 10:45:52 2009 +0100
@@ -194,7 +194,7 @@
val def_name = Thm.def_name (Sign.base_name fname);
val rhs = singleton (Syntax.check_terms ctxt) raw_rhs;
val SOME var = get_first (fn ((b, _), mx) =>
- if Binding.base_name b = fname then SOME (b, mx) else NONE) fixes;
+ if Binding.name_of b = fname then SOME (b, mx) else NONE) fixes;
in (var, ((Binding.name def_name, []), rhs)) end;
@@ -231,7 +231,7 @@
let
val (fixes, spec) = prepare_spec prep_spec lthy raw_fixes raw_spec;
val eqns = fold_rev (process_eqn (fn v => Variable.is_fixed lthy v
- orelse exists (fn ((w, _), _) => v = Binding.base_name w) fixes) o snd) spec [];
+ orelse exists (fn ((w, _), _) => v = Binding.name_of w) fixes) o snd) spec [];
val tnames = distinct (op =) (map (#1 o snd) eqns);
val dts = find_dts (DatatypePackage.get_datatypes (ProofContext.theory_of lthy)) tnames tnames;
val main_fns = map (fn (tname, {index, ...}) =>
@@ -248,7 +248,7 @@
else primrec_error ("functions " ^ commas_quote names2 ^
"\nare not mutually recursive");
val prefix = space_implode "_" (map (Sign.base_name o #1) defs);
- val qualify = Binding.qualify prefix;
+ val qualify = Binding.qualify false prefix;
val spec' = (map o apfst)
(fn (b, attrs) => (qualify b, Code.add_default_eqn_attrib :: attrs)) spec;
val simp_atts = map (Attrib.internal o K)
@@ -299,7 +299,7 @@
P.name >> pair false) --| P.$$$ ")")) (false, "");
val old_primrec_decl =
- opt_unchecked_name -- Scan.repeat1 ((SpecParse.opt_thm_name ":" >> apfst Binding.base_name) -- P.prop);
+ opt_unchecked_name -- Scan.repeat1 ((SpecParse.opt_thm_name ":" >> apfst Binding.name_of) -- P.prop);
fun pipe_error t = P.!!! (Scan.fail_with (K
(cat_lines ["Equations must be separated by " ^ quote "|", quote t])));
--- a/src/HOL/Tools/recdef_package.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/recdef_package.ML Wed Mar 04 10:45:52 2009 +0100
@@ -320,7 +320,7 @@
val _ =
OuterSyntax.local_theory_to_proof' "recdef_tc" "recommence proof of termination condition (TFL)"
K.thy_goal
- ((SpecParse.opt_thm_name ":" >> apfst Binding.base_name) -- P.xname --
+ ((SpecParse.opt_thm_name ":" >> apfst Binding.name_of) -- P.xname --
Scan.option (P.$$$ "(" |-- P.nat --| P.$$$ ")")
>> (fn ((thm_name, name), i) => recdef_tc thm_name name i));
--- a/src/HOL/Tools/recfun_codegen.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/recfun_codegen.ML Wed Mar 04 10:45:52 2009 +0100
@@ -143,7 +143,7 @@
val eqs'' = map (dest_eq o prop_of) (List.concat (map fst thmss));
val (fundef', gr3) = mk_fundef module' "" true eqs''
(add_edge (dname, dep)
- (foldr (uncurry new_node) (del_nodes xs gr2)
+ (List.foldr (uncurry new_node) (del_nodes xs gr2)
(map (fn k =>
(k, (SOME (EQN ("", dummyT, dname)), module', ""))) xs)))
in (module', put_code module' fundef' gr3) end
--- a/src/HOL/Tools/record_package.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/record_package.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1778,7 +1778,7 @@
val names = map fst fields;
val extN = full bname;
val types = map snd fields;
- val alphas_fields = foldr OldTerm.add_typ_tfree_names [] types;
+ val alphas_fields = List.foldr OldTerm.add_typ_tfree_names [] types;
val alphas_ext = alphas inter alphas_fields;
val len = length fields;
val variants = Name.variant_list (moreN::rN::rN ^ "'"::wN::parent_variants) (map fst bfields);
@@ -1835,7 +1835,7 @@
let val (args',more) = chop_last args;
fun mk_ext' (((name,T),args),more) = mk_ext (name,T) (args@[more]);
fun build Ts =
- foldr mk_ext' more (prune n (extension_names ~~ Ts ~~ (chunks parent_chunks args')))
+ List.foldr mk_ext' more (prune n (extension_names ~~ Ts ~~ (chunks parent_chunks args')))
in
if more = HOLogic.unit
then build (map recT (0 upto parent_len))
@@ -2003,13 +2003,13 @@
end;
val split_object_prop =
- let fun ALL vs t = foldr (fn ((v,T),t) => HOLogic.mk_all (v,T,t)) t vs
+ let fun ALL vs t = List.foldr (fn ((v,T),t) => HOLogic.mk_all (v,T,t)) t vs
in (ALL [dest_Free r0] (P $ r0)) === (ALL (map dest_Free all_vars_more) (P $ r_rec0))
end;
val split_ex_prop =
- let fun EX vs t = foldr (fn ((v,T),t) => HOLogic.mk_exists (v,T,t)) t vs
+ let fun EX vs t = List.foldr (fn ((v,T),t) => HOLogic.mk_exists (v,T,t)) t vs
in (EX [dest_Free r0] (P $ r0)) === (EX (map dest_Free all_vars_more) (P $ r_rec0))
end;
@@ -2228,7 +2228,7 @@
val init_env =
(case parent of
NONE => []
- | SOME (types, _) => foldr OldTerm.add_typ_tfrees [] types);
+ | SOME (types, _) => List.foldr OldTerm.add_typ_tfrees [] types);
(* fields *)
--- a/src/HOL/Tools/refute.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/refute.ML Wed Mar 04 10:45:52 2009 +0100
@@ -63,7 +63,6 @@
val close_form : Term.term -> Term.term
val get_classdef : theory -> string -> (string * Term.term) option
- val norm_rhs : Term.term -> Term.term
val get_def : theory -> string * Term.typ -> (string * Term.term) option
val get_typedef : theory -> Term.typ -> (string * Term.term) option
val is_IDT_constructor : theory -> string * Term.typ -> bool
@@ -549,21 +548,6 @@
end;
(* ------------------------------------------------------------------------- *)
-(* norm_rhs: maps f ?t1 ... ?tn == rhs to %t1...tn. rhs *)
-(* ------------------------------------------------------------------------- *)
-
- (* Term.term -> Term.term *)
- fun norm_rhs eqn =
- let
- fun lambda (v as Var ((x, _), T)) t = Abs (x, T, abstract_over (v, t))
- | lambda v t = raise TERM ("lambda", [v, t])
- val (lhs, rhs) = Logic.dest_equals eqn
- val (_, args) = Term.strip_comb lhs
- in
- fold lambda (rev args) rhs
- end
-
-(* ------------------------------------------------------------------------- *)
(* get_def: looks up the definition of a constant, as created by "constdefs" *)
(* ------------------------------------------------------------------------- *)
@@ -571,6 +555,16 @@
fun get_def thy (s, T) =
let
+ (* maps f ?t1 ... ?tn == rhs to %t1...tn. rhs *)
+ fun norm_rhs eqn =
+ let
+ fun lambda (v as Var ((x, _), T)) t = Abs (x, T, abstract_over (v, t))
+ | lambda v t = raise TERM ("lambda", [v, t])
+ val (lhs, rhs) = Logic.dest_equals eqn
+ val (_, args) = Term.strip_comb lhs
+ in
+ fold lambda (rev args) rhs
+ end
(* (string * Term.term) list -> (string * Term.term) option *)
fun get_def_ax [] = NONE
| get_def_ax ((axname, ax) :: axioms) =
@@ -991,7 +985,7 @@
DatatypeAux.DtTFree _ =>
collect_types dT acc
| DatatypeAux.DtType (_, ds) =>
- collect_types dT (foldr collect_dtyp acc ds)
+ collect_types dT (List.foldr collect_dtyp acc ds)
| DatatypeAux.DtRec i =>
if dT mem acc then
acc (* prevent infinite recursion *)
@@ -1005,9 +999,9 @@
insert (op =) dT acc
else acc
(* collect argument types *)
- val acc_dtyps = foldr collect_dtyp acc_dT dtyps
+ val acc_dtyps = List.foldr collect_dtyp acc_dT dtyps
(* collect constructor types *)
- val acc_dconstrs = foldr collect_dtyp acc_dtyps
+ val acc_dconstrs = List.foldr collect_dtyp acc_dtyps
(List.concat (map snd dconstrs))
in
acc_dconstrs
@@ -1108,7 +1102,7 @@
case next (maxsize-minsize) 0 0 diffs of
SOME diffs' =>
(* merge with those types for which the size is fixed *)
- SOME (snd (foldl_map (fn (ds, (T, _)) =>
+ SOME (snd (Library.foldl_map (fn (ds, (T, _)) =>
case AList.lookup (op =) sizes (string_of_typ T) of
(* return the fixed size *)
SOME n => (ds, (T, n))
@@ -1202,7 +1196,7 @@
val _ = Output.immediate_output ("Translating term (sizes: "
^ commas (map (fn (_, n) => string_of_int n) universe) ^ ") ...")
(* translate 'u' and all axioms *)
- val ((model, args), intrs) = foldl_map (fn ((m, a), t') =>
+ val ((model, args), intrs) = Library.foldl_map (fn ((m, a), t') =>
let
val (i, m', a') = interpret thy m a t'
in
@@ -1618,7 +1612,7 @@
val Ts = Term.binder_types (Term.fastype_of t)
val t' = Term.incr_boundvars i t
in
- foldr (fn (T, term) => Abs ("<eta_expand>", T, term))
+ List.foldr (fn (T, term) => Abs ("<eta_expand>", T, term))
(Term.list_comb (t', map Bound (i-1 downto 0))) (List.take (Ts, i))
end;
@@ -1628,7 +1622,7 @@
(* int list -> int *)
- fun sum xs = foldl op+ 0 xs;
+ fun sum xs = List.foldl op+ 0 xs;
(* ------------------------------------------------------------------------- *)
(* product: returns the product of a list 'xs' of integers *)
@@ -1636,7 +1630,7 @@
(* int list -> int *)
- fun product xs = foldl op* 1 xs;
+ fun product xs = List.foldl op* 1 xs;
(* ------------------------------------------------------------------------- *)
(* size_of_dtyp: the size of (an initial fragment of) an inductive data type *)
@@ -1756,7 +1750,7 @@
(* create all constants of type 'T' *)
val constants = make_constants thy model T
(* interpret the 'body' separately for each constant *)
- val ((model', args'), bodies) = foldl_map
+ val ((model', args'), bodies) = Library.foldl_map
(fn ((m, a), c) =>
let
(* add 'c' to 'bounds' *)
@@ -2100,7 +2094,7 @@
Const (@{const_name insert}, HOLogic_prodT --> HOLogic_setT --> HOLogic_setT)
in
(* functions as graphs, i.e. as a (HOL) set of pairs "(x, y)" *)
- map (foldr (fn (pair, acc) => HOLogic_insert $ pair $ acc)
+ map (List.foldr (fn (pair, acc) => HOLogic_insert $ pair $ acc)
HOLogic_empty_set) pairss
end
| Type (s, Ts) =>
@@ -2292,7 +2286,7 @@
| search [] _ = ()
in search terms' terms end
(* int * interpretation list *)
- val (new_offset, intrs) = foldl_map (fn (off, t_elem) =>
+ val (new_offset, intrs) = Library.foldl_map (fn (off, t_elem) =>
(* if 't_elem' existed at the previous depth, *)
(* proceed recursively, otherwise map the entire *)
(* subtree to "undefined" *)
@@ -2358,7 +2352,7 @@
else (* mconstrs_count = length params *)
let
(* interpret each parameter separately *)
- val ((model', args'), p_intrs) = foldl_map (fn ((m, a), p) =>
+ val ((model', args'), p_intrs) = Library.foldl_map (fn ((m, a), p) =>
let
val (i, m', a') = interpret thy m a p
in
@@ -2470,7 +2464,7 @@
end) descr
(* associate constructors with corresponding parameters *)
(* (int * (interpretation * interpretation) list) list *)
- val (p_intrs', mc_p_intrs) = foldl_map
+ val (p_intrs', mc_p_intrs) = Library.foldl_map
(fn (p_intrs', (idx, c_intrs)) =>
let
val len = length c_intrs
@@ -2636,7 +2630,7 @@
(* interpretation list *)
val arg_intrs = map (uncurry rec_intr) rec_dtyps_intrs
(* apply 'intr' to all recursive arguments *)
- val result = foldl (fn (arg_i, i) =>
+ val result = List.foldl (fn (arg_i, i) =>
interpretation_apply (i, arg_i)) intr arg_intrs
(* update 'REC_OPERATORS' *)
val _ = Array.update (arr, elem, (true, result))
@@ -2916,7 +2910,7 @@
(* of width 'size_elem' and depth 'length_list' (with 'size_list' *)
(* nodes total) *)
(* (int * (int * int)) list *)
- val (_, lenoff_lists) = foldl_map (fn ((offsets, off), elem) =>
+ val (_, lenoff_lists) = Library.foldl_map (fn ((offsets, off), elem) =>
(* corresponds to a pre-order traversal of the tree *)
let
val len = length offsets
@@ -3010,7 +3004,7 @@
"intersection: interpretation for set is not a node")
(* interpretation -> interpretation *)
fun lfp (Node resultsets) =
- foldl (fn ((set, resultset), acc) =>
+ List.foldl (fn ((set, resultset), acc) =>
if is_subset (resultset, set) then
intersection (acc, set)
else
@@ -3061,7 +3055,7 @@
"union: interpretation for set is not a node")
(* interpretation -> interpretaion *)
fun gfp (Node resultsets) =
- foldl (fn ((set, resultset), acc) =>
+ List.foldl (fn ((set, resultset), acc) =>
if is_subset (set, resultset) then
union (acc, set)
else
@@ -3164,7 +3158,7 @@
val HOLogic_insert =
Const (@{const_name insert}, HOLogic_prodT --> HOLogic_setT --> HOLogic_setT)
in
- SOME (foldr (fn (pair, acc) => HOLogic_insert $ pair $ acc)
+ SOME (List.foldr (fn (pair, acc) => HOLogic_insert $ pair $ acc)
HOLogic_empty_set pairs)
end
| Type ("prop", []) =>
@@ -3299,8 +3293,6 @@
(* subterms that are then passed to other interpreters! *)
(* ------------------------------------------------------------------------- *)
- (* (theory -> theory) list *)
-
val setup =
add_interpreter "stlc" stlc_interpreter #>
add_interpreter "Pure" Pure_interpreter #>
--- a/src/HOL/Tools/res_atp.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/res_atp.ML Wed Mar 04 10:45:52 2009 +0100
@@ -6,10 +6,7 @@
val tvar_classes_of_terms : term list -> string list
val tfree_classes_of_terms : term list -> string list
val type_consts_of_terms : theory -> term list -> string list
- val write_problem_files : (theory -> bool -> Thm.thm list -> string ->
- (thm * (ResHolClause.axiom_name * ResHolClause.clause_id)) list * ResClause.classrelClause list *
- ResClause.arityClause list -> string list -> ResHolClause.axiom_name list)
- -> int -> bool
+ val write_problem_files : bool -> int -> bool
-> (int -> Path.T) -> Proof.context * thm list * thm
-> string list * ResHolClause.axiom_name Vector.vector list
end;
@@ -118,9 +115,9 @@
fun add_standard_const (s,tab) = Symtab.update (s,[]) tab;
val null_const_tab : const_typ list list Symtab.table =
- foldl add_standard_const Symtab.empty standard_consts;
+ List.foldl add_standard_const Symtab.empty standard_consts;
-fun get_goal_consts_typs thy = foldl (add_term_consts_typs_rm thy) null_const_tab;
+fun get_goal_consts_typs thy = List.foldl (add_term_consts_typs_rm thy) null_const_tab;
(*Inserts a dummy "constant" referring to the theory name, so that relevance
takes the given theory into account.*)
@@ -193,7 +190,7 @@
end;
(*Multiplies out to a list of pairs: 'a * 'b list -> ('a * 'b) list -> ('a * 'b) list*)
-fun add_expand_pairs (x,ys) xys = foldl (fn (y,acc) => (x,y)::acc) xys ys;
+fun add_expand_pairs (x,ys) xys = List.foldl (fn (y,acc) => (x,y)::acc) xys ys;
fun consts_typs_of_term thy t =
let val tab = add_term_consts_typs_rm thy (t, null_const_tab)
@@ -253,7 +250,7 @@
| relevant (newpairs,rejects) [] =
let val (newrels,more_rejects) = take_best max_new newpairs
val new_consts = List.concat (map #2 newrels)
- val rel_consts' = foldl add_const_typ_table rel_consts new_consts
+ val rel_consts' = List.foldl add_const_typ_table rel_consts new_consts
val newp = p + (1.0-p) / convergence
in
Output.debug (fn () => ("relevant this iteration: " ^ Int.toString (length newrels)));
@@ -379,7 +376,7 @@
fun add_single_names ((a, []), pairs) = pairs
| add_single_names ((a, [th]), pairs) = (a,th)::pairs
- | add_single_names ((a, ths), pairs) = #2 (foldl (multi_name a) (1,pairs) ths);
+ | add_single_names ((a, ths), pairs) = #2 (List.foldl (multi_name a) (1,pairs) ths);
(*Ignore blacklisted basenames*)
fun add_multi_names ((a, ths), pairs) =
@@ -396,7 +393,7 @@
in
app (fn th => ignore (Polyhash.peekInsert ht (th,()))) (ResBlacklist.get ctxt);
filter (not o blacklisted o #2)
- (foldl add_single_names (foldl add_multi_names [] mults) singles)
+ (List.foldl add_single_names (List.foldl add_multi_names [] mults) singles)
end;
fun check_named ("",th) = (warning ("No name for theorem " ^ Display.string_of_thm th); false)
@@ -434,18 +431,18 @@
(* Type Classes Present in the Axiom or Conjecture Clauses *)
(***************************************************************)
-fun add_classes (sorts, cset) = foldl setinsert cset (List.concat sorts);
+fun add_classes (sorts, cset) = List.foldl setinsert cset (List.concat sorts);
(*Remove this trivial type class*)
fun delete_type cset = Symtab.delete_safe "HOL.type" cset;
fun tvar_classes_of_terms ts =
let val sorts_list = map (map #2 o OldTerm.term_tvars) ts
- in Symtab.keys (delete_type (foldl add_classes Symtab.empty sorts_list)) end;
+ in Symtab.keys (delete_type (List.foldl add_classes Symtab.empty sorts_list)) end;
fun tfree_classes_of_terms ts =
let val sorts_list = map (map #2 o OldTerm.term_tfrees) ts
- in Symtab.keys (delete_type (foldl add_classes Symtab.empty sorts_list)) end;
+ in Symtab.keys (delete_type (List.foldl add_classes Symtab.empty sorts_list)) end;
(*fold type constructors*)
fun fold_type_consts f (Type (a, Ts)) x = fold (fold_type_consts f) Ts (f (a,x))
@@ -524,11 +521,10 @@
(* TODO: problem file for *one* subgoal would be sufficient *)
(*Write out problem files for each subgoal.
Argument probfile generates filenames from subgoal-number
- Argument writer is either a tptp_write_file or dfg_write_file from ResHolClause
Arguments max_new and theory_const are booleans controlling relevance_filter
(necessary for different provers)
- *)
-fun write_problem_files writer max_new theory_const probfile (ctxt, chain_ths, th) =
+*)
+fun write_problem_files dfg max_new theory_const probfile (ctxt, chain_ths, th) =
let val goals = Thm.prems_of th
val thy = ProofContext.theory_of ctxt
fun get_neg_subgoals [] _ = []
@@ -548,6 +544,7 @@
val white_cls = ResAxioms.cnf_rules_pairs thy white_thms
(*clauses relevant to goal gl*)
val axcls_list = map (fn ngcls => white_cls @ relevance_filter max_new theory_const thy included_cls (map prop_of ngcls)) goal_cls
+ val writer = if dfg then ResHolClause.dfg_write_file else ResHolClause.tptp_write_file
fun write_all [] [] _ = []
| write_all (ccls::ccls_list) (axcls::axcls_list) k =
let val fname = File.platform_path (probfile k)
@@ -561,7 +558,7 @@
and supers = tvar_classes_of_terms axtms
and tycons = type_consts_of_terms thy (ccltms@axtms)
(*TFrees in conjecture clauses; TVars in axiom clauses*)
- val (supers',arity_clauses) = ResClause.make_arity_clauses thy tycons supers
+ val (supers',arity_clauses) = ResClause.make_arity_clauses_dfg dfg thy tycons supers
val classrel_clauses = ResClause.make_classrel_clauses thy subs supers'
val clnames = writer thy isFO ccls fname (axcls,classrel_clauses,arity_clauses) []
val thm_names = Vector.fromList clnames
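
Note: the hunks above, and many more below, replace the unqualified top-level foldl/foldr with the explicitly qualified SML Basis combinators List.foldl/List.foldr; only the structure qualifier changes, the tupled argument convention stays the same. A minimal sketch of the two conventions in play, in plain Standard ML (Isabelle's curried fold is redeclared here only so the snippet is self-contained; it is not part of the patch):

  (* SML Basis combinator, as used after this change:
     List.foldl : ('a * 'b -> 'b) -> 'b -> 'a list -> 'b *)
  val sum = List.foldl (fn (x, acc) => acc + x) 0 [1, 2, 3, 4];    (* 10 *)

  (* Isabelle's own fold is curried and takes the list before the
     initial value, so the two cannot be confused silently *)
  fun fold _ [] y = y
    | fold f (x :: xs) y = fold f xs (f x y);
  val sum' = fold (fn x => fn acc => acc + x) [1, 2, 3, 4] 0;      (* 10 *)
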
--- a/src/HOL/Tools/res_axioms.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/res_axioms.ML Wed Mar 04 10:45:52 2009 +0100
@@ -494,7 +494,7 @@
val remaining_hyps = filter_out (member (op aconv) (map Thm.term_of defs))
(map Thm.term_of hyps)
val fixed = OldTerm.term_frees (concl_of st) @
- foldl (gen_union (op aconv)) [] (map OldTerm.term_frees remaining_hyps)
+ List.foldl (gen_union (op aconv)) [] (map OldTerm.term_frees remaining_hyps)
in Seq.of_list [LocalDefs.expand (filter (is_okdef fixed o Thm.term_of) defs) st] end;
--- a/src/HOL/Tools/res_clause.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/res_clause.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Author: Jia Meng, Cambridge University Computer Laboratory
- ID: $Id$
Copyright 2004 University of Cambridge
Storing/printing FOL clauses and arity clauses.
@@ -27,9 +26,8 @@
val make_fixed_var : string -> string
val make_schematic_type_var : string * int -> string
val make_fixed_type_var : string -> string
- val dfg_format: bool ref
- val make_fixed_const : string -> string
- val make_fixed_type_const : string -> string
+ val make_fixed_const : bool -> string -> string
+ val make_fixed_type_const : bool -> string -> string
val make_type_class : string -> string
datatype kind = Axiom | Conjecture
type axiom_name = string
@@ -50,6 +48,7 @@
datatype classrelClause = ClassrelClause of
{axiom_name: axiom_name, subclass: class, superclass: class}
val make_classrel_clauses: theory -> class list -> class list -> classrelClause list
+ val make_arity_clauses_dfg: bool -> theory -> string list -> class list -> class list * arityClause list
val make_arity_clauses: theory -> string list -> class list -> class list * arityClause list
val add_type_sort_preds: typ * int Symtab.table -> int Symtab.table
val add_classrelClause_preds : classrelClause * int Symtab.table -> int Symtab.table
@@ -95,7 +94,7 @@
val tconst_prefix = "tc_";
val class_prefix = "class_";
-fun union_all xss = foldl (op union) [] xss;
+fun union_all xss = List.foldl (op union) [] xss;
(*Provide readable names for the more common symbolic functions*)
val const_trans_table =
@@ -197,28 +196,26 @@
fun make_fixed_type_var x = tfree_prefix ^ (ascii_of (trim_type_var x));
(*HACK because SPASS truncates identifiers to 63 characters :-(( *)
-val dfg_format = ref false;
-
(*32-bit hash,so we expect no collisions unless there are around 65536 long identifiers...*)
-fun controlled_length s =
- if size s > 60 andalso !dfg_format
+fun controlled_length dfg_format s =
+ if size s > 60 andalso dfg_format
then Word.toString (Polyhash.hashw_string(s,0w0))
else s;
-fun lookup_const c =
+fun lookup_const dfg c =
case Symtab.lookup const_trans_table c of
SOME c' => c'
- | NONE => controlled_length (ascii_of c);
+ | NONE => controlled_length dfg (ascii_of c);
-fun lookup_type_const c =
+fun lookup_type_const dfg c =
case Symtab.lookup type_const_trans_table c of
SOME c' => c'
- | NONE => controlled_length (ascii_of c);
+ | NONE => controlled_length dfg (ascii_of c);
-fun make_fixed_const "op =" = "equal" (*MUST BE "equal" because it's built-in to ATPs*)
- | make_fixed_const c = const_prefix ^ lookup_const c;
+fun make_fixed_const _ "op =" = "equal" (*MUST BE "equal" because it's built-in to ATPs*)
+ | make_fixed_const dfg c = const_prefix ^ lookup_const dfg c;
-fun make_fixed_type_const c = tconst_prefix ^ lookup_type_const c;
+fun make_fixed_type_const dfg c = tconst_prefix ^ lookup_type_const dfg c;
fun make_type_class clas = class_prefix ^ ascii_of clas;
@@ -251,13 +248,13 @@
(*Flatten a type to a fol_type while accumulating sort constraints on the TFrees and
TVars it contains.*)
-fun type_of (Type (a, Ts)) =
- let val (folTyps, ts) = types_of Ts
- val t = make_fixed_type_const a
+fun type_of dfg (Type (a, Ts)) =
+ let val (folTyps, ts) = types_of dfg Ts
+ val t = make_fixed_type_const dfg a
in (Comp(t,folTyps), ts) end
- | type_of T = (atomic_type T, [T])
-and types_of Ts =
- let val (folTyps,ts) = ListPair.unzip (map type_of Ts)
+ | type_of dfg T = (atomic_type T, [T])
+and types_of dfg Ts =
+ let val (folTyps,ts) = ListPair.unzip (map (type_of dfg) Ts)
in (folTyps, union_all ts) end;
(*Make literals for sorted type variables*)
@@ -277,7 +274,7 @@
| pred_of_sort (LTFree (s,ty)) = (s,1)
(*Given a list of sorted type variables, return a list of type literals.*)
-fun add_typs Ts = foldl (op union) [] (map sorts_on_typs Ts);
+fun add_typs Ts = List.foldl (op union) [] (map sorts_on_typs Ts);
(*The correct treatment of TFrees like 'a in lemmas (axiom clauses) is not clear.
* Ignoring them leads to unsound proofs, since we do nothing to ensure that 'a
@@ -317,12 +314,12 @@
| pack_sort(tvar, cls::srt) = (cls, tvar) :: pack_sort(tvar, srt);
(*Arity of type constructor tcon :: (arg1,...,argN)res*)
-fun make_axiom_arity_clause (tcons, axiom_name, (cls,args)) =
+fun make_axiom_arity_clause dfg (tcons, axiom_name, (cls,args)) =
let val tvars = gen_TVars (length args)
val tvars_srts = ListPair.zip (tvars,args)
in
ArityClause {axiom_name = axiom_name,
- conclLit = TConsLit (cls, make_fixed_type_const tcons, tvars),
+ conclLit = TConsLit (cls, make_fixed_type_const dfg tcons, tvars),
premLits = map TVarLit (union_all(map pack_sort tvars_srts))}
end;
@@ -340,8 +337,8 @@
let val class_less = Sorts.class_less(Sign.classes_of thy)
fun add_super sub (super,pairs) =
if class_less (sub,super) then (sub,super)::pairs else pairs
- fun add_supers (sub,pairs) = foldl (add_super sub) pairs supers
- in foldl add_supers [] subs end;
+ fun add_supers (sub,pairs) = List.foldl (add_super sub) pairs supers
+ in List.foldl add_supers [] subs end;
fun make_classrelClause (sub,super) =
ClassrelClause {axiom_name = clrelclause_prefix ^ ascii_of sub ^ "_" ^ ascii_of super,
@@ -354,20 +351,20 @@
(** Isabelle arities **)
-fun arity_clause _ _ (tcons, []) = []
- | arity_clause seen n (tcons, ("HOL.type",_)::ars) = (*ignore*)
- arity_clause seen n (tcons,ars)
- | arity_clause seen n (tcons, (ar as (class,_)) :: ars) =
+fun arity_clause dfg _ _ (tcons, []) = []
+ | arity_clause dfg seen n (tcons, ("HOL.type",_)::ars) = (*ignore*)
+ arity_clause dfg seen n (tcons,ars)
+ | arity_clause dfg seen n (tcons, (ar as (class,_)) :: ars) =
if class mem_string seen then (*multiple arities for the same tycon, class pair*)
- make_axiom_arity_clause (tcons, lookup_type_const tcons ^ "_" ^ class ^ "_" ^ Int.toString n, ar) ::
- arity_clause seen (n+1) (tcons,ars)
+ make_axiom_arity_clause dfg (tcons, lookup_type_const dfg tcons ^ "_" ^ class ^ "_" ^ Int.toString n, ar) ::
+ arity_clause dfg seen (n+1) (tcons,ars)
else
- make_axiom_arity_clause (tcons, lookup_type_const tcons ^ "_" ^ class, ar) ::
- arity_clause (class::seen) n (tcons,ars)
+ make_axiom_arity_clause dfg (tcons, lookup_type_const dfg tcons ^ "_" ^ class, ar) ::
+ arity_clause dfg (class::seen) n (tcons,ars)
-fun multi_arity_clause [] = []
- | multi_arity_clause ((tcons,ars) :: tc_arlists) =
- arity_clause [] 1 (tcons, ars) @ multi_arity_clause tc_arlists
+fun multi_arity_clause dfg [] = []
+ | multi_arity_clause dfg ((tcons,ars) :: tc_arlists) =
+ arity_clause dfg [] 1 (tcons, ars) @ multi_arity_clause dfg tc_arlists
(*Generate all pairs (tycon,class,sorts) such that tycon belongs to class in theory thy
provided its arguments have the corresponding sorts.*)
@@ -377,7 +374,7 @@
fun add_class tycon (class,pairs) =
(class, domain_sorts(tycon,class))::pairs
handle Sorts.CLASS_ERROR _ => pairs
- fun try_classes tycon = (tycon, foldl (add_class tycon) [] classes)
+ fun try_classes tycon = (tycon, List.foldl (add_class tycon) [] classes)
in map try_classes tycons end;
(*Proving one (tycon, class) membership may require proving others, so iterate.*)
@@ -390,17 +387,17 @@
val (classes', cpairs') = iter_type_class_pairs thy tycons newclasses
in (classes' union classes, cpairs' union cpairs) end;
-fun make_arity_clauses thy tycons classes =
+fun make_arity_clauses_dfg dfg thy tycons classes =
let val (classes', cpairs) = iter_type_class_pairs thy tycons classes
- in (classes', multi_arity_clause cpairs) end;
-
+ in (classes', multi_arity_clause dfg cpairs) end;
+val make_arity_clauses = make_arity_clauses_dfg false;
(**** Find occurrences of predicates in clauses ****)
(*FIXME: multiple-arity checking doesn't work, as update_new is the wrong
function (it flags repeated declarations of a function, even with the same arity)*)
-fun update_many (tab, keypairs) = foldl (uncurry Symtab.update) tab keypairs;
+fun update_many (tab, keypairs) = List.foldl (uncurry Symtab.update) tab keypairs;
fun add_type_sort_preds (T, preds) =
update_many (preds, map pred_of_sort (sorts_on_typs T));
@@ -414,14 +411,14 @@
fun add_arityClause_preds (ArityClause {conclLit,premLits,...}, preds) =
let val classes = map (make_type_class o class_of_arityLit) (conclLit::premLits)
fun upd (class,preds) = Symtab.update (class,1) preds
- in foldl upd preds classes end;
+ in List.foldl upd preds classes end;
(*** Find occurrences of functions in clauses ***)
fun add_foltype_funcs (AtomV _, funcs) = funcs
| add_foltype_funcs (AtomF a, funcs) = Symtab.update (a,0) funcs
| add_foltype_funcs (Comp(a,tys), funcs) =
- foldl add_foltype_funcs (Symtab.update (a, length tys) funcs) tys;
+ List.foldl add_foltype_funcs (Symtab.update (a, length tys) funcs) tys;
(*TFrees are recorded as constants*)
fun add_type_sort_funcs (TVar _, funcs) = funcs
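
Note: in res_clause.ML the mutable dfg_format flag is removed and the output format (DFG for SPASS versus TPTP) is threaded through make_fixed_const, make_fixed_type_const, type_of and the arity-clause functions as an explicit bool, with the old interface kept as the partial application make_arity_clauses = make_arity_clauses_dfg false. A minimal sketch of the pattern in plain Standard ML; the Polyhash-based 32-bit hash is replaced here by a hypothetical length-based stand-in:

  (* before: behaviour depends on a global mutable flag *)
  val dfg_format = ref false;
  fun controlled_length s =
    if size s > 60 andalso !dfg_format
    then "h" ^ Int.toString (size s)      (* stand-in for the real hash *)
    else s;

  (* after: the flag is an explicit argument, so no call site depends on
     a flag set (or forgotten) by some other caller *)
  fun controlled_length' dfg s =
    if size s > 60 andalso dfg
    then "h" ^ Int.toString (size s)
    else s;

  (* the TPTP-only entry point survives as a partial application,
     mirroring   val make_arity_clauses = make_arity_clauses_dfg false   *)
  val tptp_controlled_length = controlled_length' false;
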
--- a/src/HOL/Tools/res_hol_clause.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/res_hol_clause.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,4 +1,4 @@
-(* ID: $Id$
+(*
Author: Jia Meng, NICTA
FOL clauses translated from HOL formulae.
@@ -13,8 +13,8 @@
val comb_C: thm
val comb_S: thm
datatype type_level = T_FULL | T_CONST
- val typ_level: type_level ref
- val minimize_applies: bool ref
+ val typ_level: type_level
+ val minimize_applies: bool
type axiom_name = string
type polarity = bool
type clause_id = int
@@ -53,22 +53,18 @@
(*The different translations of types*)
datatype type_level = T_FULL | T_CONST;
-val typ_level = ref T_CONST;
+val typ_level = T_CONST;
(*If true, each function will be directly applied to as many arguments as possible, avoiding
use of the "apply" operator. Use of hBOOL is also minimized.*)
-val minimize_applies = ref true;
-
-val const_min_arity = ref (Symtab.empty : int Symtab.table);
+val minimize_applies = true;
-val const_needs_hBOOL = ref (Symtab.empty : bool Symtab.table);
-
-fun min_arity_of c = getOpt (Symtab.lookup(!const_min_arity) c, 0);
+fun min_arity_of const_min_arity c = getOpt (Symtab.lookup const_min_arity c, 0);
(*True if the constant ever appears outside of the top-level position in literals.
If false, the constant always receives all of its arguments and is used as a predicate.*)
-fun needs_hBOOL c = not (!minimize_applies) orelse
- getOpt (Symtab.lookup(!const_needs_hBOOL) c, false);
+fun needs_hBOOL const_needs_hBOOL c = not minimize_applies orelse
+ getOpt (Symtab.lookup const_needs_hBOOL c, false);
(******************************************************)
@@ -110,67 +106,68 @@
fun isTaut (Clause {literals,...}) = exists isTrue literals;
-fun type_of (Type (a, Ts)) =
- let val (folTypes,ts) = types_of Ts
- in (RC.Comp(RC.make_fixed_type_const a, folTypes), ts) end
- | type_of (tp as (TFree(a,s))) =
+fun type_of dfg (Type (a, Ts)) =
+ let val (folTypes,ts) = types_of dfg Ts
+ in (RC.Comp(RC.make_fixed_type_const dfg a, folTypes), ts) end
+ | type_of dfg (tp as (TFree(a,s))) =
(RC.AtomF (RC.make_fixed_type_var a), [tp])
- | type_of (tp as (TVar(v,s))) =
+ | type_of dfg (tp as (TVar(v,s))) =
(RC.AtomV (RC.make_schematic_type_var v), [tp])
-and types_of Ts =
- let val (folTyps,ts) = ListPair.unzip (map type_of Ts)
+and types_of dfg Ts =
+ let val (folTyps,ts) = ListPair.unzip (map (type_of dfg) Ts)
in (folTyps, RC.union_all ts) end;
(* same as above, but no gathering of sort information *)
-fun simp_type_of (Type (a, Ts)) =
- RC.Comp(RC.make_fixed_type_const a, map simp_type_of Ts)
- | simp_type_of (TFree (a,s)) = RC.AtomF(RC.make_fixed_type_var a)
- | simp_type_of (TVar (v,s)) = RC.AtomV(RC.make_schematic_type_var v);
+fun simp_type_of dfg (Type (a, Ts)) =
+ RC.Comp(RC.make_fixed_type_const dfg a, map (simp_type_of dfg) Ts)
+ | simp_type_of dfg (TFree (a,s)) = RC.AtomF(RC.make_fixed_type_var a)
+ | simp_type_of dfg (TVar (v,s)) = RC.AtomV(RC.make_schematic_type_var v);
-fun const_type_of thy (c,t) =
- let val (tp,ts) = type_of t
- in (tp, ts, map simp_type_of (Sign.const_typargs thy (c,t))) end;
+fun const_type_of dfg thy (c,t) =
+ let val (tp,ts) = type_of dfg t
+ in (tp, ts, map (simp_type_of dfg) (Sign.const_typargs thy (c,t))) end;
(* convert a Term.term (with combinators) into a combterm, also accummulate sort info *)
-fun combterm_of thy (Const(c,t)) =
- let val (tp,ts,tvar_list) = const_type_of thy (c,t)
- val c' = CombConst(RC.make_fixed_const c, tp, tvar_list)
+fun combterm_of dfg thy (Const(c,t)) =
+ let val (tp,ts,tvar_list) = const_type_of dfg thy (c,t)
+ val c' = CombConst(RC.make_fixed_const dfg c, tp, tvar_list)
in (c',ts) end
- | combterm_of thy (Free(v,t)) =
- let val (tp,ts) = type_of t
+ | combterm_of dfg thy (Free(v,t)) =
+ let val (tp,ts) = type_of dfg t
val v' = CombConst(RC.make_fixed_var v, tp, [])
in (v',ts) end
- | combterm_of thy (Var(v,t)) =
- let val (tp,ts) = type_of t
+ | combterm_of dfg thy (Var(v,t)) =
+ let val (tp,ts) = type_of dfg t
val v' = CombVar(RC.make_schematic_var v,tp)
in (v',ts) end
- | combterm_of thy (P $ Q) =
- let val (P',tsP) = combterm_of thy P
- val (Q',tsQ) = combterm_of thy Q
+ | combterm_of dfg thy (P $ Q) =
+ let val (P',tsP) = combterm_of dfg thy P
+ val (Q',tsQ) = combterm_of dfg thy Q
in (CombApp(P',Q'), tsP union tsQ) end
- | combterm_of thy (t as Abs _) = raise RC.CLAUSE("HOL CLAUSE",t);
+ | combterm_of _ thy (t as Abs _) = raise RC.CLAUSE("HOL CLAUSE",t);
-fun predicate_of thy ((Const("Not",_) $ P), polarity) = predicate_of thy (P, not polarity)
- | predicate_of thy (t,polarity) = (combterm_of thy (Envir.eta_contract t), polarity);
+fun predicate_of dfg thy ((Const("Not",_) $ P), polarity) = predicate_of dfg thy (P, not polarity)
+ | predicate_of dfg thy (t,polarity) = (combterm_of dfg thy (Envir.eta_contract t), polarity);
-fun literals_of_term1 thy args (Const("Trueprop",_) $ P) = literals_of_term1 thy args P
- | literals_of_term1 thy args (Const("op |",_) $ P $ Q) =
- literals_of_term1 thy (literals_of_term1 thy args P) Q
- | literals_of_term1 thy (lits,ts) P =
- let val ((pred,ts'),pol) = predicate_of thy (P,true)
+fun literals_of_term1 dfg thy args (Const("Trueprop",_) $ P) = literals_of_term1 dfg thy args P
+ | literals_of_term1 dfg thy args (Const("op |",_) $ P $ Q) =
+ literals_of_term1 dfg thy (literals_of_term1 dfg thy args P) Q
+ | literals_of_term1 dfg thy (lits,ts) P =
+ let val ((pred,ts'),pol) = predicate_of dfg thy (P,true)
in
(Literal(pol,pred)::lits, ts union ts')
end;
-fun literals_of_term thy P = literals_of_term1 thy ([],[]) P;
+fun literals_of_term_dfg dfg thy P = literals_of_term1 dfg thy ([],[]) P;
+val literals_of_term = literals_of_term_dfg false;
(* Problem too trivial for resolution (empty clause) *)
exception TOO_TRIVIAL;
(* making axiom and conjecture clauses *)
-fun make_clause thy (clause_id,axiom_name,kind,th) =
- let val (lits,ctypes_sorts) = literals_of_term thy (prop_of th)
+fun make_clause dfg thy (clause_id,axiom_name,kind,th) =
+ let val (lits,ctypes_sorts) = literals_of_term_dfg dfg thy (prop_of th)
in
if forall isFalse lits
then raise TOO_TRIVIAL
@@ -180,20 +177,20 @@
end;
-fun add_axiom_clause thy ((th,(name,id)), pairs) =
- let val cls = make_clause thy (id, name, RC.Axiom, th)
+fun add_axiom_clause dfg thy ((th,(name,id)), pairs) =
+ let val cls = make_clause dfg thy (id, name, RC.Axiom, th)
in
if isTaut cls then pairs else (name,cls)::pairs
end;
-fun make_axiom_clauses thy = foldl (add_axiom_clause thy) [];
+fun make_axiom_clauses dfg thy = List.foldl (add_axiom_clause dfg thy) [];
-fun make_conjecture_clauses_aux _ _ [] = []
- | make_conjecture_clauses_aux thy n (th::ths) =
- make_clause thy (n,"conjecture", RC.Conjecture, th) ::
- make_conjecture_clauses_aux thy (n+1) ths;
+fun make_conjecture_clauses_aux dfg _ _ [] = []
+ | make_conjecture_clauses_aux dfg thy n (th::ths) =
+ make_clause dfg thy (n,"conjecture", RC.Conjecture, th) ::
+ make_conjecture_clauses_aux dfg thy (n+1) ths;
-fun make_conjecture_clauses thy = make_conjecture_clauses_aux thy 0;
+fun make_conjecture_clauses dfg thy = make_conjecture_clauses_aux dfg thy 0;
(**********************************************************************)
@@ -218,11 +215,11 @@
val type_wrapper = "ti";
-fun head_needs_hBOOL (CombConst(c,_,_)) = needs_hBOOL c
- | head_needs_hBOOL _ = true;
+fun head_needs_hBOOL const_needs_hBOOL (CombConst(c,_,_)) = needs_hBOOL const_needs_hBOOL c
+ | head_needs_hBOOL const_needs_hBOOL _ = true;
fun wrap_type (s, tp) =
- if !typ_level=T_FULL then
+ if typ_level=T_FULL then
type_wrapper ^ RC.paren_pack [s, RC.string_of_fol_type tp]
else s;
@@ -235,43 +232,43 @@
(*Apply an operator to the argument strings, using either the "apply" operator or
direct function application.*)
-fun string_of_applic (CombConst(c,tp,tvars), args) =
+fun string_of_applic cma (CombConst(c,tp,tvars), args) =
let val c = if c = "equal" then "c_fequal" else c
- val nargs = min_arity_of c
+ val nargs = min_arity_of cma c
val args1 = List.take(args, nargs)
handle Subscript => error ("string_of_applic: " ^ c ^ " has arity " ^
Int.toString nargs ^ " but is applied to " ^
space_implode ", " args)
val args2 = List.drop(args, nargs)
- val targs = if !typ_level = T_CONST then map RC.string_of_fol_type tvars
+ val targs = if typ_level = T_CONST then map RC.string_of_fol_type tvars
else []
in
string_apply (c ^ RC.paren_pack (args1@targs), args2)
end
- | string_of_applic (CombVar(v,tp), args) = string_apply (v, args)
- | string_of_applic _ = error "string_of_applic";
+ | string_of_applic cma (CombVar(v,tp), args) = string_apply (v, args)
+ | string_of_applic _ _ = error "string_of_applic";
-fun wrap_type_if (head, s, tp) = if head_needs_hBOOL head then wrap_type (s, tp) else s;
+fun wrap_type_if cnh (head, s, tp) = if head_needs_hBOOL cnh head then wrap_type (s, tp) else s;
-fun string_of_combterm t =
+fun string_of_combterm cma cnh t =
let val (head, args) = strip_comb t
- in wrap_type_if (head,
- string_of_applic (head, map string_of_combterm args),
+ in wrap_type_if cnh (head,
+ string_of_applic cma (head, map (string_of_combterm cma cnh) args),
type_of_combterm t)
end;
(*Boolean-valued terms are here converted to literals.*)
-fun boolify t = "hBOOL" ^ RC.paren_pack [string_of_combterm t];
+fun boolify cma cnh t = "hBOOL" ^ RC.paren_pack [string_of_combterm cma cnh t];
-fun string_of_predicate t =
+fun string_of_predicate cma cnh t =
case t of
(CombApp(CombApp(CombConst("equal",_,_), t1), t2)) =>
(*DFG only: new TPTP prefers infix equality*)
- ("equal" ^ RC.paren_pack [string_of_combterm t1, string_of_combterm t2])
+ ("equal" ^ RC.paren_pack [string_of_combterm cma cnh t1, string_of_combterm cma cnh t2])
| _ =>
case #1 (strip_comb t) of
- CombConst(c,_,_) => if needs_hBOOL c then boolify t else string_of_combterm t
- | _ => boolify t;
+ CombConst(c,_,_) => if needs_hBOOL cnh c then boolify cma cnh t else string_of_combterm cma cnh t
+ | _ => boolify cma cnh t;
fun string_of_clausename (cls_id,ax_name) =
RC.clause_prefix ^ RC.ascii_of ax_name ^ "_" ^ Int.toString cls_id;
@@ -282,23 +279,23 @@
(*** tptp format ***)
-fun tptp_of_equality pol (t1,t2) =
+fun tptp_of_equality cma cnh pol (t1,t2) =
let val eqop = if pol then " = " else " != "
- in string_of_combterm t1 ^ eqop ^ string_of_combterm t2 end;
+ in string_of_combterm cma cnh t1 ^ eqop ^ string_of_combterm cma cnh t2 end;
-fun tptp_literal (Literal(pol, CombApp(CombApp(CombConst("equal",_,_), t1), t2))) =
- tptp_of_equality pol (t1,t2)
- | tptp_literal (Literal(pol,pred)) =
- RC.tptp_sign pol (string_of_predicate pred);
+fun tptp_literal cma cnh (Literal(pol, CombApp(CombApp(CombConst("equal",_,_), t1), t2))) =
+ tptp_of_equality cma cnh pol (t1,t2)
+ | tptp_literal cma cnh (Literal(pol,pred)) =
+ RC.tptp_sign pol (string_of_predicate cma cnh pred);
(*Given a clause, returns its literals paired with a list of literals concerning TFrees;
the latter should only occur in conjecture clauses.*)
-fun tptp_type_lits pos (Clause{literals, ctypes_sorts, ...}) =
- (map tptp_literal literals,
+fun tptp_type_lits cma cnh pos (Clause{literals, ctypes_sorts, ...}) =
+ (map (tptp_literal cma cnh) literals,
map (RC.tptp_of_typeLit pos) (RC.add_typs ctypes_sorts));
-fun clause2tptp (cls as Clause{axiom_name,clause_id,kind,ctypes_sorts,...}) =
- let val (lits,tylits) = tptp_type_lits (kind = RC.Conjecture) cls
+fun clause2tptp cma cnh (cls as Clause{axiom_name,clause_id,kind,ctypes_sorts,...}) =
+ let val (lits,tylits) = tptp_type_lits cma cnh (kind = RC.Conjecture) cls
in
(RC.gen_tptp_cls(clause_id,axiom_name,kind,lits,tylits), tylits)
end;
@@ -306,10 +303,10 @@
(*** dfg format ***)
-fun dfg_literal (Literal(pol,pred)) = RC.dfg_sign pol (string_of_predicate pred);
+fun dfg_literal cma cnh (Literal(pol,pred)) = RC.dfg_sign pol (string_of_predicate cma cnh pred);
-fun dfg_type_lits pos (Clause{literals, ctypes_sorts, ...}) =
- (map dfg_literal literals,
+fun dfg_type_lits cma cnh pos (Clause{literals, ctypes_sorts, ...}) =
+ (map (dfg_literal cma cnh) literals,
map (RC.dfg_of_typeLit pos) (RC.add_typs ctypes_sorts));
fun get_uvars (CombConst _) vars = vars
@@ -320,8 +317,8 @@
fun dfg_vars (Clause {literals,...}) = RC.union_all (map get_uvars_l literals);
-fun clause2dfg (cls as Clause{axiom_name,clause_id,kind,ctypes_sorts,...}) =
- let val (lits,tylits) = dfg_type_lits (kind = RC.Conjecture) cls
+fun clause2dfg cma cnh (cls as Clause{axiom_name,clause_id,kind,ctypes_sorts,...}) =
+ let val (lits,tylits) = dfg_type_lits cma cnh (kind = RC.Conjecture) cls
val vars = dfg_vars cls
val tvars = RC.get_tvar_strs ctypes_sorts
in
@@ -331,47 +328,47 @@
(** For DFG format: accumulate function and predicate declarations **)
-fun addtypes tvars tab = foldl RC.add_foltype_funcs tab tvars;
+fun addtypes tvars tab = List.foldl RC.add_foltype_funcs tab tvars;
-fun add_decls (CombConst(c,tp,tvars), (funcs,preds)) =
+fun add_decls cma cnh (CombConst(c,tp,tvars), (funcs,preds)) =
if c = "equal" then (addtypes tvars funcs, preds)
else
- let val arity = min_arity_of c
- val ntys = if !typ_level = T_CONST then length tvars else 0
+ let val arity = min_arity_of cma c
+ val ntys = if typ_level = T_CONST then length tvars else 0
val addit = Symtab.update(c, arity+ntys)
in
- if needs_hBOOL c then (addtypes tvars (addit funcs), preds)
+ if needs_hBOOL cnh c then (addtypes tvars (addit funcs), preds)
else (addtypes tvars funcs, addit preds)
end
- | add_decls (CombVar(_,ctp), (funcs,preds)) =
+ | add_decls _ _ (CombVar(_,ctp), (funcs,preds)) =
(RC.add_foltype_funcs (ctp,funcs), preds)
- | add_decls (CombApp(P,Q),decls) = add_decls(P,add_decls (Q,decls));
+ | add_decls cma cnh (CombApp(P,Q),decls) = add_decls cma cnh (P,add_decls cma cnh (Q,decls));
-fun add_literal_decls (Literal(_,c), decls) = add_decls (c,decls);
+fun add_literal_decls cma cnh (Literal(_,c), decls) = add_decls cma cnh (c,decls);
-fun add_clause_decls (Clause {literals, ...}, decls) =
- foldl add_literal_decls decls literals
+fun add_clause_decls cma cnh (Clause {literals, ...}, decls) =
+ List.foldl (add_literal_decls cma cnh) decls literals
handle Symtab.DUP a => error ("function " ^ a ^ " has multiple arities")
-fun decls_of_clauses clauses arity_clauses =
+fun decls_of_clauses cma cnh clauses arity_clauses =
let val init_functab = Symtab.update (type_wrapper,2) (Symtab.update ("hAPP",2) RC.init_functab)
val init_predtab = Symtab.update ("hBOOL",1) Symtab.empty
- val (functab,predtab) = (foldl add_clause_decls (init_functab, init_predtab) clauses)
+ val (functab,predtab) = (List.foldl (add_clause_decls cma cnh) (init_functab, init_predtab) clauses)
in
- (Symtab.dest (foldl RC.add_arityClause_funcs functab arity_clauses),
+ (Symtab.dest (List.foldl RC.add_arityClause_funcs functab arity_clauses),
Symtab.dest predtab)
end;
fun add_clause_preds (Clause {ctypes_sorts, ...}, preds) =
- foldl RC.add_type_sort_preds preds ctypes_sorts
+ List.foldl RC.add_type_sort_preds preds ctypes_sorts
handle Symtab.DUP a => error ("predicate " ^ a ^ " has multiple arities")
(*Higher-order clauses have only the predicates hBOOL and type classes.*)
fun preds_of_clauses clauses clsrel_clauses arity_clauses =
Symtab.dest
- (foldl RC.add_classrelClause_preds
- (foldl RC.add_arityClause_preds
- (foldl add_clause_preds Symtab.empty clauses)
+ (List.foldl RC.add_classrelClause_preds
+ (List.foldl RC.add_arityClause_preds
+ (List.foldl add_clause_preds Symtab.empty clauses)
arity_clauses)
clsrel_clauses)
@@ -393,20 +390,20 @@
fun count_literal (Literal(_,t), ct) = count_combterm(t,ct);
-fun count_clause (Clause{literals,...}, ct) = foldl count_literal ct literals;
+fun count_clause (Clause{literals,...}, ct) = List.foldl count_literal ct literals;
fun count_user_clause user_lemmas (Clause{axiom_name,literals,...}, ct) =
- if axiom_name mem_string user_lemmas then foldl count_literal ct literals
+ if axiom_name mem_string user_lemmas then List.foldl count_literal ct literals
else ct;
fun cnf_helper_thms thy =
ResAxioms.cnf_rules_pairs thy o map ResAxioms.pairname
-fun get_helper_clauses thy isFO (conjectures, axclauses, user_lemmas) =
+fun get_helper_clauses dfg thy isFO (conjectures, axclauses, user_lemmas) =
if isFO then [] (*first-order*)
else
- let val ct0 = foldl count_clause init_counters conjectures
- val ct = foldl (count_user_clause user_lemmas) ct0 axclauses
+ let val ct0 = List.foldl count_clause init_counters conjectures
+ val ct = List.foldl (count_user_clause user_lemmas) ct0 axclauses
fun needed c = valOf (Symtab.lookup ct c) > 0
val IK = if needed "c_COMBI" orelse needed "c_COMBK"
then (Output.debug (fn () => "Include combinator I K"); cnf_helper_thms thy [comb_I,comb_K])
@@ -419,66 +416,67 @@
else []
val other = cnf_helper_thms thy [ext,fequal_imp_equal,equal_imp_fequal]
in
- map #2 (make_axiom_clauses thy (other @ IK @ BC @ S))
+ map #2 (make_axiom_clauses dfg thy (other @ IK @ BC @ S))
end;
(*Find the minimal arity of each function mentioned in the term. Also, note which uses
are not at top level, to see if hBOOL is needed.*)
-fun count_constants_term toplev t =
+fun count_constants_term toplev t (const_min_arity, const_needs_hBOOL) =
let val (head, args) = strip_comb t
val n = length args
- val _ = List.app (count_constants_term false) args
+ val (const_min_arity, const_needs_hBOOL) = fold (count_constants_term false) args (const_min_arity, const_needs_hBOOL)
in
case head of
CombConst (a,_,_) => (*predicate or function version of "equal"?*)
let val a = if a="equal" andalso not toplev then "c_fequal" else a
+ val const_min_arity = Symtab.map_default (a,n) (curry Int.min n) const_min_arity
in
- const_min_arity := Symtab.map_default (a,n) (curry Int.min n) (!const_min_arity);
- if toplev then ()
- else const_needs_hBOOL := Symtab.update (a,true) (!const_needs_hBOOL)
+ if toplev then (const_min_arity, const_needs_hBOOL)
+ else (const_min_arity, Symtab.update (a,true) (const_needs_hBOOL))
end
- | ts => ()
+ | ts => (const_min_arity, const_needs_hBOOL)
end;
(*A literal is a top-level term*)
-fun count_constants_lit (Literal (_,t)) = count_constants_term true t;
-
-fun count_constants_clause (Clause{literals,...}) = List.app count_constants_lit literals;
+fun count_constants_lit (Literal (_,t)) (const_min_arity, const_needs_hBOOL) =
+ count_constants_term true t (const_min_arity, const_needs_hBOOL);
-fun display_arity (c,n) =
+fun count_constants_clause (Clause{literals,...}) (const_min_arity, const_needs_hBOOL) =
+ fold count_constants_lit literals (const_min_arity, const_needs_hBOOL);
+
+fun display_arity const_needs_hBOOL (c,n) =
Output.debug (fn () => "Constant: " ^ c ^ " arity:\t" ^ Int.toString n ^
- (if needs_hBOOL c then " needs hBOOL" else ""));
+ (if needs_hBOOL const_needs_hBOOL c then " needs hBOOL" else ""));
fun count_constants (conjectures, axclauses, helper_clauses) =
- if !minimize_applies then
- (const_min_arity := Symtab.empty;
- const_needs_hBOOL := Symtab.empty;
- List.app count_constants_clause conjectures;
- List.app count_constants_clause axclauses;
- List.app count_constants_clause helper_clauses;
- List.app display_arity (Symtab.dest (!const_min_arity)))
- else ();
+ if minimize_applies then
+ let val (const_min_arity, const_needs_hBOOL) =
+ fold count_constants_clause conjectures (Symtab.empty, Symtab.empty)
+ |> fold count_constants_clause axclauses
+ |> fold count_constants_clause helper_clauses
+ val _ = List.app (display_arity const_needs_hBOOL) (Symtab.dest (const_min_arity))
+ in (const_min_arity, const_needs_hBOOL) end
+ else (Symtab.empty, Symtab.empty);
(* tptp format *)
(* write TPTP format to a single file *)
fun tptp_write_file thy isFO thms filename (ax_tuples,classrel_clauses,arity_clauses) user_lemmas =
let val _ = Output.debug (fn () => ("Preparing to write the TPTP file " ^ filename))
- val _ = RC.dfg_format := false
- val conjectures = make_conjecture_clauses thy thms
- val (clnames,axclauses) = ListPair.unzip (make_axiom_clauses thy ax_tuples)
- val helper_clauses = get_helper_clauses thy isFO (conjectures, axclauses, user_lemmas)
- val _ = count_constants (conjectures, axclauses, helper_clauses);
- val (tptp_clss,tfree_litss) = ListPair.unzip (map clause2tptp conjectures)
- val tfree_clss = map RC.tptp_tfree_clause (foldl (op union_string) [] tfree_litss)
+ val conjectures = make_conjecture_clauses false thy thms
+ val (clnames,axclauses) = ListPair.unzip (make_axiom_clauses false thy ax_tuples)
+ val helper_clauses = get_helper_clauses false thy isFO (conjectures, axclauses, user_lemmas)
+ val (const_min_arity, const_needs_hBOOL) = count_constants (conjectures, axclauses, helper_clauses);
+ val (tptp_clss,tfree_litss) = ListPair.unzip (map (clause2tptp const_min_arity const_needs_hBOOL) conjectures)
+ val tfree_clss = map RC.tptp_tfree_clause (List.foldl (op union_string) [] tfree_litss)
val out = TextIO.openOut filename
in
- List.app (curry TextIO.output out o #1 o clause2tptp) axclauses;
+ List.app (curry TextIO.output out o #1 o (clause2tptp const_min_arity const_needs_hBOOL)) axclauses;
RC.writeln_strs out tfree_clss;
RC.writeln_strs out tptp_clss;
List.app (curry TextIO.output out o RC.tptp_classrelClause) classrel_clauses;
List.app (curry TextIO.output out o RC.tptp_arity_clause) arity_clauses;
- List.app (curry TextIO.output out o #1 o clause2tptp) helper_clauses;
+ List.app (curry TextIO.output out o #1 o (clause2tptp const_min_arity const_needs_hBOOL)) helper_clauses;
TextIO.closeOut out;
clnames
end;
@@ -488,18 +486,17 @@
fun dfg_write_file thy isFO thms filename (ax_tuples,classrel_clauses,arity_clauses) user_lemmas =
let val _ = Output.debug (fn () => ("Preparing to write the DFG file " ^ filename))
- val _ = RC.dfg_format := true
- val conjectures = make_conjecture_clauses thy thms
- val (clnames,axclauses) = ListPair.unzip (make_axiom_clauses thy ax_tuples)
- val helper_clauses = get_helper_clauses thy isFO (conjectures, axclauses, user_lemmas)
- val _ = count_constants (conjectures, axclauses, helper_clauses);
- val (dfg_clss, tfree_litss) = ListPair.unzip (map clause2dfg conjectures)
+ val conjectures = make_conjecture_clauses true thy thms
+ val (clnames,axclauses) = ListPair.unzip (make_axiom_clauses true thy ax_tuples)
+ val helper_clauses = get_helper_clauses true thy isFO (conjectures, axclauses, user_lemmas)
+ val (const_min_arity, const_needs_hBOOL) = count_constants (conjectures, axclauses, helper_clauses);
+ val (dfg_clss, tfree_litss) = ListPair.unzip (map (clause2dfg const_min_arity const_needs_hBOOL) conjectures)
and probname = Path.implode (Path.base (Path.explode filename))
- val axstrs = map (#1 o clause2dfg) axclauses
+ val axstrs = map (#1 o (clause2dfg const_min_arity const_needs_hBOOL)) axclauses
val tfree_clss = map RC.dfg_tfree_clause (RC.union_all tfree_litss)
val out = TextIO.openOut filename
- val helper_clauses_strs = map (#1 o clause2dfg) helper_clauses
- val (funcs,cl_preds) = decls_of_clauses (helper_clauses @ conjectures @ axclauses) arity_clauses
+ val helper_clauses_strs = map (#1 o (clause2dfg const_min_arity const_needs_hBOOL)) helper_clauses
+ val (funcs,cl_preds) = decls_of_clauses const_min_arity const_needs_hBOOL (helper_clauses @ conjectures @ axclauses) arity_clauses
and ty_preds = preds_of_clauses axclauses classrel_clauses arity_clauses
in
TextIO.output (out, RC.string_of_start probname);
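
Note: the central change in res_hol_clause.ML removes the const_min_arity and const_needs_hBOOL references. count_constants now builds the two symbol tables by threading them through the clause lists with fold and returns them, and the printing functions (string_of_combterm, clause2tptp, clause2dfg, decls_of_clauses, ...) receive them as arguments. A minimal sketch of replacing List.app over mutable state by a threaded accumulator, in plain Standard ML with a single integer standing in for the (const_min_arity, const_needs_hBOOL) pair:

  (* before: statistics accumulated by side effect *)
  val total = ref 0;
  fun count_old n = total := !total + n;
  fun count_all_old ns = (total := 0; List.app count_old ns; !total);

  (* after: the accumulator is threaded explicitly and returned *)
  fun count n acc = acc + n;
  fun count_all ns = List.foldl (fn (n, acc) => count n acc) 0 ns;

  val six = count_all [1, 2, 3];    (* = 6, with no hidden state left behind *)
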
--- a/src/HOL/Tools/res_reconstruct.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/res_reconstruct.ML Wed Mar 04 10:45:52 2009 +0100
@@ -51,7 +51,7 @@
fun atom x = Br(x,[]);
fun scons (x,y) = Br("cons", [x,y]);
-val listof = foldl scons (atom "nil");
+val listof = List.foldl scons (atom "nil");
(*Strings enclosed in single quotes, e.g. filenames*)
val quoted = $$"'" |-- Scan.repeat (~$$"'") --| $$"'" >> implode;
@@ -243,7 +243,7 @@
fun gen_all_vars t = fold_rev Logic.all (OldTerm.term_vars t) t;
fun ints_of_stree_aux (Int n, ns) = n::ns
- | ints_of_stree_aux (Br(_,ts), ns) = foldl ints_of_stree_aux ns ts;
+ | ints_of_stree_aux (Br(_,ts), ns) = List.foldl ints_of_stree_aux ns ts;
fun ints_of_stree t = ints_of_stree_aux (t, []);
@@ -362,7 +362,7 @@
fun replace_dep (old:int, new) dep = if dep=old then new else [dep];
fun replace_deps (old:int, new) (lno, t, deps) =
- (lno, t, foldl (op union_int) [] (map (replace_dep (old, new)) deps));
+ (lno, t, List.foldl (op union_int) [] (map (replace_dep (old, new)) deps));
(*Discard axioms; consolidate adjacent lines that prove the same clause, since they differ
only in type information.*)
@@ -392,7 +392,7 @@
then delete_dep lno lines
else (lno, t, []) :: lines
| add_nonnull_prfline ((lno, t, deps), lines) = (lno, t, deps) :: lines
-and delete_dep lno lines = foldr add_nonnull_prfline [] (map (replace_deps (lno, [])) lines);
+and delete_dep lno lines = List.foldr add_nonnull_prfline [] (map (replace_deps (lno, [])) lines);
fun bad_free (Free (a,_)) = String.isPrefix "sko_" a
| bad_free _ = false;
@@ -435,11 +435,11 @@
val tuples = map (dest_tstp o tstp_line o explode) cnfs
val _ = trace (Int.toString (length tuples) ^ " tuples extracted\n")
val ctxt = ProofContext.set_mode ProofContext.mode_schematic ctxt
- val raw_lines = foldr add_prfline [] (decode_tstp_list ctxt tuples)
+ val raw_lines = List.foldr add_prfline [] (decode_tstp_list ctxt tuples)
val _ = trace (Int.toString (length raw_lines) ^ " raw_lines extracted\n")
- val nonnull_lines = foldr add_nonnull_prfline [] raw_lines
+ val nonnull_lines = List.foldr add_nonnull_prfline [] raw_lines
val _ = trace (Int.toString (length nonnull_lines) ^ " nonnull_lines extracted\n")
- val (_,lines) = foldr (add_wanted_prfline ctxt) (0,[]) nonnull_lines
+ val (_,lines) = List.foldr (add_wanted_prfline ctxt) (0,[]) nonnull_lines
val _ = trace (Int.toString (length lines) ^ " lines extracted\n")
val (ccls,fixes) = ResAxioms.neg_conjecture_clauses th sgno
val _ = trace (Int.toString (length ccls) ^ " conjecture clauses\n")
--- a/src/HOL/Tools/sat_solver.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/sat_solver.ML Wed Mar 04 10:45:52 2009 +0100
@@ -914,6 +914,10 @@
fun zchaff fm =
let
val _ = if (getenv "ZCHAFF_HOME") = "" then raise SatSolver.NOT_CONFIGURED else ()
+ val _ = if (getenv "ZCHAFF_VERSION") <> "2004.5.13" andalso
+ (getenv "ZCHAFF_VERSION") <> "2004.11.15" then raise SatSolver.NOT_CONFIGURED else ()
+ (* both versions of zChaff appear to have the same interface, so we do *)
+ (* not actually need to distinguish between them in the following code *)
val serial_str = serial_string ()
val inpath = File.tmp_path (Path.explode ("isabelle" ^ serial_str ^ ".cnf"))
val outpath = File.tmp_path (Path.explode ("result" ^ serial_str))
@@ -939,12 +943,11 @@
let
fun berkmin fm =
let
- val _ = if (getenv "BERKMIN_HOME") = "" then raise SatSolver.NOT_CONFIGURED else ()
+ val _ = if (getenv "BERKMIN_HOME") = "" orelse (getenv "BERKMIN_EXE") = "" then raise SatSolver.NOT_CONFIGURED else ()
val serial_str = serial_string ()
val inpath = File.tmp_path (Path.explode ("isabelle" ^ serial_str ^ ".cnf"))
val outpath = File.tmp_path (Path.explode ("result" ^ serial_str))
- val exec = getenv "BERKMIN_EXE"
- val cmd = (getenv "BERKMIN_HOME") ^ "/" ^ (if exec = "" then "BerkMin561" else exec) ^ " " ^ (Path.implode inpath) ^ " > " ^ (Path.implode outpath)
+ val cmd = (getenv "BERKMIN_HOME") ^ "/" ^ (getenv "BERKMIN_EXE") ^ " " ^ (Path.implode inpath) ^ " > " ^ (Path.implode outpath)
fun writefn fm = SatSolver.write_dimacs_cnf_file inpath (PropLogic.defcnf fm)
fun readfn () = SatSolver.read_std_result_file outpath ("Satisfiable !!", "solution =", "UNSATISFIABLE !!")
val _ = if File.exists inpath then warning ("overwriting existing file " ^ quote (Path.implode inpath)) else ()
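
Note: the sat_solver.ML hunks tighten the external-solver configuration checks: zChaff is only selected when ZCHAFF_VERSION names one of the two interface-compatible releases, and BerkMin now requires both BERKMIN_HOME and BERKMIN_EXE instead of falling back to a hard-wired executable name. A minimal sketch of the guard pattern in plain Standard ML; getenv is defined via OS.Process.getEnv here only to keep the snippet self-contained, and the surrounding framework is assumed to catch NOT_CONFIGURED and move on to the next solver:

  exception NOT_CONFIGURED;

  fun getenv var = getOpt (OS.Process.getEnv var, "");

  fun check_zchaff () =
    let
      val _ = if getenv "ZCHAFF_HOME" = "" then raise NOT_CONFIGURED else ()
      val version = getenv "ZCHAFF_VERSION"
    in
      if version <> "2004.5.13" andalso version <> "2004.11.15"
      then raise NOT_CONFIGURED else ()
    end;
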
--- a/src/HOL/Tools/simpdata.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/simpdata.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: HOL/simpdata.ML
- ID: $Id$
Author: Tobias Nipkow
Copyright 1991 University of Cambridge
@@ -65,7 +64,7 @@
else
let
val Ps = map (fn k => Free ("P" ^ string_of_int k, propT)) (1 upto j);
- fun mk_simp_implies Q = foldr (fn (R, S) =>
+ fun mk_simp_implies Q = List.foldr (fn (R, S) =>
Const ("HOL.simp_implies", propT --> propT --> propT) $ R $ S) Q Ps
val aT = TFree ("'a", HOLogic.typeS);
val x = Free ("x", aT);
--- a/src/HOL/Tools/specification_package.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Tools/specification_package.ML Wed Mar 04 10:45:52 2009 +0100
@@ -120,7 +120,7 @@
val frees = OldTerm.term_frees prop
val _ = forall (fn v => Sign.of_sort thy (type_of v,HOLogic.typeS)) frees
orelse error "Specificaton: Only free variables of sort 'type' allowed"
- val prop_closed = foldr (fn ((vname,T),prop) => HOLogic.mk_all (vname,T,prop)) prop (map dest_Free frees)
+ val prop_closed = List.foldr (fn ((vname,T),prop) => HOLogic.mk_all (vname,T,prop)) prop (map dest_Free frees)
in
(prop_closed,frees)
end
@@ -161,7 +161,7 @@
in
HOLogic.exists_const T $ Abs(vname,T,Term.abstract_over (c,prop))
end
- val ex_prop = foldr mk_exist prop proc_consts
+ val ex_prop = List.foldr mk_exist prop proc_consts
val cnames = map (fst o dest_Const) proc_consts
fun post_process (arg as (thy,thm)) =
let
@@ -232,7 +232,7 @@
val specification_decl =
P.$$$ "(" |-- Scan.repeat1 (opt_name -- P.term -- opt_overloaded) --| P.$$$ ")" --
- Scan.repeat1 ((SpecParse.opt_thm_name ":" >> apfst Binding.base_name) -- P.prop)
+ Scan.repeat1 ((SpecParse.opt_thm_name ":" >> apfst Binding.name_of) -- P.prop)
val _ =
OuterSyntax.command "specification" "define constants by specification" K.thy_goal
@@ -243,7 +243,7 @@
val ax_specification_decl =
P.name --
(P.$$$ "(" |-- Scan.repeat1 (opt_name -- P.term -- opt_overloaded) --| P.$$$ ")" --
- Scan.repeat1 ((SpecParse.opt_thm_name ":" >> apfst Binding.base_name) -- P.prop))
+ Scan.repeat1 ((SpecParse.opt_thm_name ":" >> apfst Binding.name_of) -- P.prop))
val _ =
OuterSyntax.command "ax_specification" "define constants by specification" K.thy_goal
--- a/src/HOL/Transcendental.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Transcendental.thy Wed Mar 04 10:45:52 2009 +0100
@@ -120,7 +120,7 @@
case (Suc n)
have "(\<Sum> i = 0 ..< 2 * Suc n. if even i then f i else g i) =
(\<Sum> i = 0 ..< n. f (2 * i)) + (\<Sum> i = 0 ..< n. g (2 * i + 1)) + (f (2 * n) + g (2 * n + 1))"
- using Suc.hyps by auto
+ using Suc.hyps unfolding One_nat_def by auto
also have "\<dots> = (\<Sum> i = 0 ..< Suc n. f (2 * i)) + (\<Sum> i = 0 ..< Suc n. g (2 * i + 1))" by auto
finally show ?case .
qed auto
@@ -187,16 +187,18 @@
((\<forall>n. l \<le> (\<Sum>i=0..<2*n + 1. -1^i*a i)) \<and> (\<lambda> n. \<Sum>i=0..<2*n + 1. -1^i*a i) ----> l)"
(is "\<exists>l. ((\<forall>n. ?f n \<le> l) \<and> _) \<and> ((\<forall>n. l \<le> ?g n) \<and> _)")
proof -
- have fg_diff: "\<And>n. ?f n - ?g n = - a (2 * n)" by auto
+ have fg_diff: "\<And>n. ?f n - ?g n = - a (2 * n)" unfolding One_nat_def by auto
have "\<forall> n. ?f n \<le> ?f (Suc n)"
proof fix n show "?f n \<le> ?f (Suc n)" using mono[of "2*n"] by auto qed
moreover
have "\<forall> n. ?g (Suc n) \<le> ?g n"
- proof fix n show "?g (Suc n) \<le> ?g n" using mono[of "Suc (2*n)"] by auto qed
+ proof fix n show "?g (Suc n) \<le> ?g n" using mono[of "Suc (2*n)"]
+ unfolding One_nat_def by auto qed
moreover
have "\<forall> n. ?f n \<le> ?g n"
- proof fix n show "?f n \<le> ?g n" using fg_diff a_pos by auto qed
+ proof fix n show "?f n \<le> ?g n" using fg_diff a_pos
+ unfolding One_nat_def by auto qed
moreover
have "(\<lambda> n. ?f n - ?g n) ----> 0" unfolding fg_diff
proof (rule LIMSEQ_I)
@@ -904,7 +906,7 @@
proof -
have "(\<Sum>n = 0..<1. f n * 0 ^ n) = (\<Sum>n. f n * 0 ^ n)"
by (rule sums_unique [OF series_zero], simp add: power_0_left)
- thus ?thesis by simp
+ thus ?thesis unfolding One_nat_def by simp
qed
lemma exp_zero [simp]: "exp 0 = 1"
@@ -1234,10 +1236,11 @@
show "x - 1 \<in> {- 1<..<1}" and "(0 :: real) < 1" using `0 < x` `x < 2` by auto
{ fix x :: real assume "x \<in> {- 1<..<1}" hence "norm (-x) < 1" by auto
show "summable (\<lambda>n. -1 ^ n * (1 / real (n + 1)) * real (Suc n) * x ^ n)"
+ unfolding One_nat_def
by (auto simp del: power_mult_distrib simp add: power_mult_distrib[symmetric] summable_geometric[OF `norm (-x) < 1`])
}
qed
- hence "DERIV (\<lambda>x. suminf (?f x)) (x - 1) :> suminf (?f' x)" by auto
+ hence "DERIV (\<lambda>x. suminf (?f x)) (x - 1) :> suminf (?f' x)" unfolding One_nat_def by auto
hence "DERIV (\<lambda>x. suminf (?f (x - 1))) x :> suminf (?f' x)" unfolding DERIV_iff repos .
ultimately have "DERIV (\<lambda>x. ln x - suminf (?f (x - 1))) x :> (suminf (?f' x) - suminf (?f' x))"
by (rule DERIV_diff)
@@ -1514,6 +1517,7 @@
lemma DERIV_fun_pow: "DERIV g x :> m ==>
DERIV (%x. (g x) ^ n) x :> real n * (g x) ^ (n - 1) * m"
+unfolding One_nat_def
apply (rule lemma_DERIV_subst)
apply (rule_tac f = "(%x. x ^ n)" in DERIV_chain2)
apply (rule DERIV_pow, auto)
@@ -1635,7 +1639,7 @@
sums sin x"
unfolding sin_def
by (rule sin_converges [THEN sums_summable, THEN sums_group], simp)
- thus ?thesis by (simp add: mult_ac)
+ thus ?thesis unfolding One_nat_def by (simp add: mult_ac)
qed
lemma sin_gt_zero: "[|0 < x; x < 2 |] ==> 0 < sin x"
@@ -1647,6 +1651,7 @@
apply (rule sin_paired [THEN sums_summable, THEN sums_group], simp)
apply (rotate_tac 2)
apply (drule sin_paired [THEN sums_unique, THEN ssubst])
+unfolding One_nat_def
apply (auto simp del: fact_Suc realpow_Suc)
apply (frule sums_unique)
apply (auto simp del: fact_Suc realpow_Suc)
@@ -1720,6 +1725,7 @@
apply (simp (no_asm) add: mult_assoc del: setsum_op_ivl_Suc)
apply (rule sumr_pos_lt_pair)
apply (erule sums_summable, safe)
+unfolding One_nat_def
apply (simp (no_asm) add: divide_inverse real_0_less_add_iff mult_assoc [symmetric]
del: fact_Suc)
apply (rule real_mult_inverse_cancel2)
@@ -2792,7 +2798,7 @@
lemma monoseq_arctan_series: fixes x :: real
assumes "\<bar>x\<bar> \<le> 1" shows "monoseq (\<lambda> n. 1 / real (n*2+1) * x^(n*2+1))" (is "monoseq ?a")
-proof (cases "x = 0") case True thus ?thesis unfolding monoseq_def by auto
+proof (cases "x = 0") case True thus ?thesis unfolding monoseq_def One_nat_def by auto
next
case False
have "norm x \<le> 1" and "x \<le> 1" and "-1 \<le> x" using assms by auto
@@ -2823,7 +2829,7 @@
lemma zeroseq_arctan_series: fixes x :: real
assumes "\<bar>x\<bar> \<le> 1" shows "(\<lambda> n. 1 / real (n*2+1) * x^(n*2+1)) ----> 0" (is "?a ----> 0")
-proof (cases "x = 0") case True thus ?thesis by (auto simp add: LIMSEQ_const)
+proof (cases "x = 0") case True thus ?thesis unfolding One_nat_def by (auto simp add: LIMSEQ_const)
next
case False
have "norm x \<le> 1" and "x \<le> 1" and "-1 \<le> x" using assms by auto
@@ -2831,12 +2837,14 @@
proof (cases "\<bar>x\<bar> < 1")
case True hence "norm x < 1" by auto
from LIMSEQ_mult[OF LIMSEQ_inverse_real_of_nat LIMSEQ_power_zero[OF `norm x < 1`, THEN LIMSEQ_Suc]]
- show ?thesis unfolding inverse_eq_divide Suc_plus1 using LIMSEQ_linear[OF _ pos2] by auto
+ have "(\<lambda>n. 1 / real (n + 1) * x ^ (n + 1)) ----> 0"
+ unfolding inverse_eq_divide Suc_plus1 by simp
+ then show ?thesis using pos2 by (rule LIMSEQ_linear)
next
case False hence "x = -1 \<or> x = 1" using `\<bar>x\<bar> \<le> 1` by auto
- hence n_eq: "\<And> n. x ^ (n * 2 + 1) = x" by auto
+ hence n_eq: "\<And> n. x ^ (n * 2 + 1) = x" unfolding One_nat_def by auto
from LIMSEQ_mult[OF LIMSEQ_inverse_real_of_nat[THEN LIMSEQ_linear, OF pos2, unfolded inverse_eq_divide] LIMSEQ_const[of x]]
- show ?thesis unfolding n_eq by auto
+ show ?thesis unfolding n_eq Suc_plus1 by auto
qed
qed
@@ -2989,7 +2997,7 @@
from `even n` obtain m where "2 * m = n" unfolding even_mult_two_ex by auto
from bounds[of m, unfolded this atLeastAtMost_iff]
have "\<bar>arctan x - (\<Sum>i = 0..<n. (?c x i))\<bar> \<le> (\<Sum>i = 0..<n + 1. (?c x i)) - (\<Sum>i = 0..<n. (?c x i))" by auto
- also have "\<dots> = ?c x n" by auto
+ also have "\<dots> = ?c x n" unfolding One_nat_def by auto
also have "\<dots> = ?a x n" unfolding sgn_pos a_pos by auto
finally show ?thesis .
next
@@ -2998,7 +3006,7 @@
hence m_plus: "2 * (m + 1) = n + 1" by auto
from bounds[of "m + 1", unfolded this atLeastAtMost_iff, THEN conjunct1] bounds[of m, unfolded m_def atLeastAtMost_iff, THEN conjunct2]
have "\<bar>arctan x - (\<Sum>i = 0..<n. (?c x i))\<bar> \<le> (\<Sum>i = 0..<n. (?c x i)) - (\<Sum>i = 0..<n+1. (?c x i))" by auto
- also have "\<dots> = - ?c x n" by auto
+ also have "\<dots> = - ?c x n" unfolding One_nat_def by auto
also have "\<dots> = ?a x n" unfolding sgn_neg a_pos by auto
finally show ?thesis .
qed
@@ -3011,7 +3019,9 @@
ultimately have "0 \<le> ?a 1 n - ?diff 1 n" by (rule LIM_less_bound)
hence "?diff 1 n \<le> ?a 1 n" by auto
}
- have "?a 1 ----> 0" unfolding LIMSEQ_rabs_zero power_one divide_inverse by (auto intro!: LIMSEQ_mult LIMSEQ_linear LIMSEQ_inverse_real_of_nat)
+ have "?a 1 ----> 0"
+ unfolding LIMSEQ_rabs_zero power_one divide_inverse One_nat_def
+ by (auto intro!: LIMSEQ_mult LIMSEQ_linear LIMSEQ_inverse_real_of_nat)
have "?diff 1 ----> 0"
proof (rule LIMSEQ_I)
fix r :: real assume "0 < r"
@@ -3031,7 +3041,7 @@
have "- (pi / 2) < 0" using pi_gt_zero by auto
have "- (2 * pi) < 0" using pi_gt_zero by auto
- have c_minus_minus: "\<And> i. ?c (- 1) i = - ?c 1 i" by auto
+ have c_minus_minus: "\<And> i. ?c (- 1) i = - ?c 1 i" unfolding One_nat_def by auto
have "arctan (- 1) = arctan (tan (-(pi / 4)))" unfolding tan_45 tan_minus ..
also have "\<dots> = - (pi / 4)" by (rule arctan_tan, auto simp add: order_less_trans[OF `- (pi / 2) < 0` pi_gt_zero])
@@ -3179,4 +3189,4 @@
apply (erule polar_ex2)
done
-end
+end
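
Note: the many "unfolding One_nat_def" additions in Transcendental.thy presumably compensate for 1::nat no longer being rewritten to Suc 0 by the default simpset; proofs that used to rely on that implicit step now perform it explicitly with the lemma, which states roughly

  One_nat_def:  "(1::nat) = Suc 0"
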
--- a/src/HOL/Transitive_Closure.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Transitive_Closure.thy Wed Mar 04 10:45:52 2009 +0100
@@ -64,8 +64,8 @@
subsection {* Reflexive closure *}
-lemma reflexive_reflcl[simp]: "reflexive(r^=)"
-by(simp add:refl_def)
+lemma refl_reflcl[simp]: "refl(r^=)"
+by(simp add:refl_on_def)
lemma antisym_reflcl[simp]: "antisym(r^=) = antisym r"
by(simp add:antisym_def)
@@ -118,8 +118,8 @@
rtrancl_induct[of "(ax,ay)" "(bx,by)", split_format (complete),
consumes 1, case_names refl step]
-lemma reflexive_rtrancl: "reflexive (r^*)"
- by (unfold refl_def) fast
+lemma refl_rtrancl: "refl (r^*)"
+by (unfold refl_on_def) fast
text {* Transitivity of transitive closure. *}
lemma trans_rtrancl: "trans (r^*)"
@@ -646,7 +646,7 @@
val trancl_rtrancl_trancl = @{thm trancl_rtrancl_trancl};
val rtrancl_trans = @{thm rtrancl_trans};
- fun decomp (Trueprop $ t) =
+ fun decomp (@{const Trueprop} $ t) =
let fun dec (Const ("op :", _) $ (Const ("Pair", _) $ a $ b) $ rel ) =
let fun decr (Const ("Transitive_Closure.rtrancl", _ ) $ r) = (r,"r*")
| decr (Const ("Transitive_Closure.trancl", _ ) $ r) = (r,"r+")
@@ -654,7 +654,8 @@
val (rel,r) = decr (Envir.beta_eta_contract rel);
in SOME (a,b,rel,r) end
| dec _ = NONE
- in dec t end;
+ in dec t end
+ | decomp _ = NONE;
end);
@@ -669,7 +670,7 @@
val trancl_rtrancl_trancl = @{thm tranclp_rtranclp_tranclp};
val rtrancl_trans = @{thm rtranclp_trans};
- fun decomp (Trueprop $ t) =
+ fun decomp (@{const Trueprop} $ t) =
let fun dec (rel $ a $ b) =
let fun decr (Const ("Transitive_Closure.rtranclp", _ ) $ r) = (r,"r*")
| decr (Const ("Transitive_Closure.tranclp", _ ) $ r) = (r,"r+")
@@ -677,7 +678,8 @@
val (rel,r) = decr rel;
in SOME (a, b, rel, r) end
| dec _ = NONE
- in dec t end;
+ in dec t end
+ | decomp _ = NONE;
end);
*}
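
Note: in the ML section of Transitive_Closure.thy, decomp now matches @{const Trueprop} and gains a catch-all clause. In ML patterns an identifier that is not a constructor is just a variable, so the old pattern (Trueprop $ t) matched any application and merely named its head; the antiquotation pins the pattern to the actual Trueprop constant, and the added | decomp _ = NONE makes the function total. A minimal illustration over a hypothetical two-constructor term type (not Isabelle's real term datatype):

  infix 9 $;
  datatype term = Const of string | op $ of term * term;

  (* "Trueprop" here is only a pattern variable: this matches ANY application,
     and the match is not even exhaustive *)
  fun decomp_old (Trueprop $ t) = SOME t;

  (* match the intended constant and handle everything else explicitly *)
  fun decomp_new (Const "Trueprop" $ t) = SOME t
    | decomp_new _ = NONE;

  val too_liberal = decomp_old (Const "bogus" $ Const "x");   (* SOME (Const "x") *)
  val just_right  = decomp_new (Const "bogus" $ Const "x");   (* NONE *)
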
--- a/src/HOL/UNITY/ListOrder.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/UNITY/ListOrder.thy Wed Mar 04 10:45:52 2009 +0100
@@ -90,16 +90,15 @@
subsection{*genPrefix is a partial order*}
-lemma refl_genPrefix: "reflexive r ==> reflexive (genPrefix r)"
-
-apply (unfold refl_def, auto)
+lemma refl_genPrefix: "refl r ==> refl (genPrefix r)"
+apply (unfold refl_on_def, auto)
apply (induct_tac "x")
prefer 2 apply (blast intro: genPrefix.prepend)
apply (blast intro: genPrefix.Nil)
done
-lemma genPrefix_refl [simp]: "reflexive r ==> (l,l) : genPrefix r"
-by (erule reflD [OF refl_genPrefix UNIV_I])
+lemma genPrefix_refl [simp]: "refl r ==> (l,l) : genPrefix r"
+by (erule refl_onD [OF refl_genPrefix UNIV_I])
lemma genPrefix_mono: "r<=s ==> genPrefix r <= genPrefix s"
apply clarify
@@ -178,8 +177,8 @@
done
lemma same_genPrefix_genPrefix [simp]:
- "reflexive r ==> ((xs@ys, xs@zs) : genPrefix r) = ((ys,zs) : genPrefix r)"
-apply (unfold refl_def)
+ "refl r ==> ((xs@ys, xs@zs) : genPrefix r) = ((ys,zs) : genPrefix r)"
+apply (unfold refl_on_def)
apply (induct_tac "xs")
apply (simp_all (no_asm_simp))
done
@@ -190,7 +189,7 @@
by (case_tac "xs", auto)
lemma genPrefix_take_append:
- "[| reflexive r; (xs,ys) : genPrefix r |]
+ "[| refl r; (xs,ys) : genPrefix r |]
==> (xs@zs, take (length xs) ys @ zs) : genPrefix r"
apply (erule genPrefix.induct)
apply (frule_tac [3] genPrefix_length_le)
@@ -198,7 +197,7 @@
done
lemma genPrefix_append_both:
- "[| reflexive r; (xs,ys) : genPrefix r; length xs = length ys |]
+ "[| refl r; (xs,ys) : genPrefix r; length xs = length ys |]
==> (xs@zs, ys @ zs) : genPrefix r"
apply (drule genPrefix_take_append, assumption)
apply (simp add: take_all)
@@ -210,7 +209,7 @@
by auto
lemma aolemma:
- "[| (xs,ys) : genPrefix r; reflexive r |]
+ "[| (xs,ys) : genPrefix r; refl r |]
==> length xs < length ys --> (xs @ [ys ! length xs], ys) : genPrefix r"
apply (erule genPrefix.induct)
apply blast
@@ -225,7 +224,7 @@
done
lemma append_one_genPrefix:
- "[| (xs,ys) : genPrefix r; length xs < length ys; reflexive r |]
+ "[| (xs,ys) : genPrefix r; length xs < length ys; refl r |]
==> (xs @ [ys ! length xs], ys) : genPrefix r"
by (blast intro: aolemma [THEN mp])
@@ -259,7 +258,7 @@
subsection{*The type of lists is partially ordered*}
-declare reflexive_Id [iff]
+declare refl_Id [iff]
antisym_Id [iff]
trans_Id [iff]
@@ -383,8 +382,8 @@
(** pfixLe **)
-lemma reflexive_Le [iff]: "reflexive Le"
-by (unfold refl_def Le_def, auto)
+lemma refl_Le [iff]: "refl Le"
+by (unfold refl_on_def Le_def, auto)
lemma antisym_Le [iff]: "antisym Le"
by (unfold antisym_def Le_def, auto)
@@ -406,8 +405,8 @@
apply (blast intro: genPrefix_mono [THEN [2] rev_subsetD])
done
-lemma reflexive_Ge [iff]: "reflexive Ge"
-by (unfold refl_def Ge_def, auto)
+lemma refl_Ge [iff]: "refl Ge"
+by (unfold refl_on_def Ge_def, auto)
lemma antisym_Ge [iff]: "antisym Ge"
by (unfold antisym_def Ge_def, auto)
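
Note: the theory-level hunks above (Transitive_Closure.thy, ListOrder.thy) and below (ProgressSets.thy, UNITY.thy, Num_Lemmas.thy, Games.thy) follow a renaming in HOL's Relation theory: reflexive becomes refl, the carrier-relative refl becomes refl_on (so refl_def, reflI, reflD turn into refl_on_def, refl_onI, refl_onD), and in the UNITY.thy hunk below diag becomes Id_on. Assuming the usual HOL definitions, the renamed notions are roughly

  refl_on A r  <->  r <= A <*> A  &  (ALL x:A. (x, x) : r)
  refl r       ==   refl_on UNIV r
  Id_on A      =    {(x, x) | x. x : A}

which is why occurrences of "refl UNIV r" in the ProgressSets.thy hunks below simply become "refl r".
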
--- a/src/HOL/UNITY/ProgressSets.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/UNITY/ProgressSets.thy Wed Mar 04 10:45:52 2009 +0100
@@ -344,8 +344,8 @@
apply (blast intro: clD cl_in_lattice)
done
-lemma refl_relcl: "lattice L ==> refl UNIV (relcl L)"
-by (simp add: reflI relcl_def subset_cl [THEN subsetD])
+lemma refl_relcl: "lattice L ==> refl (relcl L)"
+by (simp add: refl_onI relcl_def subset_cl [THEN subsetD])
lemma trans_relcl: "lattice L ==> trans (relcl L)"
by (blast intro: relcl_trans transI)
@@ -362,12 +362,12 @@
text{*Equation (4.71) of Meier's thesis. He gives no proof.*}
lemma cl_latticeof:
- "[|refl UNIV r; trans r|]
+ "[|refl r; trans r|]
==> cl (latticeof r) X = {t. \<exists>s. s\<in>X & (s,t) \<in> r}"
apply (rule equalityI)
apply (rule cl_least)
apply (simp (no_asm_use) add: latticeof_def trans_def, blast)
- apply (simp add: latticeof_def refl_def, blast)
+ apply (simp add: latticeof_def refl_on_def, blast)
apply (simp add: latticeof_def, clarify)
apply (unfold cl_def, blast)
done
@@ -400,7 +400,7 @@
done
theorem relcl_latticeof_eq:
- "[|refl UNIV r; trans r|] ==> relcl (latticeof r) = r"
+ "[|refl r; trans r|] ==> relcl (latticeof r) = r"
by (simp add: relcl_def cl_latticeof)
--- a/src/HOL/UNITY/UNITY.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/UNITY/UNITY.thy Wed Mar 04 10:45:52 2009 +0100
@@ -359,7 +359,7 @@
constdefs
totalize_act :: "('a * 'a)set => ('a * 'a)set"
- "totalize_act act == act \<union> diag (-(Domain act))"
+ "totalize_act act == act \<union> Id_on (-(Domain act))"
totalize :: "'a program => 'a program"
"totalize F == mk_program (Init F,
--- a/src/HOL/Word/BinGeneral.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Word/BinGeneral.thy Wed Mar 04 10:45:52 2009 +0100
@@ -433,7 +433,7 @@
"!!w. sbintrunc n w = ((w + 2 ^ n) mod 2 ^ (Suc n) - 2 ^ n :: int)"
apply (induct n)
apply clarsimp
- apply (subst zmod_zadd_left_eq)
+ apply (subst mod_add_left_eq)
apply (simp add: bin_last_mod)
apply (simp add: number_of_eq)
apply clarsimp
@@ -767,23 +767,23 @@
lemmas zpower_zmod' = zpower_zmod [where m="c" and y="k", standard]
lemmas brdmod1s' [symmetric] =
- zmod_zadd_left_eq zmod_zadd_right_eq
+ mod_add_left_eq mod_add_right_eq
zmod_zsub_left_eq zmod_zsub_right_eq
zmod_zmult1_eq zmod_zmult1_eq_rev
lemmas brdmods' [symmetric] =
zpower_zmod' [symmetric]
- trans [OF zmod_zadd_left_eq zmod_zadd_right_eq]
+ trans [OF mod_add_left_eq mod_add_right_eq]
trans [OF zmod_zsub_left_eq zmod_zsub_right_eq]
trans [OF zmod_zmult1_eq zmod_zmult1_eq_rev]
zmod_uminus' [symmetric]
- zmod_zadd_left_eq [where b = "1"]
+ mod_add_left_eq [where b = "1::int"]
zmod_zsub_left_eq [where b = "1"]
lemmas bintr_arith1s =
- brdmod1s' [where c="2^n", folded pred_def succ_def bintrunc_mod2p, standard]
+ brdmod1s' [where c="2^n::int", folded pred_def succ_def bintrunc_mod2p, standard]
lemmas bintr_ariths =
- brdmods' [where c="2^n", folded pred_def succ_def bintrunc_mod2p, standard]
+ brdmods' [where c="2^n::int", folded pred_def succ_def bintrunc_mod2p, standard]
lemmas m2pths = pos_mod_sign pos_mod_bound [OF zless2p, standard]
--- a/src/HOL/Word/Num_Lemmas.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Word/Num_Lemmas.thy Wed Mar 04 10:45:52 2009 +0100
@@ -95,7 +95,7 @@
lemma z1pdiv2:
"(2 * b + 1) div 2 = (b::int)" by arith
-lemmas zdiv_le_dividend = xtr3 [OF zdiv_1 [symmetric] zdiv_mono2,
+lemmas zdiv_le_dividend = xtr3 [OF div_by_1 [symmetric] zdiv_mono2,
simplified int_one_le_iff_zero_less, simplified, standard]
lemma axxbyy:
@@ -127,12 +127,12 @@
lemma zmod_zsub_right_eq: "((a::int) - b) mod c = (a - b mod c) mod c"
apply (unfold diff_int_def)
- apply (rule trans [OF _ zmod_zadd_right_eq [symmetric]])
- apply (simp add : zmod_uminus zmod_zadd_right_eq [symmetric])
+ apply (rule trans [OF _ mod_add_right_eq [symmetric]])
+ apply (simp add : zmod_uminus mod_add_right_eq [symmetric])
done
lemma zmod_zsub_left_eq: "((a::int) - b) mod c = (a mod c - b) mod c"
- by (rule zmod_zadd_left_eq [where b = "- b", simplified diff_int_def [symmetric]])
+ by (rule mod_add_left_eq [where b = "- b", simplified diff_int_def [symmetric]])
lemma zmod_zsub_self [simp]:
"((b :: int) - a) mod a = b mod a"
@@ -146,8 +146,8 @@
done
lemmas rdmods [symmetric] = zmod_uminus [symmetric]
- zmod_zsub_left_eq zmod_zsub_right_eq zmod_zadd_left_eq
- zmod_zadd_right_eq zmod_zmult1_eq zmod_zmult1_eq_rev
+ zmod_zsub_left_eq zmod_zsub_right_eq mod_add_left_eq
+ mod_add_right_eq zmod_zmult1_eq zmod_zmult1_eq_rev
lemma mod_plus_right:
"((a + x) mod m = (b + x) mod m) = (a mod m = b mod (m :: nat))"
@@ -169,7 +169,8 @@
lemmas push_mods = push_mods' [THEN eq_reflection, standard]
lemmas pull_mods = push_mods [symmetric] rdmods [THEN eq_reflection, standard]
lemmas mod_simps =
- zmod_zmult_self1 [THEN eq_reflection] zmod_zmult_self2 [THEN eq_reflection]
+ mod_mult_self2_is_0 [THEN eq_reflection]
+ mod_mult_self1_is_0 [THEN eq_reflection]
mod_mod_trivial [THEN eq_reflection]
lemma nat_mod_eq:
@@ -259,7 +260,7 @@
(** Rep_Integ **)
lemma eqne: "equiv A r ==> X : A // r ==> X ~= {}"
- unfolding equiv_def refl_def quotient_def Image_def by auto
+ unfolding equiv_def refl_on_def quotient_def Image_def by auto
lemmas Rep_Integ_ne = Integ.Rep_Integ
[THEN equiv_intrel [THEN eqne, simplified Integ_def [symmetric]], standard]
@@ -313,7 +314,7 @@
"a > 1 ==> a ^ n mod a ^ m = (if m <= n then 0 else (a :: int) ^ n)"
apply clarsimp
apply safe
- apply (simp add: zdvd_iff_zmod_eq_0 [symmetric])
+ apply (simp add: dvd_eq_mod_eq_0 [symmetric])
apply (drule le_iff_add [THEN iffD1])
apply (force simp: zpower_zadd_distrib)
apply (rule mod_pos_pos_trivial)
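These hunks switch from the integer-specific zmod_zadd_left_eq / zmod_zadd_right_eq / zdvd_iff_zmod_eq_0 to the generic mod_add_left_eq, mod_add_right_eq and dvd_eq_mod_eq_0. A hedged illustration of the generic statements, written here only at type int and not taken from the patch:

    lemma "((a::int) + b) mod c = (a mod c + b) mod c"
    by (rule mod_add_left_eq)

    lemma "((a::int) dvd b) = (b mod a = 0)"
    by (rule dvd_eq_mod_eq_0)

Since both rules are stated for the generic division classes, the same proofs should go through at type nat as well.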
--- a/src/HOL/Word/WordGenLib.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Word/WordGenLib.thy Wed Mar 04 10:45:52 2009 +0100
@@ -273,7 +273,7 @@
have x: "2^len_of TYPE('a) - i = -i + 2^len_of TYPE('a)" by simp
show ?thesis
apply (subst x)
- apply (subst word_uint.Abs_norm [symmetric], subst zmod_zadd_self2)
+ apply (subst word_uint.Abs_norm [symmetric], subst mod_add_self2)
apply simp
done
qed
--- a/src/HOL/Word/WordShift.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/Word/WordShift.thy Wed Mar 04 10:45:52 2009 +0100
@@ -530,7 +530,7 @@
done
lemma and_mask_dvd: "2 ^ n dvd uint w = (w AND mask n = 0)"
- apply (simp add: zdvd_iff_zmod_eq_0 and_mask_mod_2p)
+ apply (simp add: dvd_eq_mod_eq_0 and_mask_mod_2p)
apply (simp add: word_uint.norm_eq_iff [symmetric] word_of_int_homs)
apply (subst word_uint.norm_Rep [symmetric])
apply (simp only: bintrunc_bintrunc_min bintrunc_mod2p [symmetric] min_def)
--- a/src/HOL/ZF/Games.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/ZF/Games.thy Wed Mar 04 10:45:52 2009 +0100
@@ -847,7 +847,7 @@
by (auto simp add: quotient_def)
lemma equiv_eq_game[simp]: "equiv UNIV eq_game_rel"
- by (auto simp add: equiv_def refl_def sym_def trans_def eq_game_rel_def
+ by (auto simp add: equiv_def refl_on_def sym_def trans_def eq_game_rel_def
eq_game_sym intro: eq_game_refl eq_game_trans)
instantiation Pg :: "{ord, zero, plus, minus, uminus}"
--- a/src/HOL/ex/ApproximationEx.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/ex/ApproximationEx.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,6 +1,7 @@
-(* Title: HOL/ex/ApproximationEx.thy
- Author: Johannes Hoelzl <hoelzl@in.tum.de> 2009
+(* Title: HOL/ex/ApproximationEx.thy
+ Author: Johannes Hoelzl <hoelzl@in.tum.de> 2009
*)
+
theory ApproximationEx
imports "~~/src/HOL/Reflection/Approximation"
begin
--- a/src/HOL/ex/Eval_Examples.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/ex/Eval_Examples.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,6 +1,4 @@
-(* ID: $Id$
- Author: Florian Haftmann, TU Muenchen
-*)
+(* Author: Florian Haftmann, TU Muenchen *)
header {* Small examples for evaluation mechanisms *}
--- a/src/HOL/ex/Numeral.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/ex/Numeral.thy Wed Mar 04 10:45:52 2009 +0100
@@ -157,6 +157,18 @@
by (simp_all add: num_eq_iff nat_of_num_add nat_of_num_mult
left_distrib right_distrib)
+lemma Dig_eq:
+ "One = One \<longleftrightarrow> True"
+ "One = Dig0 n \<longleftrightarrow> False"
+ "One = Dig1 n \<longleftrightarrow> False"
+ "Dig0 m = One \<longleftrightarrow> False"
+ "Dig1 m = One \<longleftrightarrow> False"
+ "Dig0 m = Dig0 n \<longleftrightarrow> m = n"
+ "Dig0 m = Dig1 n \<longleftrightarrow> False"
+ "Dig1 m = Dig0 n \<longleftrightarrow> False"
+ "Dig1 m = Dig1 n \<longleftrightarrow> m = n"
+ by simp_all
+
lemma less_eq_num_code [numeral, simp, code]:
"One \<le> n \<longleftrightarrow> True"
"Dig0 m \<le> One \<longleftrightarrow> False"
@@ -433,21 +445,12 @@
text {* Could be perhaps more general than here. *}
-lemma (in ordered_semidom) of_num_pos: "0 < of_num n"
-proof -
- have "(0::nat) < of_num n"
- by (induct n) (simp_all add: semiring_numeral_class.of_num.simps)
- then have "of_nat 0 \<noteq> of_nat (of_num n)"
- by (cases n) (simp_all only: semiring_numeral_class.of_num.simps of_nat_eq_iff)
- then have "0 \<noteq> of_num n"
- by (simp add: of_nat_of_num)
- moreover have "0 \<le> of_nat (of_num n)" by simp
- ultimately show ?thesis by (simp add: of_nat_of_num)
-qed
-
context ordered_semidom
begin
+lemma of_num_pos [numeral]: "0 < of_num n"
+ by (induct n) (simp_all add: of_num.simps add_pos_pos)
+
lemma of_num_less_eq_iff [numeral]: "of_num m \<le> of_num n \<longleftrightarrow> m \<le> n"
proof -
have "of_nat (of_num m) \<le> of_nat (of_num n) \<longleftrightarrow> m \<le> n"
@@ -490,6 +493,68 @@
then show ?thesis by (simp add: of_num_one)
qed
+lemma of_num_nonneg [numeral]: "0 \<le> of_num n"
+ by (induct n) (simp_all add: of_num.simps add_nonneg_nonneg)
+
+lemma of_num_less_zero_iff [numeral]: "\<not> of_num n < 0"
+ by (simp add: not_less of_num_nonneg)
+
+lemma of_num_le_zero_iff [numeral]: "\<not> of_num n \<le> 0"
+ by (simp add: not_le of_num_pos)
+
+end
+
+context ordered_idom
+begin
+
+lemma minus_of_num_less_of_num_iff [numeral]: "- of_num m < of_num n"
+proof -
+ have "- of_num m < 0" by (simp add: of_num_pos)
+ also have "0 < of_num n" by (simp add: of_num_pos)
+ finally show ?thesis .
+qed
+
+lemma minus_of_num_less_one_iff [numeral]: "- of_num n < 1"
+proof -
+ have "- of_num n < 0" by (simp add: of_num_pos)
+ also have "0 < 1" by simp
+ finally show ?thesis .
+qed
+
+lemma minus_one_less_of_num_iff [numeral]: "- 1 < of_num n"
+proof -
+ have "- 1 < 0" by simp
+ also have "0 < of_num n" by (simp add: of_num_pos)
+ finally show ?thesis .
+qed
+
+lemma minus_of_num_le_of_num_iff [numeral]: "- of_num m \<le> of_num n"
+ by (simp add: less_imp_le minus_of_num_less_of_num_iff)
+
+lemma minus_of_num_le_one_iff [numeral]: "- of_num n \<le> 1"
+ by (simp add: less_imp_le minus_of_num_less_one_iff)
+
+lemma minus_one_le_of_num_iff [numeral]: "- 1 \<le> of_num n"
+ by (simp add: less_imp_le minus_one_less_of_num_iff)
+
+lemma of_num_le_minus_of_num_iff [numeral]: "\<not> of_num m \<le> - of_num n"
+ by (simp add: not_le minus_of_num_less_of_num_iff)
+
+lemma one_le_minus_of_num_iff [numeral]: "\<not> 1 \<le> - of_num n"
+ by (simp add: not_le minus_of_num_less_one_iff)
+
+lemma of_num_le_minus_one_iff [numeral]: "\<not> of_num n \<le> - 1"
+ by (simp add: not_le minus_one_less_of_num_iff)
+
+lemma of_num_less_minus_of_num_iff [numeral]: "\<not> of_num m < - of_num n"
+ by (simp add: not_less minus_of_num_le_of_num_iff)
+
+lemma one_less_minus_of_num_iff [numeral]: "\<not> 1 < - of_num n"
+ by (simp add: not_less minus_of_num_le_one_iff)
+
+lemma of_num_less_minus_one_iff [numeral]: "\<not> of_num n < - 1"
+ by (simp add: not_less minus_one_le_of_num_iff)
+
end
subsubsection {*
--- a/src/HOL/ex/ROOT.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/ex/ROOT.ML Wed Mar 04 10:45:52 2009 +0100
@@ -93,4 +93,5 @@
use_thy "Sudoku"
else ();
-HTML.with_charset "utf-8" (no_document use_thys) ["Hebrew", "Chinese"];
+HTML.with_charset "utf-8" (no_document use_thys)
+ ["Hebrew", "Chinese", "Serbian"];
--- a/src/HOL/ex/Tarski.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/ex/Tarski.thy Wed Mar 04 10:45:52 2009 +0100
@@ -73,7 +73,7 @@
definition
PartialOrder :: "('a potype) set" where
- "PartialOrder = {P. refl (pset P) (order P) & antisym (order P) &
+ "PartialOrder = {P. refl_on (pset P) (order P) & antisym (order P) &
trans (order P)}"
definition
@@ -158,7 +158,7 @@
unfolding PartialOrder_def dual_def
by auto
-lemma (in PO) PO_imp_refl [simp]: "refl A r"
+lemma (in PO) PO_imp_refl_on [simp]: "refl_on A r"
apply (insert cl_po)
apply (simp add: PartialOrder_def A_def r_def)
done
@@ -175,7 +175,7 @@
lemma (in PO) reflE: "x \<in> A ==> (x, x) \<in> r"
apply (insert cl_po)
-apply (simp add: PartialOrder_def refl_def A_def r_def)
+apply (simp add: PartialOrder_def refl_on_def A_def r_def)
done
lemma (in PO) antisymE: "[| (a, b) \<in> r; (b, a) \<in> r |] ==> a = b"
@@ -198,7 +198,7 @@
apply (simp (no_asm) add: PartialOrder_def)
apply auto
-- {* refl *}
-apply (simp add: refl_def induced_def)
+apply (simp add: refl_on_def induced_def)
apply (blast intro: reflE)
-- {* antisym *}
apply (simp add: antisym_def induced_def)
@@ -235,7 +235,7 @@
lemma (in PO) dualPO: "dual cl \<in> PartialOrder"
apply (insert cl_po)
-apply (simp add: PartialOrder_def dual_def refl_converse
+apply (simp add: PartialOrder_def dual_def refl_on_converse
trans_converse antisym_converse)
done
@@ -266,8 +266,8 @@
declare CL_imp_PO [THEN PO.PO_imp_sym, simp]
declare CL_imp_PO [THEN PO.PO_imp_trans, simp]*)
-lemma (in CL) CO_refl: "refl A r"
-by (rule PO_imp_refl)
+lemma (in CL) CO_refl_on: "refl_on A r"
+by (rule PO_imp_refl_on)
lemma (in CL) CO_antisym: "antisym r"
by (rule PO_imp_sym)
@@ -533,7 +533,7 @@
lemma (in CLF) fix_in_H:
"[| H = {x. (x, f x) \<in> r & x \<in> A}; x \<in> P |] ==> x \<in> H"
-by (simp add: P_def fix_imp_eq [of _ f A] reflE CO_refl
+by (simp add: P_def fix_imp_eq [of _ f A] reflE CO_refl_on
fix_subset [of f A, THEN subsetD])
lemma (in CLF) fixf_le_lubH:
@@ -583,8 +583,8 @@
subsection {* interval *}
lemma (in CLF) rel_imp_elem: "(x, y) \<in> r ==> x \<in> A"
-apply (insert CO_refl)
-apply (simp add: refl_def, blast)
+apply (insert CO_refl_on)
+apply (simp add: refl_on_def, blast)
done
lemma (in CLF) interval_subset: "[| a \<in> A; b \<in> A |] ==> interval r a b \<subseteq> A"
@@ -754,7 +754,7 @@
apply (rule notI)
apply (drule_tac a = "Top cl" in equals0D)
apply (simp add: interval_def)
-apply (simp add: refl_def Top_in_lattice Top_prop)
+apply (simp add: refl_on_def Top_in_lattice Top_prop)
done
lemma (in CLF) Bot_intv_not_empty: "x \<in> A ==> interval r (Bot cl) x \<noteq> {}"
--- a/src/HOL/ex/ThreeDivides.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOL/ex/ThreeDivides.thy Wed Mar 04 10:45:52 2009 +0100
@@ -187,9 +187,8 @@
"nd = nlen (m div 10) \<Longrightarrow>
m div 10 = (\<Sum>x<nd. m div 10 div 10^x mod 10 * 10^x)"
by blast
- have "\<exists>c. m = 10*(m div 10) + c \<and> c < 10" by presburger
- then obtain c where mexp: "m = 10*(m div 10) + c \<and> c < 10" ..
- then have cdef: "c = m mod 10" by arith
+ obtain c where mexp: "m = 10*(m div 10) + c \<and> c < 10"
+ and cdef: "c = m mod 10" by simp
show "m = (\<Sum>x<nlen m. m div 10^x mod 10 * 10^x)"
proof -
from `Suc nd = nlen m`
--- a/src/HOLCF/ConvexPD.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOLCF/ConvexPD.thy Wed Mar 04 10:45:52 2009 +0100
@@ -291,22 +291,26 @@
apply (simp add: PDPlus_commute)
done
-lemma convex_plus_absorb: "xs +\<natural> xs = xs"
+lemma convex_plus_absorb [simp]: "xs +\<natural> xs = xs"
apply (induct xs rule: convex_pd.principal_induct, simp)
apply (simp add: PDPlus_absorb)
done
-interpretation aci_convex_plus!: ab_semigroup_idem_mult "op +\<natural>"
- proof qed (rule convex_plus_assoc convex_plus_commute convex_plus_absorb)+
+lemma convex_plus_left_commute: "xs +\<natural> (ys +\<natural> zs) = ys +\<natural> (xs +\<natural> zs)"
+by (rule mk_left_commute
+ [of "op +\<natural>", OF convex_plus_assoc convex_plus_commute])
-lemma convex_plus_left_commute: "xs +\<natural> (ys +\<natural> zs) = ys +\<natural> (xs +\<natural> zs)"
-by (rule aci_convex_plus.mult_left_commute)
+lemma convex_plus_left_absorb [simp]: "xs +\<natural> (xs +\<natural> ys) = xs +\<natural> ys"
+by (simp only: convex_plus_assoc [symmetric] convex_plus_absorb)
-lemma convex_plus_left_absorb: "xs +\<natural> (xs +\<natural> ys) = xs +\<natural> ys"
-by (rule aci_convex_plus.mult_left_idem)
-(*
-lemmas convex_plus_aci = aci_convex_plus.mult_ac_idem
-*)
+text {* Useful for @{text "simp add: convex_plus_ac"} *}
+lemmas convex_plus_ac =
+ convex_plus_assoc convex_plus_commute convex_plus_left_commute
+
+text {* Useful for @{text "simp only: convex_plus_aci"} *}
+lemmas convex_plus_aci =
+ convex_plus_ac convex_plus_absorb convex_plus_left_absorb
+
lemma convex_unit_less_plus_iff [simp]:
"{x}\<natural> \<sqsubseteq> ys +\<natural> zs \<longleftrightarrow> {x}\<natural> \<sqsubseteq> ys \<and> {x}\<natural> \<sqsubseteq> zs"
apply (rule iffI)
@@ -413,7 +417,7 @@
apply unfold_locales
apply (simp add: convex_plus_assoc)
apply (simp add: convex_plus_commute)
-apply (simp add: convex_plus_absorb eta_cfun)
+apply (simp add: eta_cfun)
done
lemma convex_bind_basis_simps [simp]:
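Here the ab_semigroup_idem_mult interpretation is traded for explicit lemmas, collected as convex_plus_ac (associativity, commutativity, left-commutativity) and convex_plus_aci (additionally the two absorption laws). A sketch of the intended AC-rewriting usage, assuming nothing beyond the lemma collections introduced above; the concrete goal is illustration only:

    lemma "(xs +\<natural> ys) +\<natural> zs = ys +\<natural> (zs +\<natural> xs)"
    by (simp add: convex_plus_ac)

The analogous lower_plus_ac/aci and upper_plus_ac/aci collections further below are meant to be used in the same way.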
--- a/src/HOLCF/Fixrec.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOLCF/Fixrec.thy Wed Mar 04 10:45:52 2009 +0100
@@ -583,6 +583,20 @@
use "Tools/fixrec_package.ML"
+setup {* FixrecPackage.setup *}
+
+setup {*
+ FixrecPackage.add_matchers
+ [ (@{const_name up}, @{const_name match_up}),
+ (@{const_name sinl}, @{const_name match_sinl}),
+ (@{const_name sinr}, @{const_name match_sinr}),
+ (@{const_name spair}, @{const_name match_spair}),
+ (@{const_name cpair}, @{const_name match_cpair}),
+ (@{const_name ONE}, @{const_name match_ONE}),
+ (@{const_name TT}, @{const_name match_TT}),
+ (@{const_name FF}, @{const_name match_FF}) ]
+*}
+
hide (open) const return bind fail run cases
end
--- a/src/HOLCF/IsaMakefile Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOLCF/IsaMakefile Wed Mar 04 10:45:52 2009 +0100
@@ -89,6 +89,7 @@
$(LOG)/HOLCF-ex.gz: $(OUT)/HOLCF ex/Stream.thy ex/Dagstuhl.thy \
ex/Dnat.thy ex/Fix2.thy ex/Focus_ex.thy ex/Hoare.thy ex/Loop.thy \
+ ex/Powerdomain_ex.thy \
ex/ROOT.ML ex/Fixrec_ex.thy ../HOL/Library/Nat_Infinity.thy
@$(ISABELLE_TOOL) usedir $(OUT)/HOLCF ex
--- a/src/HOLCF/LowerPD.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOLCF/LowerPD.thy Wed Mar 04 10:45:52 2009 +0100
@@ -245,22 +245,25 @@
apply (simp add: PDPlus_commute)
done
-lemma lower_plus_absorb: "xs +\<flat> xs = xs"
+lemma lower_plus_absorb [simp]: "xs +\<flat> xs = xs"
apply (induct xs rule: lower_pd.principal_induct, simp)
apply (simp add: PDPlus_absorb)
done
-interpretation aci_lower_plus!: ab_semigroup_idem_mult "op +\<flat>"
- proof qed (rule lower_plus_assoc lower_plus_commute lower_plus_absorb)+
+lemma lower_plus_left_commute: "xs +\<flat> (ys +\<flat> zs) = ys +\<flat> (xs +\<flat> zs)"
+by (rule mk_left_commute [of "op +\<flat>", OF lower_plus_assoc lower_plus_commute])
-lemma lower_plus_left_commute: "xs +\<flat> (ys +\<flat> zs) = ys +\<flat> (xs +\<flat> zs)"
-by (rule aci_lower_plus.mult_left_commute)
+lemma lower_plus_left_absorb [simp]: "xs +\<flat> (xs +\<flat> ys) = xs +\<flat> ys"
+by (simp only: lower_plus_assoc [symmetric] lower_plus_absorb)
-lemma lower_plus_left_absorb: "xs +\<flat> (xs +\<flat> ys) = xs +\<flat> ys"
-by (rule aci_lower_plus.mult_left_idem)
-(*
-lemmas lower_plus_aci = aci_lower_plus.mult_ac_idem
-*)
+text {* Useful for @{text "simp add: lower_plus_ac"} *}
+lemmas lower_plus_ac =
+ lower_plus_assoc lower_plus_commute lower_plus_left_commute
+
+text {* Useful for @{text "simp only: lower_plus_aci"} *}
+lemmas lower_plus_aci =
+ lower_plus_ac lower_plus_absorb lower_plus_left_absorb
+
lemma lower_plus_less1: "xs \<sqsubseteq> xs +\<flat> ys"
apply (induct xs ys rule: lower_pd.principal_induct2, simp, simp)
apply (simp add: PDPlus_lower_less)
@@ -315,14 +318,8 @@
lower_plus_less_iff
lower_unit_less_plus_iff
-lemma fooble:
- fixes f :: "'a::po \<Rightarrow> 'b::po"
- assumes f: "\<And>x y. f x \<sqsubseteq> f y \<longleftrightarrow> x \<sqsubseteq> y"
- shows "f x = f y \<longleftrightarrow> x = y"
-unfolding po_eq_conv by (simp add: f)
-
lemma lower_unit_eq_iff [simp]: "{x}\<flat> = {y}\<flat> \<longleftrightarrow> x = y"
-by (rule lower_unit_less_iff [THEN fooble])
+by (simp add: po_eq_conv)
lemma lower_unit_strict [simp]: "{\<bottom>}\<flat> = \<bottom>"
unfolding inst_lower_pd_pcpo Rep_compact_bot [symmetric] by simp
@@ -399,7 +396,7 @@
apply unfold_locales
apply (simp add: lower_plus_assoc)
apply (simp add: lower_plus_commute)
-apply (simp add: lower_plus_absorb eta_cfun)
+apply (simp add: eta_cfun)
done
lemma lower_bind_basis_simps [simp]:
--- a/src/HOLCF/Tools/domain/domain_axioms.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOLCF/Tools/domain/domain_axioms.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: HOLCF/Tools/domain/domain_axioms.ML
- ID: $Id$
Author: David von Oheimb
Syntax generator for domain command.
@@ -29,7 +28,7 @@
val rep_iso_ax = ("rep_iso", mk_trp(dc_abs`(dc_rep`%x_name') === %:x_name'));
val when_def = ("when_def",%%:(dname^"_when") ==
- foldr (uncurry /\ ) (/\x_name'((when_body cons (fn (x,y) =>
+ List.foldr (uncurry /\ ) (/\x_name'((when_body cons (fn (x,y) =>
Bound(1+length cons+x-y)))`(dc_rep`Bound 0))) (when_funs cons));
val copy_def = let
@@ -37,9 +36,9 @@
then (cproj (Bound z) eqs (rec_of arg))`Bound(z-x)
else Bound(z-x);
fun one_con (con,args) =
- foldr /\# (list_ccomb (%%:con, mapn (idxs (length args)) 1 args)) args;
+ List.foldr /\# (list_ccomb (%%:con, mapn (idxs (length args)) 1 args)) args;
in ("copy_def", %%:(dname^"_copy") ==
- /\"f" (list_ccomb (%%:(dname^"_when"), map one_con cons))) end;
+ /\ "f" (list_ccomb (%%:(dname^"_when"), map one_con cons))) end;
(* -- definitions concerning the constructors, discriminators and selectors - *)
@@ -49,7 +48,7 @@
fun inj y 1 _ = y
| inj y _ 0 = mk_sinl y
| inj y i j = mk_sinr (inj y (i-1) (j-1));
- in foldr /\# (dc_abs`(inj (parms args) m n)) args end;
+ in List.foldr /\# (dc_abs`(inj (parms args) m n)) args end;
val con_defs = mapn (fn n => fn (con,args) =>
(extern_name con ^"_def", %%:con == con_def (length cons) n (con,args))) 0 cons;
@@ -57,14 +56,14 @@
val dis_defs = let
fun ddef (con,_) = (dis_name con ^"_def",%%:(dis_name con) ==
list_ccomb(%%:(dname^"_when"),map
- (fn (con',args) => (foldr /\#
+ (fn (con',args) => (List.foldr /\#
(if con'=con then TT else FF) args)) cons))
in map ddef cons end;
val mat_defs = let
fun mdef (con,_) = (mat_name con ^"_def",%%:(mat_name con) ==
list_ccomb(%%:(dname^"_when"),map
- (fn (con',args) => (foldr /\#
+ (fn (con',args) => (List.foldr /\#
(if con'=con
then mk_return (mk_ctuple (map (bound_arg args) args))
else mk_fail) args)) cons))
@@ -79,7 +78,7 @@
val r = Bound (length args);
val rhs = case args of [] => mk_return HOLogic.unit
| _ => mk_ctuple_pat ps ` mk_ctuple xs;
- fun one_con (con',args') = foldr /\# (if con'=con then rhs else mk_fail) args';
+ fun one_con (con',args') = List.foldr /\# (if con'=con then rhs else mk_fail) args';
in (pat_name con ^"_def", list_comb (%%:(pat_name con), ps) ==
list_ccomb(%%:(dname^"_when"), map one_con cons))
end
@@ -89,7 +88,7 @@
fun sdef con n arg = Option.map (fn sel => (sel^"_def",%%:sel ==
list_ccomb(%%:(dname^"_when"),map
(fn (con',args) => if con'<>con then UU else
- foldr /\# (Bound (length args - n)) args) cons))) (sel_of arg);
+ List.foldr /\# (Bound (length args - n)) args) cons))) (sel_of arg);
in List.mapPartial I (List.concat(map (fn (con,args) => mapn (sdef con) 1 args) cons)) end;
@@ -107,7 +106,7 @@
[when_def, copy_def] @
con_defs @ dis_defs @ mat_defs @ pat_defs @ sel_defs @
[take_def, finite_def])
-end; (* let *)
+end; (* let (calc_axioms) *)
fun infer_props thy = map (apsnd (FixrecPackage.legacy_infer_prop thy));
@@ -117,6 +116,14 @@
fun add_defs_i x = snd o (PureThy.add_defs false) (map (Thm.no_attributes o apfst Binding.name) x);
fun add_defs_infer defs thy = add_defs_i (infer_props thy defs) thy;
+fun add_matchers (((dname,_),cons) : eq) thy =
+ let
+ val con_names = map fst cons;
+ val mat_names = map mat_name con_names;
+ fun qualify n = Sign.full_name thy (Binding.name n);
+ val ms = map qualify con_names ~~ map qualify mat_names;
+ in FixrecPackage.add_matchers ms thy end;
+
in (* local *)
fun add_axioms (comp_dnam, eqs : eq list) thy' = let
@@ -125,7 +132,7 @@
val x_name = idx_name dnames "x";
fun copy_app dname = %%:(dname^"_copy")`Bound 0;
val copy_def = ("copy_def" , %%:(comp_dname^"_copy") ==
- /\"f"(mk_ctuple (map copy_app dnames)));
+ /\ "f"(mk_ctuple (map copy_app dnames)));
val bisim_def = ("bisim_def",%%:(comp_dname^"_bisim")==mk_lam("R",
let
fun one_con (con,args) = let
@@ -144,11 +151,11 @@
(allargs~~((allargs_cnt-1) downto 0)));
fun rel_app i ra = proj (Bound(allargs_cnt+2)) eqs (rec_of ra) $
Bound (2*recs_cnt-i) $ Bound (recs_cnt-i);
- val capps = foldr mk_conj (mk_conj(
+ val capps = List.foldr mk_conj (mk_conj(
Bound(allargs_cnt+1)===list_ccomb(%%:con,map (bound_arg allvns) vns1),
Bound(allargs_cnt+0)===list_ccomb(%%:con,map (bound_arg allvns) vns2)))
(mapn rel_app 1 rec_args);
- in foldr mk_ex (Library.foldr mk_conj
+ in List.foldr mk_ex (Library.foldr mk_conj
(map (defined o Bound) nonlazy_idxs,capps)) allvns end;
fun one_comp n (_,cons) =mk_all(x_name(n+1),mk_all(x_name(n+1)^"'",mk_imp(
proj (Bound 2) eqs n $ Bound 1 $ Bound 0,
@@ -164,7 +171,8 @@
in thy |> Sign.add_path comp_dnam
|> add_defs_infer (bisim_def::(if length eqs>1 then [copy_def] else []))
|> Sign.parent_path
-end;
+ |> fold add_matchers eqs
+end; (* let (add_axioms) *)
end; (* local *)
end; (* struct *)
--- a/src/HOLCF/Tools/domain/domain_library.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOLCF/Tools/domain/domain_library.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: HOLCF/Tools/domain/domain_library.ML
- ID: $Id$
Author: David von Oheimb
Library for domain command.
@@ -15,7 +14,7 @@
| itr [a] = f2 a
| itr (a::l) = f(a, itr l)
in itr l end;
-fun map_cumulr f start xs = foldr (fn (x,(ys,res))=>case f(x,res) of (y,res2) =>
+fun map_cumulr f start xs = List.foldr (fn (x,(ys,res))=>case f(x,res) of (y,res2) =>
(y::ys,res2)) ([],start) xs;
--- a/src/HOLCF/Tools/domain/domain_syntax.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOLCF/Tools/domain/domain_syntax.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: HOLCF/Tools/domain/domain_syntax.ML
- ID: $Id$
Author: David von Oheimb
Syntax generator for domain command.
@@ -22,14 +21,14 @@
else foldr1 mk_sprodT (map opt_lazy args);
fun freetvar s = let val tvar = mk_TFree s in
if tvar mem typevars then freetvar ("t"^s) else tvar end;
- fun when_type (_ ,_,args) = foldr (op ->>) (freetvar "t") (map third args);
+ fun when_type (_ ,_,args) = List.foldr (op ->>) (freetvar "t") (map third args);
in
val dtype = Type(dname,typevars);
val dtype2 = foldr1 mk_ssumT (map prod cons');
val dnam = Sign.base_name dname;
val const_rep = (dnam^"_rep" , dtype ->> dtype2, NoSyn);
val const_abs = (dnam^"_abs" , dtype2 ->> dtype , NoSyn);
- val const_when = (dnam^"_when",foldr (op ->>) (dtype ->> freetvar "t") (map when_type cons'), NoSyn);
+ val const_when = (dnam^"_when", List.foldr (op ->>) (dtype ->> freetvar "t") (map when_type cons'), NoSyn);
val const_copy = (dnam^"_copy", dtypeprod ->> dtype ->> dtype , NoSyn);
end;
@@ -41,7 +40,7 @@
else c::esc cs
| esc [] = []
in implode o esc o Symbol.explode end;
- fun con (name,s,args) = (name,foldr (op ->>) dtype (map third args),s);
+ fun con (name,s,args) = (name, List.foldr (op ->>) dtype (map third args),s);
fun dis (con ,s,_ ) = (dis_name_ con, dtype->>trT,
Mixfix(escape ("is_" ^ con), [], Syntax.max_pri));
(* strictly speaking, these constants have one argument,
@@ -86,7 +85,7 @@
val capp = app "Rep_CFun";
fun con1 n (con,mx,args) = Library.foldl capp (c_ast con mx, argvars n args);
fun case1 n (con,mx,args) = app "_case1" (con1 n (con,mx,args), expvar n);
- fun arg1 n (con,_,args) = foldr cabs (expvar n) (argvars n args);
+ fun arg1 n (con,_,args) = List.foldr cabs (expvar n) (argvars n args);
fun when1 n m = if n = m then arg1 n else K (Constant "UU");
fun app_var x = mk_appl (Constant "_variable") [x, Variable "rhs"];
--- a/src/HOLCF/Tools/fixrec_package.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOLCF/Tools/fixrec_package.ML Wed Mar 04 10:45:52 2009 +0100
@@ -8,17 +8,24 @@
sig
val legacy_infer_term: theory -> term -> term
val legacy_infer_prop: theory -> term -> term
- val add_fixrec: bool -> (Attrib.binding * string) list list -> theory -> theory
- val add_fixrec_i: bool -> ((binding * attribute list) * term) list list -> theory -> theory
+
+ val add_fixrec: bool -> (binding * string option * mixfix) list
+ -> (Attrib.binding * string) list -> local_theory -> local_theory
+
+ val add_fixrec_i: bool -> (binding * typ option * mixfix) list
+ -> (Attrib.binding * term) list -> local_theory -> local_theory
+
val add_fixpat: Attrib.binding * string list -> theory -> theory
- val add_fixpat_i: (binding * attribute list) * term list -> theory -> theory
+ val add_fixpat_i: Thm.binding * term list -> theory -> theory
+ val add_matchers: (string * string) list -> theory -> theory
+ val setup: theory -> theory
end;
structure FixrecPackage: FIXREC_PACKAGE =
struct
(* legacy type inference *)
-
+(* used by the domain package *)
fun legacy_infer_term thy t =
singleton (Syntax.check_terms (ProofContext.init thy)) (Sign.intern_term thy t);
@@ -33,15 +40,41 @@
fun fixrec_eq_err thy s eq =
fixrec_err (s ^ "\nin\n" ^ quote (Syntax.string_of_term_global thy eq));
+(*************************************************************************)
+(***************************** building types ****************************)
+(*************************************************************************)
+
(* ->> is taken from holcf_logic.ML *)
-(* TODO: fix dependencies so we can import HOLCFLogic here *)
-infixr 6 ->>;
-fun S ->> T = Type (@{type_name "->"},[S,T]);
+fun cfunT (T, U) = Type(@{type_name "->"}, [T, U]);
+
+infixr 6 ->>; val (op ->>) = cfunT;
+
+fun dest_cfunT (Type(@{type_name "->"}, [T, U])) = (T, U)
+ | dest_cfunT T = raise TYPE ("dest_cfunT", [T], []);
+
+fun binder_cfun (Type(@{type_name "->"},[T, U])) = T :: binder_cfun U
+ | binder_cfun _ = [];
+
+fun body_cfun (Type(@{type_name "->"},[T, U])) = body_cfun U
+ | body_cfun T = T;
-(* extern_name is taken from domain/library.ML *)
-fun extern_name con = case Symbol.explode con of
- ("o"::"p"::" "::rest) => implode rest
- | _ => con;
+fun strip_cfun T : typ list * typ =
+ (binder_cfun T, body_cfun T);
+
+fun maybeT T = Type(@{type_name "maybe"}, [T]);
+
+fun dest_maybeT (Type(@{type_name "maybe"}, [T])) = T
+ | dest_maybeT T = raise TYPE ("dest_maybeT", [T], []);
+
+fun tupleT [] = @{typ "unit"}
+ | tupleT [T] = T
+ | tupleT (T :: Ts) = HOLogic.mk_prodT (T, tupleT Ts);
+
+fun matchT T = body_cfun T ->> maybeT (tupleT (binder_cfun T));
+
+(*************************************************************************)
+(***************************** building terms ****************************)
+(*************************************************************************)
val mk_trp = HOLogic.mk_Trueprop;
@@ -52,60 +85,119 @@
fun chead_of (Const(@{const_name Rep_CFun},_)$f$t) = chead_of f
| chead_of u = u;
-(* these are helpful functions copied from HOLCF/domain/library.ML *)
-fun %: s = Free(s,dummyT);
-fun %%: s = Const(s,dummyT);
-infix 0 ==; fun S == T = %%:"==" $ S $ T;
-infix 1 ===; fun S === T = %%:"op =" $ S $ T;
-infix 9 ` ; fun f ` x = %%:@{const_name Rep_CFun} $ f $ x;
+fun capply_const (S, T) =
+ Const(@{const_name Rep_CFun}, (S ->> T) --> (S --> T));
+
+fun cabs_const (S, T) =
+ Const(@{const_name Abs_CFun}, (S --> T) --> (S ->> T));
+
+fun mk_capply (t, u) =
+ let val (S, T) =
+ case Term.fastype_of t of
+ Type(@{type_name "->"}, [S, T]) => (S, T)
+ | _ => raise TERM ("mk_capply " ^ ML_Syntax.print_list ML_Syntax.print_term [t, u], [t, u]);
+ in capply_const (S, T) $ t $ u end;
+
+infix 0 ==; val (op ==) = Logic.mk_equals;
+infix 1 ===; val (op ===) = HOLogic.mk_eq;
+infix 9 ` ; val (op `) = mk_capply;
+
+
+fun mk_cpair (t, u) =
+ let val T = Term.fastype_of t
+ val U = Term.fastype_of u
+ val cpairT = T ->> U ->> HOLogic.mk_prodT (T, U)
+ in Const(@{const_name cpair}, cpairT) ` t ` u end;
+
+fun mk_cfst t =
+ let val T = Term.fastype_of t;
+ val (U, _) = HOLogic.dest_prodT T;
+ in Const(@{const_name cfst}, T ->> U) ` t end;
+
+fun mk_csnd t =
+ let val T = Term.fastype_of t;
+ val (_, U) = HOLogic.dest_prodT T;
+ in Const(@{const_name csnd}, T ->> U) ` t end;
+
+fun mk_csplit t =
+ let val (S, TU) = dest_cfunT (Term.fastype_of t);
+ val (T, U) = dest_cfunT TU;
+ val csplitT = (S ->> T ->> U) ->> HOLogic.mk_prodT (S, T) ->> U;
+ in Const(@{const_name csplit}, csplitT) ` t end;
(* builds the expression (LAM v. rhs) *)
-fun big_lambda v rhs = %%:@{const_name Abs_CFun}$(Term.lambda v rhs);
+fun big_lambda v rhs =
+ cabs_const (Term.fastype_of v, Term.fastype_of rhs) $ Term.lambda v rhs;
(* builds the expression (LAM v1 v2 .. vn. rhs) *)
fun big_lambdas [] rhs = rhs
| big_lambdas (v::vs) rhs = big_lambda v (big_lambdas vs rhs);
(* builds the expression (LAM <v1,v2,..,vn>. rhs) *)
-fun lambda_ctuple [] rhs = big_lambda (%:"unit") rhs
+fun lambda_ctuple [] rhs = big_lambda (Free("unit", HOLogic.unitT)) rhs
| lambda_ctuple (v::[]) rhs = big_lambda v rhs
| lambda_ctuple (v::vs) rhs =
- %%:@{const_name csplit}`(big_lambda v (lambda_ctuple vs rhs));
+ mk_csplit (big_lambda v (lambda_ctuple vs rhs));
(* builds the expression <v1,v2,..,vn> *)
-fun mk_ctuple [] = %%:"UU"
+fun mk_ctuple [] = @{term "UU::unit"}
| mk_ctuple (t::[]) = t
-| mk_ctuple (t::ts) = %%:@{const_name cpair}`t`(mk_ctuple ts);
+| mk_ctuple (t::ts) = mk_cpair (t, mk_ctuple ts);
+
+fun mk_return t =
+ let val T = Term.fastype_of t
+ in Const(@{const_name Fixrec.return}, T ->> maybeT T) ` t end;
+
+fun mk_bind (t, u) =
+ let val (T, mU) = dest_cfunT (Term.fastype_of u);
+ val bindT = maybeT T ->> (T ->> mU) ->> mU;
+ in Const(@{const_name Fixrec.bind}, bindT) ` t ` u end;
+
+fun mk_mplus (t, u) =
+ let val mT = Term.fastype_of t
+ in Const(@{const_name Fixrec.mplus}, mT ->> mT ->> mT) ` t ` u end;
+
+fun mk_run t =
+ let val mT = Term.fastype_of t
+ val T = dest_maybeT mT
+ in Const(@{const_name Fixrec.run}, mT ->> T) ` t end;
+
+fun mk_fix t =
+ let val (T, _) = dest_cfunT (Term.fastype_of t)
+ in Const(@{const_name fix}, (T ->> T) ->> T) ` t end;
(*************************************************************************)
(************* fixed-point definitions and unfolding theorems ************)
(*************************************************************************)
-fun add_fixdefs eqs thy =
+fun add_fixdefs
+ (fixes : ((binding * typ) * mixfix) list)
+ (spec : (Attrib.binding * term) list)
+ (lthy : local_theory) =
let
- val (lhss,rhss) = ListPair.unzip (map dest_eqs eqs);
- val fixpoint = %%:@{const_name fix}`lambda_ctuple lhss (mk_ctuple rhss);
+ val names = map (Binding.name_of o fst o fst) fixes;
+ val all_names = space_implode "_" names;
+ val (lhss,rhss) = ListPair.unzip (map (dest_eqs o snd) spec);
+ val fixpoint = mk_fix (lambda_ctuple lhss (mk_ctuple rhss));
- fun one_def (l as Const(n,T)) r =
- let val b = Sign.base_name n in (b, (b^"_def", l == r)) end
+ fun one_def (l as Free(n,_)) r =
+ let val b = Sign.base_name n
+ in ((Binding.name (b^"_def"), []), r) end
| one_def _ _ = fixrec_err "fixdefs: lhs not of correct form";
fun defs [] _ = []
| defs (l::[]) r = [one_def l r]
- | defs (l::ls) r = one_def l (%%:@{const_name cfst}`r) :: defs ls (%%:@{const_name csnd}`r);
- val (names, pre_fixdefs) = ListPair.unzip (defs lhss fixpoint);
-
- val fixdefs = map (apsnd (legacy_infer_prop thy)) pre_fixdefs;
- val (fixdef_thms, thy') =
- PureThy.add_defs false (map (Thm.no_attributes o apfst Binding.name) fixdefs) thy;
- val ctuple_fixdef_thm = foldr1 (fn (x,y) => @{thm cpair_equalI} OF [x,y]) fixdef_thms;
-
- val ctuple_unfold = legacy_infer_term thy' (mk_trp (mk_ctuple lhss === mk_ctuple rhss));
- val ctuple_unfold_thm = Goal.prove_global thy' [] [] ctuple_unfold
- (fn _ => EVERY [rtac (ctuple_fixdef_thm RS fix_eq2 RS trans) 1,
- simp_tac (simpset_of thy') 1]);
- val ctuple_induct_thm =
- (space_implode "_" names ^ "_induct", ctuple_fixdef_thm RS def_fix_ind);
-
+ | defs (l::ls) r = one_def l (mk_cfst r) :: defs ls (mk_csnd r);
+ val fixdefs = defs lhss fixpoint;
+ val define_all = fold_map (LocalTheory.define Thm.definitionK);
+ val (fixdef_thms : (term * (string * thm)) list, lthy') = lthy
+ |> define_all (map (apfst fst) fixes ~~ fixdefs);
+ fun cpair_equalI (thm1, thm2) = @{thm cpair_equalI} OF [thm1, thm2];
+ val ctuple_fixdef_thm = foldr1 cpair_equalI (map (snd o snd) fixdef_thms);
+ val ctuple_induct_thm = ctuple_fixdef_thm RS def_fix_ind;
+ val ctuple_unfold_thm =
+ Goal.prove lthy' [] [] (mk_trp (mk_ctuple lhss === mk_ctuple rhss))
+ (fn _ => EVERY [rtac (ctuple_fixdef_thm RS fix_eq2 RS trans) 1,
+ simp_tac (local_simpset_of lthy') 1]);
fun unfolds [] thm = []
| unfolds (n::[]) thm = [(n^"_unfold", thm)]
| unfolds (n::ns) thm = let
@@ -113,93 +205,117 @@
val thmR = thm RS @{thm cpair_eqD2};
in (n^"_unfold", thmL) :: unfolds ns thmR end;
val unfold_thms = unfolds names ctuple_unfold_thm;
- val thms = ctuple_induct_thm :: unfold_thms;
- val (_, thy'') = PureThy.add_thms (map (Thm.no_attributes o apfst Binding.name) thms) thy';
+ fun mk_note (n, thm) = ((Binding.name n, []), [thm]);
+ val (thmss, lthy'') = lthy'
+ |> fold_map (LocalTheory.note Thm.theoremK o mk_note)
+ ((all_names ^ "_induct", ctuple_induct_thm) :: unfold_thms);
in
- (thy'', names, fixdef_thms, map snd unfold_thms)
+ (lthy'', names, fixdef_thms, map snd unfold_thms)
end;
(*************************************************************************)
(*********** monadic notation and pattern matching compilation ***********)
(*************************************************************************)
-fun add_names (Const(a,_), bs) = insert (op =) (Sign.base_name a) bs
- | add_names (Free(a,_) , bs) = insert (op =) a bs
- | add_names (f $ u , bs) = add_names (f, add_names(u, bs))
- | add_names (Abs(a,_,t), bs) = add_names (t, insert (op =) a bs)
- | add_names (_ , bs) = bs;
+structure FixrecMatchData = TheoryDataFun (
+ type T = string Symtab.table;
+ val empty = Symtab.empty;
+ val copy = I;
+ val extend = I;
+ fun merge _ tabs : T = Symtab.merge (K true) tabs;
+);
+
+(* associate match functions with pattern constants *)
+fun add_matchers ms = FixrecMatchData.map (fold Symtab.update ms);
-fun add_terms ts xs = foldr add_names xs ts;
+fun taken_names (t : term) : bstring list =
+ let
+ fun taken (Const(a,_), bs) = insert (op =) (Sign.base_name a) bs
+ | taken (Free(a,_) , bs) = insert (op =) a bs
+ | taken (f $ u , bs) = taken (f, taken (u, bs))
+ | taken (Abs(a,_,t), bs) = taken (t, insert (op =) a bs)
+ | taken (_ , bs) = bs;
+ in
+ taken (t, [])
+ end;
(* builds a monadic term for matching a constructor pattern *)
-fun pre_build pat rhs vs taken =
+fun pre_build match_name pat rhs vs taken =
case pat of
Const(@{const_name Rep_CFun},_)$f$(v as Free(n,T)) =>
- pre_build f rhs (v::vs) taken
+ pre_build match_name f rhs (v::vs) taken
| Const(@{const_name Rep_CFun},_)$f$x =>
- let val (rhs', v, taken') = pre_build x rhs [] taken;
- in pre_build f rhs' (v::vs) taken' end
+ let val (rhs', v, taken') = pre_build match_name x rhs [] taken;
+ in pre_build match_name f rhs' (v::vs) taken' end
| Const(c,T) =>
let
val n = Name.variant taken "v";
fun result_type (Type(@{type_name "->"},[_,T])) (x::xs) = result_type T xs
| result_type T _ = T;
val v = Free(n, result_type T vs);
- val m = "match_"^(extern_name(Sign.base_name c));
+ val m = Const(match_name c, matchT T);
val k = lambda_ctuple vs rhs;
in
- (%%:@{const_name Fixrec.bind}`(%%:m`v)`k, v, n::taken)
+ (mk_bind (m`v, k), v, n::taken)
end
| Free(n,_) => fixrec_err ("expected constructor, found free variable " ^ quote n)
| _ => fixrec_err "pre_build: invalid pattern";
(* builds a monadic term for matching a function definition pattern *)
(* returns (name, arity, matcher) *)
-fun building pat rhs vs taken =
+fun building match_name pat rhs vs taken =
case pat of
Const(@{const_name Rep_CFun}, _)$f$(v as Free(n,T)) =>
- building f rhs (v::vs) taken
+ building match_name f rhs (v::vs) taken
| Const(@{const_name Rep_CFun}, _)$f$x =>
- let val (rhs', v, taken') = pre_build x rhs [] taken;
- in building f rhs' (v::vs) taken' end
- | Const(name,_) => (name, length vs, big_lambdas vs rhs)
- | _ => fixrec_err "function is not declared as constant in theory";
+ let val (rhs', v, taken') = pre_build match_name x rhs [] taken;
+ in building match_name f rhs' (v::vs) taken' end
+ | Free(_,_) => ((pat, length vs), big_lambdas vs rhs)
+ | Const(_,_) => ((pat, length vs), big_lambdas vs rhs)
+ | _ => fixrec_err ("function is not declared as constant in theory: "
+ ^ ML_Syntax.print_term pat);
-fun match_eq eq =
- let val (lhs,rhs) = dest_eqs eq;
- in building lhs (%%:@{const_name Fixrec.return}`rhs) [] (add_terms [eq] []) end;
+fun strip_alls t =
+ if Logic.is_all t then strip_alls (snd (Logic.dest_all t)) else t;
+
+fun match_eq match_name eq =
+ let
+ val (lhs,rhs) = dest_eqs (Logic.strip_imp_concl (strip_alls eq));
+ in
+ building match_name lhs (mk_return rhs) [] (taken_names eq)
+ end;
(* returns the sum (using +++) of the terms in ms *)
(* also applies "run" to the result! *)
fun fatbar arity ms =
let
+ fun LAM_Ts 0 t = ([], Term.fastype_of t)
+ | LAM_Ts n (_ $ Abs(_,T,t)) =
+ let val (Ts, U) = LAM_Ts (n-1) t in (T::Ts, U) end
+ | LAM_Ts _ _ = fixrec_err "fatbar: internal error, not enough LAMs";
fun unLAM 0 t = t
| unLAM n (_$Abs(_,_,t)) = unLAM (n-1) t
| unLAM _ _ = fixrec_err "fatbar: internal error, not enough LAMs";
- fun reLAM 0 t = t
- | reLAM n t = reLAM (n-1) (%%:@{const_name Abs_CFun} $ Abs("",dummyT,t));
- fun mplus (x,y) = %%:@{const_name Fixrec.mplus}`x`y;
- val msum = foldr1 mplus (map (unLAM arity) ms);
+ fun reLAM ([], U) t = t
+ | reLAM (T::Ts, U) t = reLAM (Ts, T ->> U) (cabs_const(T,U)$Abs("",T,t));
+ val msum = foldr1 mk_mplus (map (unLAM arity) ms);
+ val (Ts, U) = LAM_Ts arity (hd ms)
in
- reLAM arity (%%:@{const_name Fixrec.run}`msum)
+ reLAM (rev Ts, dest_maybeT U) (mk_run msum)
end;
-fun unzip3 [] = ([],[],[])
- | unzip3 ((x,y,z)::ts) =
- let val (xs,ys,zs) = unzip3 ts
- in (x::xs, y::ys, z::zs) end;
-
(* this is the pattern-matching compiler function *)
-fun compile_pats eqs =
+fun compile_pats match_name eqs =
let
- val ((n::names),(a::arities),mats) = unzip3 (map match_eq eqs);
+ val (((n::names),(a::arities)),mats) =
+ apfst ListPair.unzip (ListPair.unzip (map (match_eq match_name) eqs));
val cname = if forall (fn x => n=x) names then n
else fixrec_err "all equations in block must define the same function";
val arity = if forall (fn x => a=x) arities then a
else fixrec_err "all equations in block must have the same arity";
val rhs = fatbar arity mats;
in
- mk_trp (%%:cname === rhs)
+ mk_trp (cname === rhs)
end;
(*************************************************************************)
@@ -207,11 +323,13 @@
(*************************************************************************)
(* proves a block of pattern matching equations as theorems, using unfold *)
-fun make_simps thy (unfold_thm, eqns) =
+fun make_simps lthy (unfold_thm, eqns : (Attrib.binding * term) list) =
let
- val tacs = [rtac (unfold_thm RS @{thm ssubst_lhs}) 1, asm_simp_tac (simpset_of thy) 1];
- fun prove_term t = Goal.prove_global thy [] [] t (K (EVERY tacs));
- fun prove_eqn ((name, eqn_t), atts) = ((name, prove_term eqn_t), atts);
+ val tacs =
+ [rtac (unfold_thm RS @{thm ssubst_lhs}) 1,
+ asm_simp_tac (local_simpset_of lthy) 1];
+ fun prove_term t = Goal.prove lthy [] [] t (K (EVERY tacs));
+ fun prove_eqn (bind, eqn_t) = (bind, prove_term eqn_t);
in
map prove_eqn eqns
end;
@@ -220,42 +338,77 @@
(************************* Main fixrec function **************************)
(*************************************************************************)
-fun gen_add_fixrec prep_prop prep_attrib strict blocks thy =
+local
+(* code adapted from HOL/Tools/primrec_package.ML *)
+
+fun prepare_spec prep_spec ctxt raw_fixes raw_spec =
+ let
+ val ((fixes, spec), _) = prep_spec
+ raw_fixes (map (single o apsnd single) raw_spec) ctxt
+ in (fixes, map (apsnd the_single) spec) end;
+
+fun gen_fixrec
+ (set_group : bool)
+ (prep_spec : (binding * 'a option * mixfix) list ->
+ (Attrib.binding * 'b list) list list ->
+ Proof.context ->
+ (((binding * typ) * mixfix) list * (Attrib.binding * term list) list)
+ * Proof.context
+ )
+ (strict : bool)
+ raw_fixes
+ raw_spec
+ (lthy : local_theory) =
let
- val eqns = List.concat blocks;
- val lengths = map length blocks;
-
- val ((bindings, srcss), strings) = apfst split_list (split_list eqns);
- val names = map Binding.base_name bindings;
- val atts = map (map (prep_attrib thy)) srcss;
- val eqn_ts = map (prep_prop thy) strings;
- val rec_ts = map (fn eq => chead_of (fst (dest_eqs (Logic.strip_imp_concl eq)))
- handle TERM _ => fixrec_eq_err thy "not a proper equation" eq) eqn_ts;
- val (_, eqn_ts') = OldPrimrecPackage.unify_consts thy rec_ts eqn_ts;
-
- fun unconcat [] _ = []
- | unconcat (n::ns) xs = List.take (xs,n) :: unconcat ns (List.drop (xs,n));
- val pattern_blocks = unconcat lengths (map Logic.strip_imp_concl eqn_ts');
- val compiled_ts = map (legacy_infer_term thy o compile_pats) pattern_blocks;
- val (thy', cnames, fixdef_thms, unfold_thms) = add_fixdefs compiled_ts thy;
+ val (fixes : ((binding * typ) * mixfix) list,
+ spec : (Attrib.binding * term) list) =
+ prepare_spec prep_spec lthy raw_fixes raw_spec;
+ val chead_of_spec =
+ chead_of o fst o dest_eqs o Logic.strip_imp_concl o strip_alls o snd;
+ fun name_of (Free (n, _)) = n
+ | name_of t = fixrec_err ("unknown term");
+ val all_names = map (name_of o chead_of_spec) spec;
+ val names = distinct (op =) all_names;
+ fun block_of_name n =
+ map_filter
+ (fn (m,eq) => if m = n then SOME eq else NONE)
+ (all_names ~~ spec);
+ val blocks = map block_of_name names;
+
+ val matcher_tab = FixrecMatchData.get (ProofContext.theory_of lthy);
+ fun match_name c =
+ case Symtab.lookup matcher_tab c of SOME m => m
+ | NONE => fixrec_err ("unknown pattern constructor: " ^ c);
+
+ val matches = map (compile_pats match_name) (map (map snd) blocks);
+ val spec' = map (pair Attrib.empty_binding) matches;
+ val (lthy', cnames, fixdef_thms, unfold_thms) =
+ add_fixdefs fixes spec' lthy;
in
if strict then let (* only prove simp rules if strict = true *)
- val eqn_blocks = unconcat lengths ((names ~~ eqn_ts') ~~ atts);
- val simps = maps (make_simps thy') (unfold_thms ~~ eqn_blocks);
- val (simp_thms, thy'') = PureThy.add_thms ((map o apfst o apfst) Binding.name simps) thy';
-
- val simp_names = map (fn name => name^"_simps") cnames;
- val simp_attribute = rpair [Simplifier.simp_add];
- val simps' = map simp_attribute (simp_names ~~ unconcat lengths simp_thms);
+ val simps : (Attrib.binding * thm) list list =
+ map (make_simps lthy') (unfold_thms ~~ blocks);
+ fun mk_bind n : Attrib.binding =
+ (Binding.name (n ^ "_simps"),
+ [Attrib.internal (K Simplifier.simp_add)]);
+ val simps1 : (Attrib.binding * thm list) list =
+ map (fn (n,xs) => (mk_bind n, map snd xs)) (names ~~ simps);
+ val simps2 : (Attrib.binding * thm list) list =
+ map (apsnd (fn thm => [thm])) (List.concat simps);
+ val (_, lthy'') = lthy'
+ |> fold_map (LocalTheory.note Thm.theoremK) (simps1 @ simps2);
in
- (snd o PureThy.add_thmss ((map o apfst o apfst) Binding.name simps')) thy''
+ lthy''
end
- else thy'
+ else lthy'
end;
-val add_fixrec = gen_add_fixrec Syntax.read_prop_global Attrib.attribute;
-val add_fixrec_i = gen_add_fixrec Sign.cert_prop (K I);
+in
+val add_fixrec_i = gen_fixrec false Specification.check_specification;
+val add_fixrec = gen_fixrec true Specification.read_specification;
+
+end; (* local *)
(*************************************************************************)
(******************************** Fixpat *********************************)
@@ -291,17 +444,34 @@
local structure P = OuterParse and K = OuterKeyword in
-val fixrec_eqn = SpecParse.opt_thm_name ":" -- P.prop;
-
+(* bool parser *)
val fixrec_strict = P.opt_keyword "permissive" >> not;
-val fixrec_decl = fixrec_strict -- P.and_list1 (Scan.repeat1 fixrec_eqn);
+fun pipe_error t = P.!!! (Scan.fail_with (K
+ (cat_lines ["Equations must be separated by " ^ quote "|", quote t])));
+
+(* (Attrib.binding * string) parser *)
+val statement = SpecParse.opt_thm_name ":" -- P.prop --| Scan.ahead
+ ((P.term :-- pipe_error) || Scan.succeed ("",""));
+
+(* ((Attrib.binding * string) list) parser *)
+val statements = P.enum1 "|" statement;
+
+(* (((xstring option * bool) * (Binding.binding * string option * Mixfix.mixfix) list)
+ * (Attrib.binding * string) list) parser *)
+val fixrec_decl =
+ P.opt_target -- fixrec_strict -- P.fixes --| P.$$$ "where" -- statements;
(* this builds a parser for a new keyword, fixrec, whose functionality
is defined by add_fixrec *)
val _ =
- OuterSyntax.command "fixrec" "define recursive functions (HOLCF)" K.thy_decl
- (fixrec_decl >> (Toplevel.theory o uncurry add_fixrec));
+ let
+ val desc = "define recursive functions (HOLCF)";
+ fun fixrec (((opt_target, strict), raw_fixes), raw_spec) =
+ Toplevel.local_theory opt_target (add_fixrec strict raw_fixes raw_spec);
+ in
+ OuterSyntax.command "fixrec" desc K.thy_decl (fixrec_decl >> fixrec)
+ end;
(* fixpat parser *)
val fixpat_decl = SpecParse.opt_thm_name ":" -- Scan.repeat1 P.prop;
@@ -309,7 +479,9 @@
val _ =
OuterSyntax.command "fixpat" "define rewrites for fixrec functions" K.thy_decl
(fixpat_decl >> (Toplevel.theory o add_fixpat));
-
+
end; (* local structure *)
+val setup = FixrecMatchData.init;
+
end;
--- a/src/HOLCF/UpperPD.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOLCF/UpperPD.thy Wed Mar 04 10:45:52 2009 +0100
@@ -243,22 +243,25 @@
apply (simp add: PDPlus_commute)
done
-lemma upper_plus_absorb: "xs +\<sharp> xs = xs"
+lemma upper_plus_absorb [simp]: "xs +\<sharp> xs = xs"
apply (induct xs rule: upper_pd.principal_induct, simp)
apply (simp add: PDPlus_absorb)
done
-interpretation aci_upper_plus!: ab_semigroup_idem_mult "op +\<sharp>"
- proof qed (rule upper_plus_assoc upper_plus_commute upper_plus_absorb)+
+lemma upper_plus_left_commute: "xs +\<sharp> (ys +\<sharp> zs) = ys +\<sharp> (xs +\<sharp> zs)"
+by (rule mk_left_commute [of "op +\<sharp>", OF upper_plus_assoc upper_plus_commute])
-lemma upper_plus_left_commute: "xs +\<sharp> (ys +\<sharp> zs) = ys +\<sharp> (xs +\<sharp> zs)"
-by (rule aci_upper_plus.mult_left_commute)
+lemma upper_plus_left_absorb [simp]: "xs +\<sharp> (xs +\<sharp> ys) = xs +\<sharp> ys"
+by (simp only: upper_plus_assoc [symmetric] upper_plus_absorb)
-lemma upper_plus_left_absorb: "xs +\<sharp> (xs +\<sharp> ys) = xs +\<sharp> ys"
-by (rule aci_upper_plus.mult_left_idem)
-(*
-lemmas upper_plus_aci = aci_upper_plus.mult_ac_idem
-*)
+text {* Useful for @{text "simp add: upper_plus_ac"} *}
+lemmas upper_plus_ac =
+ upper_plus_assoc upper_plus_commute upper_plus_left_commute
+
+text {* Useful for @{text "simp only: upper_plus_aci"} *}
+lemmas upper_plus_aci =
+ upper_plus_ac upper_plus_absorb upper_plus_left_absorb
+
lemma upper_plus_less1: "xs +\<sharp> ys \<sqsubseteq> xs"
apply (induct xs ys rule: upper_pd.principal_induct2, simp, simp)
apply (simp add: PDPlus_upper_less)
@@ -388,7 +391,7 @@
apply unfold_locales
apply (simp add: upper_plus_assoc)
apply (simp add: upper_plus_commute)
-apply (simp add: upper_plus_absorb eta_cfun)
+apply (simp add: eta_cfun)
done
lemma upper_bind_basis_simps [simp]:
--- a/src/HOLCF/ex/Fixrec_ex.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOLCF/ex/Fixrec_ex.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: HOLCF/ex/Fixrec_ex.thy
- ID: $Id$
Author: Brian Huffman
*)
@@ -19,18 +18,18 @@
text {* typical usage is with lazy constructors *}
-consts down :: "'a u \<rightarrow> 'a"
-fixrec "down\<cdot>(up\<cdot>x) = x"
+fixrec down :: "'a u \<rightarrow> 'a"
+where "down\<cdot>(up\<cdot>x) = x"
text {* with strict constructors, rewrite rules may require side conditions *}
-consts from_sinl :: "'a \<oplus> 'b \<rightarrow> 'a"
-fixrec "x \<noteq> \<bottom> \<Longrightarrow> from_sinl\<cdot>(sinl\<cdot>x) = x"
+fixrec from_sinl :: "'a \<oplus> 'b \<rightarrow> 'a"
+where "x \<noteq> \<bottom> \<Longrightarrow> from_sinl\<cdot>(sinl\<cdot>x) = x"
text {* lifting can turn a strict constructor into a lazy one *}
-consts from_sinl_up :: "'a u \<oplus> 'b \<rightarrow> 'a"
-fixrec "from_sinl_up\<cdot>(sinl\<cdot>(up\<cdot>x)) = x"
+fixrec from_sinl_up :: "'a u \<oplus> 'b \<rightarrow> 'a"
+where "from_sinl_up\<cdot>(sinl\<cdot>(up\<cdot>x)) = x"
subsection {* fixpat examples *}
@@ -41,13 +40,13 @@
text {* zip function for lazy lists *}
-consts lzip :: "'a llist \<rightarrow> 'b llist \<rightarrow> ('a \<times> 'b) llist"
-
text {* notice that the patterns are not exhaustive *}
fixrec
+ lzip :: "'a llist \<rightarrow> 'b llist \<rightarrow> ('a \<times> 'b) llist"
+where
"lzip\<cdot>(lCons\<cdot>x\<cdot>xs)\<cdot>(lCons\<cdot>y\<cdot>ys) = lCons\<cdot><x,y>\<cdot>(lzip\<cdot>xs\<cdot>ys)"
- "lzip\<cdot>lNil\<cdot>lNil = lNil"
+| "lzip\<cdot>lNil\<cdot>lNil = lNil"
text {* fixpat is useful for producing strictness theorems *}
text {* note that pattern matching is done in left-to-right order *}
@@ -68,8 +67,6 @@
text {* another zip function for lazy lists *}
-consts lzip2 :: "'a llist \<rightarrow> 'b llist \<rightarrow> ('a \<times> 'b) llist"
-
text {*
Notice that this version has overlapping patterns.
The second equation cannot be proved as a theorem
@@ -77,8 +74,10 @@
*}
fixrec (permissive)
+ lzip2 :: "'a llist \<rightarrow> 'b llist \<rightarrow> ('a \<times> 'b) llist"
+where
"lzip2\<cdot>(lCons\<cdot>x\<cdot>xs)\<cdot>(lCons\<cdot>y\<cdot>ys) = lCons\<cdot><x,y>\<cdot>(lzip\<cdot>xs\<cdot>ys)"
- "lzip2\<cdot>xs\<cdot>ys = lNil"
+| "lzip2\<cdot>xs\<cdot>ys = lNil"
text {*
Usually fixrec tries to prove all equations as theorems.
@@ -105,21 +104,20 @@
domain 'a tree = Leaf (lazy 'a) | Branch (lazy "'a forest")
and 'a forest = Empty | Trees (lazy "'a tree") "'a forest"
-consts
- map_tree :: "('a \<rightarrow> 'b) \<rightarrow> ('a tree \<rightarrow> 'b tree)"
- map_forest :: "('a \<rightarrow> 'b) \<rightarrow> ('a forest \<rightarrow> 'b forest)"
-
text {*
To define mutually recursive functions, separate the equations
for each function using the keyword "and".
*}
fixrec
- "map_tree\<cdot>f\<cdot>(Leaf\<cdot>x) = Leaf\<cdot>(f\<cdot>x)"
- "map_tree\<cdot>f\<cdot>(Branch\<cdot>ts) = Branch\<cdot>(map_forest\<cdot>f\<cdot>ts)"
+ map_tree :: "('a \<rightarrow> 'b) \<rightarrow> ('a tree \<rightarrow> 'b tree)"
and
- "map_forest\<cdot>f\<cdot>Empty = Empty"
- "ts \<noteq> \<bottom> \<Longrightarrow>
+ map_forest :: "('a \<rightarrow> 'b) \<rightarrow> ('a forest \<rightarrow> 'b forest)"
+where
+ "map_tree\<cdot>f\<cdot>(Leaf\<cdot>x) = Leaf\<cdot>(f\<cdot>x)"
+| "map_tree\<cdot>f\<cdot>(Branch\<cdot>ts) = Branch\<cdot>(map_forest\<cdot>f\<cdot>ts)"
+| "map_forest\<cdot>f\<cdot>Empty = Empty"
+| "ts \<noteq> \<bottom> \<Longrightarrow>
map_forest\<cdot>f\<cdot>(Trees\<cdot>t\<cdot>ts) = Trees\<cdot>(map_tree\<cdot>f\<cdot>t)\<cdot>(map_forest\<cdot>f\<cdot>ts)"
fixpat map_tree_strict [simp]: "map_tree\<cdot>f\<cdot>\<bottom>"
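With the new syntax shown above, each function is declared and defined by a single fixrec command: the type follows the name, and the defining equations follow the keyword "where", separated by "|". A further sketch in the same style; lappend is a hypothetical function added here only for illustration, assuming the lazy 'a llist domain of this example theory:

    fixrec lappend :: "'a llist \<rightarrow> 'a llist \<rightarrow> 'a llist"
    where
      "lappend\<cdot>lNil\<cdot>ys = ys"
    | "lappend\<cdot>(lCons\<cdot>x\<cdot>xs)\<cdot>ys = lCons\<cdot>x\<cdot>(lappend\<cdot>xs\<cdot>ys)"

As with lzip, the generated lappend_simps cover exactly the stated patterns; strictness rules for unmatched arguments are still obtained with fixpat, as in the map_tree_strict example above.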
--- a/src/HOLCF/ex/ROOT.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/HOLCF/ex/ROOT.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,8 +1,7 @@
(* Title: HOLCF/ex/ROOT.ML
- ID: $Id$
Misc HOLCF examples.
*)
use_thys ["Dnat", "Stream", "Dagstuhl", "Focus_ex", "Fix2", "Hoare",
- "Loop", "Fixrec_ex"];
+ "Loop", "Fixrec_ex", "Powerdomain_ex"];
--- a/src/Provers/README Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Provers/README Wed Mar 04 10:45:52 2009 +0100
@@ -2,19 +2,13 @@
This directory contains ML sources of generic theorem proving tools.
Typically, they can be applied to various logics, provided rules of a
-certain form are derivable. Some of these are documented in the
-Reference Manual.
+certain form are derivable.
blast.ML generic tableau prover with proof reconstruction
clasimp.ML combination of classical reasoner and simplifier
classical.ML theorem prover for classical logics
hypsubst.ML tactic to substitute in the hypotheses
- ind.ML a simple induction package
- induct_method.ML proof by cases and induction on sets and types (Isar)
- linorder.ML transitivity reasoner for linear (total) orders
quantifier1.ML simplification procedures for "1 point rules"
- simp.ML powerful but slow simplifier
- split_paired_all.ML turn surjective pairing into split rule
splitter.ML performs case splits for simplifier
typedsimp.ML basic simplifier for explicitly typed logics
--- a/src/Provers/blast.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Provers/blast.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Provers/blast.ML
- ID: $Id$
Author: Lawrence C Paulson, Cambridge University Computer Laboratory
Copyright 1997 University of Cambridge
@@ -764,8 +763,8 @@
end
(*substitute throughout "stack frame"; extract affected formulae*)
fun subFrame ((Gs,Hs), (changed, frames)) =
- let val (changed', Gs') = foldr subForm (changed, []) Gs
- val (changed'', Hs') = foldr subForm (changed', []) Hs
+ let val (changed', Gs') = List.foldr subForm (changed, []) Gs
+ val (changed'', Hs') = List.foldr subForm (changed', []) Hs
in (changed'', (Gs',Hs')::frames) end
(*substitute throughout literals; extract affected ones*)
fun subLit (lit, (changed, nlits)) =
@@ -773,8 +772,8 @@
in if nlit aconv lit then (changed, nlit::nlits)
else ((nlit,true)::changed, nlits)
end
- val (changed, lits') = foldr subLit ([], []) lits
- val (changed', pairs') = foldr subFrame (changed, []) pairs
+ val (changed, lits') = List.foldr subLit ([], []) lits
+ val (changed', pairs') = List.foldr subFrame (changed, []) pairs
in if !trace then writeln ("Substituting " ^ traceTerm thy u ^
" for " ^ traceTerm thy t ^ " in branch" )
else ();
@@ -913,7 +912,7 @@
fun printStats (State {ntried, nclosed, ...}) (b, start, tacs) =
if b then
- writeln (end_timing start ^ " for search. Closed: "
+ writeln (#message (end_timing start) ^ " for search. Closed: "
^ Int.toString (!nclosed) ^
" tried: " ^ Int.toString (!ntried) ^
" tactics: " ^ Int.toString (length tacs))
@@ -971,7 +970,7 @@
then lim - (1+log(length rules))
else lim (*discourage branching updates*)
val vars = vars_in_vars vars
- val vars' = foldr add_terms_vars vars prems
+ val vars' = List.foldr add_terms_vars vars prems
val choices' = (ntrl, nbrs, PRV) :: choices
val tacs' = (tac(updated,false,true))
:: tacs (*no duplication; rotate*)
@@ -1098,7 +1097,7 @@
then
let val updated = ntrl < !ntrail (*branch updated*)
val vars = vars_in_vars vars
- val vars' = foldr add_terms_vars vars prems
+ val vars' = List.foldr add_terms_vars vars prems
(*duplicate H if md permits*)
val dup = md (*earlier had "andalso vars' <> vars":
duplicate only if the subgoal has new vars*)
@@ -1264,7 +1263,7 @@
else ();
backtrack choices)
| cell => (if (!trace orelse !stats)
- then writeln (end_timing start ^ " for reconstruction")
+ then writeln (#message (end_timing start) ^ " for reconstruction")
else ();
Seq.make(fn()=> cell))
end
--- a/src/Provers/clasimp.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Provers/clasimp.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Provers/clasimp.ML
- ID: $Id$
Author: David von Oheimb, TU Muenchen
Combination of classical reasoner and simplifier (depends on
@@ -153,7 +152,7 @@
end;
fun modifier att (x, ths) =
- fst (foldl_map (Library.apply [att]) (x, rev ths));
+ fst (Library.foldl_map (Library.apply [att]) (x, rev ths));
val addXIs = modifier (ContextRules.intro_query NONE);
val addXEs = modifier (ContextRules.elim_query NONE);
--- a/src/Provers/classical.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Provers/classical.ML Wed Mar 04 10:45:52 2009 +0100
@@ -223,7 +223,7 @@
let fun addrl (rl,brls) = (false, rl) :: (true, rl RSN (2, Data.swap)) :: brls
in assume_tac ORELSE'
contr_tac ORELSE'
- biresolve_tac (foldr addrl [] rls)
+ biresolve_tac (List.foldr addrl [] rls)
end;
(*Duplication of hazardous rules, for complete provers*)
--- a/src/Provers/order.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Provers/order.ML Wed Mar 04 10:45:52 2009 +0100
@@ -639,7 +639,7 @@
(* Compute, for each adjacency list, the list with reversed edges,
and concatenate these lists. *)
- val flipped = foldr (op @) nil (map flip g)
+ val flipped = List.foldr (op @) nil (map flip g)
in assemble g flipped end
@@ -677,7 +677,7 @@
let
val _ = visited := u :: !visited
val descendents =
- foldr (fn ((v,l),ds) => if been_visited v then ds
+ List.foldr (fn ((v,l),ds) => if been_visited v then ds
else v :: dfs_visit g v @ ds)
nil (adjacent (op aconv) g u)
in
@@ -727,7 +727,7 @@
let
val _ = visited := u :: !visited
val descendents =
- foldr (fn ((v,l),ds) => if been_visited v then ds
+ List.foldr (fn ((v,l),ds) => if been_visited v then ds
else v :: dfs_visit g v @ ds)
nil (adjacent (op =) g u)
in descendents end
--- a/src/Provers/trancl.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Provers/trancl.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,8 +1,6 @@
(*
- Title: Transitivity reasoner for transitive closures of relations
- Id: $Id$
- Author: Oliver Kutter
- Copyright: TU Muenchen
+ Title: Transitivity reasoner for transitive closures of relations
+ Author: Oliver Kutter, TU Muenchen
*)
(*
@@ -335,7 +333,7 @@
(* Compute, for each adjacency list, the list with reversed edges,
and concatenate these lists. *)
- val flipped = foldr (op @) nil (map flip g)
+ val flipped = List.foldr (op @) nil (map flip g)
in assemble g flipped end
@@ -359,7 +357,7 @@
let
val _ = visited := u :: !visited
val descendents =
- foldr (fn ((v,l),ds) => if been_visited v then ds
+ List.foldr (fn ((v,l),ds) => if been_visited v then ds
else v :: dfs_visit g v @ ds)
nil (adjacent eq_comp g u)
in descendents end
--- a/src/Provers/typedsimp.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Provers/typedsimp.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: typedsimp
- ID: $Id$
Author: Lawrence C Paulson, Cambridge University Computer Laboratory
Copyright 1993 University of Cambridge
@@ -70,7 +69,7 @@
handle THM _ => (simp_rls, rl :: other_rls);
(*Given the list rls, return the pair (simp_rls, other_rls).*)
-fun process_rules rls = foldr add_rule ([],[]) rls;
+fun process_rules rls = List.foldr add_rule ([],[]) rls;
(*Given list of rewrite rules, return list of both forms, reject others*)
fun process_rewrites rls =
--- a/src/Pure/General/binding.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/General/binding.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,98 +1,104 @@
(* Title: Pure/General/binding.ML
Author: Florian Haftmann, TU Muenchen
+ Author: Makarius
Structured name bindings.
*)
-signature BASIC_BINDING =
-sig
- type binding
- val long_names: bool ref
- val short_names: bool ref
- val unique_names: bool ref
-end;
+type bstring = string; (*primitive names to be bound*)
signature BINDING =
sig
- include BASIC_BINDING
- val name_pos: string * Position.T -> binding
- val name: string -> binding
+ type binding
+ val dest: binding -> (string * bool) list * (string * bool) list * bstring
+ val verbose: bool ref
+ val str_of: binding -> string
+ val make: bstring * Position.T -> binding
+ val name: bstring -> binding
+ val pos_of: binding -> Position.T
+ val name_of: binding -> string
+ val map_name: (bstring -> bstring) -> binding -> binding
val empty: binding
- val map_base: (string -> string) -> binding -> binding
- val qualify: string -> binding -> binding
+ val is_empty: binding -> bool
+ val qualify: bool -> string -> binding -> binding
+ val map_prefix: ((string * bool) list -> (string * bool) list) -> binding -> binding
val add_prefix: bool -> string -> binding -> binding
- val map_prefix: ((string * bool) list -> binding -> binding) -> binding -> binding
- val is_empty: binding -> bool
- val base_name: binding -> string
- val pos_of: binding -> Position.T
- val dest: binding -> (string * bool) list * string
- val separator: string
- val is_qualified: string -> bool
- val display: binding -> string
end;
-structure Binding : BINDING =
+structure Binding: BINDING =
struct
-(** global flags **)
+(** representation **)
-val long_names = ref false;
-val short_names = ref false;
-val unique_names = ref true;
+(* datatype *)
+type component = string * bool; (*name with mandatory flag*)
-(** qualification **)
-
-val separator = ".";
-val is_qualified = exists_string (fn s => s = separator);
+datatype binding = Binding of
+ {prefix: component list, (*system prefix*)
+ qualifier: component list, (*user qualifier*)
+ name: bstring, (*base name*)
+ pos: Position.T}; (*source position*)
-fun reject_qualified kind s =
- if is_qualified s then
- error ("Attempt to declare qualified " ^ kind ^ " " ^ quote s)
- else s;
+fun make_binding (prefix, qualifier, name, pos) =
+ Binding {prefix = prefix, qualifier = qualifier, name = name, pos = pos};
+
+fun map_binding f (Binding {prefix, qualifier, name, pos}) =
+ make_binding (f (prefix, qualifier, name, pos));
+
+fun dest (Binding {prefix, qualifier, name, ...}) = (prefix, qualifier, name);
-(** binding representation **)
+(* diagnostic output *)
-datatype binding = Binding of ((string * bool) list * string) * Position.T;
- (* (prefix components (with mandatory flag), base name, position) *)
+val verbose = ref false;
-fun name_pos (name, pos) = Binding (([], name), pos);
-fun name name = name_pos (name, Position.none);
-val empty = name "";
+val str_of_components = implode o map (fn (s, true) => s ^ "!" | (s, false) => s ^ "?");
-fun map_binding f (Binding (prefix_name, pos)) = Binding (f prefix_name, pos);
-
-val map_base = map_binding o apsnd;
+fun str_of (Binding {prefix, qualifier, name, pos}) =
+ let
+ val text =
+ if ! verbose then
+ (if null prefix then "" else enclose "(" ")" (str_of_components prefix)) ^
+ str_of_components qualifier ^ name
+ else name;
+ val props = Position.properties_of pos;
+ in Markup.markup (Markup.properties props (Markup.binding name)) text end;
-fun qualify_base path name =
- if path = "" orelse name = "" then name
- else path ^ separator ^ name;
+
-val qualify = map_base o qualify_base;
- (*FIXME should all operations on bare names move here from name_space.ML ?*)
+(** basic operations **)
+
+(* name and position *)
-fun add_prefix sticky "" b = b
- | add_prefix sticky prfx b = (map_binding o apfst)
- (cons ((*reject_qualified "prefix"*) prfx, sticky)) b;
+fun make (name, pos) = make_binding ([], [], name, pos);
+fun name name = make (name, Position.none);
+
+fun pos_of (Binding {pos, ...}) = pos;
+fun name_of (Binding {name, ...}) = name;
-fun map_prefix f (Binding ((prefix, name), pos)) =
- f prefix (name_pos (name, pos));
+fun map_name f = map_binding (fn (prefix, qualifier, name, pos) => (prefix, qualifier, f name, pos));
+
+val empty = name "";
+fun is_empty b = name_of b = "";
+
-fun is_empty (Binding ((_, name), _)) = name = "";
-fun base_name (Binding ((_, name), _)) = name;
-fun pos_of (Binding (_, pos)) = pos;
-fun dest (Binding (prefix_name, _)) = prefix_name;
+(* user qualifier *)
+
+fun qualify _ "" = I
+ | qualify mandatory qual = map_binding (fn (prefix, qualifier, name, pos) =>
+ (prefix, (qual, mandatory) :: qualifier, name, pos));
+
-fun display (Binding ((prefix, name), _)) =
- let
- fun mk_prefix (prfx, true) = prfx
- | mk_prefix (prfx, false) = enclose "(" ")" prfx
- in if not (! long_names) orelse null prefix orelse name = "" then name
- else space_implode "." (map mk_prefix prefix) ^ ":" ^ name
- end;
+(* system prefix *)
+
+fun map_prefix f = map_binding (fn (prefix, qualifier, name, pos) =>
+ (f prefix, qualifier, name, pos));
+
+fun add_prefix _ "" = I
+ | add_prefix mandatory prfx = map_prefix (cons (prfx, mandatory));
end;
-structure Basic_Binding : BASIC_BINDING = Binding;
-open Basic_Binding;
+type binding = Binding.binding;
+
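
(A minimal usage sketch of the revised Binding interface introduced above;
the concrete names are made up for illustration:

  (* Build a user-qualified binding and take it apart again. *)
  val b = Binding.qualify true "container" (Binding.name "item");
  val base = Binding.name_of b;                    (* "item" -- the qualifier is kept separately *)
  val b' = Binding.add_prefix false "prefix" b;    (* adds a non-mandatory system prefix *)
)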
--- a/src/Pure/General/markup.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/General/markup.ML Wed Mar 04 10:45:52 2009 +0100
@@ -12,9 +12,9 @@
val properties: (string * string) list -> T -> T
val nameN: string
val name: string -> T -> T
+ val bindingN: string val binding: string -> T
val groupN: string
val theory_nameN: string
- val idN: string
val kindN: string
val internalK: string
val property_internal: Properties.property
@@ -25,6 +25,7 @@
val end_columnN: string
val end_offsetN: string
val fileN: string
+ val idN: string
val position_properties': string list
val position_properties: string list
val positionN: string val position: T
@@ -107,6 +108,8 @@
structure Markup: MARKUP =
struct
+(** markup elements **)
+
(* basic markup *)
type T = string * Properties.T;
@@ -130,6 +133,8 @@
val nameN = "name";
fun name a = properties [(nameN, a)];
+val (bindingN, binding) = markup_string "binding" nameN;
+
val groupN = "group";
val theory_nameN = "theory_name";
@@ -278,7 +283,7 @@
-(* print mode operations *)
+(** print mode operations **)
val no_output = ("", "");
fun default_output (_: T) = no_output;
--- a/src/Pure/General/name_space.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/General/name_space.ML Wed Mar 04 10:45:52 2009 +0100
@@ -3,15 +3,20 @@
Generic name spaces with declared and hidden entries. Unknown names
are considered global; no support for absolute addressing.
-Cf. Pure/General/binding.ML
*)
-type bstring = string; (*simple names to be bound -- legacy*)
type xstring = string; (*external names*)
+signature BASIC_NAME_SPACE =
+sig
+ val long_names: bool ref
+ val short_names: bool ref
+ val unique_names: bool ref
+end;
+
signature NAME_SPACE =
sig
- include BASIC_BINDING
+ include BASIC_NAME_SPACE
val hidden: string -> string
val is_hidden: string -> bool
val separator: string (*single char*)
@@ -27,8 +32,9 @@
val empty: T
val intern: T -> xstring -> string
val extern: T -> string -> xstring
+ val extern_flags: {long_names: bool, short_names: bool, unique_names: bool} ->
+ T -> string -> xstring
val hide: bool -> string -> T -> T
- val get_accesses: T -> string -> xstring list
val merge: T * T -> T
type naming
val default_naming: naming
@@ -41,12 +47,11 @@
val qualified_names: naming -> naming
val sticky_prefix: string -> naming -> naming
type 'a table = T * 'a Symtab.table
+ val bind: naming -> binding * 'a -> 'a table -> string * 'a table (*exception Symtab.DUP*)
val empty_table: 'a table
- val bind: naming -> binding * 'a
- -> 'a table -> string * 'a table (*exception Symtab.DUP*)
- val merge_tables: ('a * 'a -> bool) -> 'a table * 'a table -> 'a table
- val join_tables: (string -> 'a * 'a -> 'a)
- -> 'a table * 'a table -> 'a table
+ val merge_tables: ('a * 'a -> bool) -> 'a table * 'a table -> 'a table (*exception Symtab.DUP*)
+ val join_tables: (string -> 'a * 'a -> 'a) (*exception Symtab.DUP/Symtab.SAME*) ->
+ 'a table * 'a table -> 'a table (*exception Symtab.DUP*)
val dest_table: 'a table -> (string * 'a) list
val extern_table: 'a table -> (xstring * 'a) list
end;
@@ -54,16 +59,13 @@
structure NameSpace: NAME_SPACE =
struct
-open Basic_Binding;
-
-
(** long identifiers **)
fun hidden name = "??." ^ name;
val is_hidden = String.isPrefix "??.";
-val separator = Binding.separator;
-val is_qualified = Binding.is_qualified;
+val separator = ".";
+val is_qualified = exists_string (fn s => s = separator);
val implode_name = space_implode separator;
val explode_name = space_explode separator;
@@ -120,37 +122,28 @@
datatype T =
NameSpace of
- ((string list * string list) * stamp) Symtab.table * (*internals, hidden internals*)
- (string list * stamp) Symtab.table; (*externals*)
+ (string list * string list) Symtab.table * (*internals, hidden internals*)
+ string list Symtab.table; (*externals*)
val empty = NameSpace (Symtab.empty, Symtab.empty);
fun lookup (NameSpace (tab, _)) xname =
(case Symtab.lookup tab xname of
NONE => (xname, true)
- | SOME (([], []), _) => (xname, true)
- | SOME (([name], _), _) => (name, true)
- | SOME ((name :: _, _), _) => (name, false)
- | SOME (([], name' :: _), _) => (hidden name', true));
+ | SOME ([], []) => (xname, true)
+ | SOME ([name], _) => (name, true)
+ | SOME (name :: _, _) => (name, false)
+ | SOME ([], name' :: _) => (hidden name', true));
-fun ex_mapsto_in (NameSpace (tab, _)) name xname =
- (case Symtab.lookup tab xname of
- SOME ((name'::_, _), _) => name' = name
- | _ => false);
-
-fun get_accesses' valid_only (ns as (NameSpace (_, tab))) name =
- (case Symtab.lookup tab name of
+fun get_accesses (NameSpace (_, xtab)) name =
+ (case Symtab.lookup xtab name of
NONE => [name]
- | SOME (xnames, _) => if valid_only
- then filter (ex_mapsto_in ns name) xnames
- else xnames);
-
-val get_accesses = get_accesses' true;
+ | SOME xnames => xnames);
fun put_accesses name xnames (NameSpace (tab, xtab)) =
- NameSpace (tab, Symtab.update (name, (xnames, stamp ())) xtab);
+ NameSpace (tab, Symtab.update (name, xnames) xtab);
-fun valid_accesses (NameSpace (tab, _)) name = Symtab.fold (fn (xname, ((names, _), _)) =>
+fun valid_accesses (NameSpace (tab, _)) name = Symtab.fold (fn (xname, (names, _)) =>
if not (null names) andalso hd names = name then cons xname else I) tab [];
@@ -158,28 +151,37 @@
fun intern space xname = #1 (lookup space xname);
-fun extern space name =
+fun extern_flags {long_names, short_names, unique_names} space name =
let
fun valid unique xname =
let val (name', uniq) = lookup space xname
in name = name' andalso (uniq orelse not unique) end;
fun ext [] = if valid false name then name else hidden name
- | ext (nm :: nms) = if valid (! unique_names) nm then nm else ext nms;
+ | ext (nm :: nms) = if valid unique_names nm then nm else ext nms;
in
- if ! long_names then name
- else if ! short_names then base name
- else ext (get_accesses' false space name)
+ if long_names then name
+ else if short_names then base name
+ else ext (get_accesses space name)
end;
+val long_names = ref false;
+val short_names = ref false;
+val unique_names = ref true;
+
+fun extern space name =
+ extern_flags
+ {long_names = ! long_names,
+ short_names = ! short_names,
+ unique_names = ! unique_names} space name;
+
(* basic operations *)
local
fun map_space f xname (NameSpace (tab, xtab)) =
- NameSpace (Symtab.map_default (xname, (([], []), stamp ()))
- (fn (entry, _) => (f entry, stamp ())) tab, xtab);
+ NameSpace (Symtab.map_default (xname, ([], [])) f tab, xtab);
in
@@ -203,7 +205,7 @@
space
|> add_name' name name
|> fold (del_name name) (if fully then names else names inter_string [base name])
- |> fold (del_name_extra name) (get_accesses' false space name)
+ |> fold (del_name_extra name) (get_accesses space name)
end;
@@ -212,15 +214,13 @@
fun merge (NameSpace (tab1, xtab1), NameSpace (tab2, xtab2)) =
let
val tab' = (tab1, tab2) |> Symtab.join
- (K (fn (((names1, names1'), stamp1), ((names2, names2'), stamp2)) =>
- if stamp1 = stamp2 then raise Symtab.SAME
- else
- ((Library.merge (op =) (names1, names2),
- Library.merge (op =) (names1', names2')), stamp ())));
+ (K (fn names as ((names1, names1'), (names2, names2')) =>
+ if pointer_eq names then raise Symtab.SAME
+ else (Library.merge (op =) (names1, names2), Library.merge (op =) (names1', names2'))));
val xtab' = (xtab1, xtab2) |> Symtab.join
- (K (fn ((xnames1, stamp1), (xnames2, stamp2)) =>
- if stamp1 = stamp2 then raise Symtab.SAME
- else (Library.merge (op =) (xnames1, xnames2), stamp ())));
+ (K (fn xnames =>
+ if pointer_eq xnames then raise Symtab.SAME
+ else (Library.merge (op =) xnames)));
in NameSpace (tab', xtab') end;
@@ -272,32 +272,33 @@
in fold mk_prefix end;
-(* declarations *)
+(* full name *)
+
+fun full (Naming (path, (qualify, _))) = qualify path;
-fun full_internal (Naming (path, (qualify, _))) = qualify path;
+fun full_name naming binding =
+ let
+ val (prefix, qualifier, bname) = Binding.dest binding;
+ val naming' = apply_prefix (prefix @ qualifier) naming;
+ in full naming' bname end;
+
+
+(* declaration *)
-fun declare_internal naming name space =
- if is_hidden name then
- error ("Attempt to declare hidden name " ^ quote name)
- else
- let
- val names = explode_name name;
- val _ = (null names orelse exists (fn s => s = "") names
- orelse exists_string (fn s => s = "\"") name) andalso
- error ("Bad name declaration " ^ quote name);
- val (accs, accs') = pairself (map implode_name) (accesses naming names);
- in space |> fold (add_name name) accs |> put_accesses name accs' end;
+fun declare naming binding space =
+ let
+ val (prefix, qualifier, bname) = Binding.dest binding;
+ val naming' = apply_prefix (prefix @ qualifier) naming;
+ val name = full naming' bname;
+ val names = explode_name name;
-fun full_name naming b =
- let val (prefix, bname) = Binding.dest b
- in full_internal (apply_prefix prefix naming) bname end;
+ val _ = (null names orelse exists (fn s => s = "" orelse s = "??") names
+ orelse exists_string (fn s => s = "\"") name) andalso
+ error ("Bad name declaration " ^ quote (Binding.str_of binding));
-fun declare bnaming b =
- let
- val (prefix, bname) = Binding.dest b;
- val naming = apply_prefix prefix bnaming;
- val name = full_internal naming bname;
- in declare_internal naming name #> pair name end;
+ val (accs, accs') = pairself (map implode_name) (accesses naming' names);
+ val space' = space |> fold (add_name name) accs |> put_accesses name accs';
+ in (name, space') end;
@@ -305,12 +306,11 @@
type 'a table = T * 'a Symtab.table;
-val empty_table = (empty, Symtab.empty);
+fun bind naming (binding, x) (space, tab) =
+ let val (name, space') = declare naming binding space
+ in (name, (space', Symtab.update_new (name, x) tab)) end;
-fun bind naming (b, x) (space, tab) =
- let
- val (name, space') = declare naming b space;
- in (name, (space', Symtab.update_new (name, x) tab)) end;
+val empty_table = (empty, Symtab.empty);
fun merge_tables eq ((space1, tab1), (space2, tab2)) =
(merge (space1, space2), Symtab.merge eq (tab1, tab2));
@@ -331,3 +331,7 @@
val explode = explode_name;
end;
+
+structure BasicNameSpace: BASIC_NAME_SPACE = NameSpace;
+open BasicNameSpace;
+
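
(A sketch of the new extern_flags entry point declared above -- a hypothetical
helper, assuming a name space value and a fully qualified internal name are at hand:

  (* Force fully qualified output, ignoring the mutable reference flags. *)
  fun extern_long space name =
    NameSpace.extern_flags
      {long_names = true, short_names = false, unique_names = true} space name;
)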
--- a/src/Pure/General/output.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/General/output.ML Wed Mar 04 10:45:52 2009 +0100
@@ -135,7 +135,7 @@
let
val start = start_timing ();
val result = Exn.capture e ();
- val end_msg = end_timing start;
+ val end_msg = #message (end_timing start);
val _ = warning (if msg = "" then end_msg else msg ^ "\n" ^ end_msg);
in Exn.release result end
else e ();
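
(As in the hunk above, end_timing now yields a record whose #message field carries
the printable timing string. A minimal sketch, assuming start_timing, end_timing
and warning are available at top level as used in this file:

  val start = start_timing ();
  (* ... timed work ... *)
  val _ = warning (#message (end_timing start));
)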
--- a/src/Pure/General/swing.scala Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/General/swing.scala Wed Mar 04 10:45:52 2009 +0100
@@ -10,9 +10,11 @@
object Swing
{
- def now(body: => Unit) {
- if (SwingUtilities.isEventDispatchThread) body
- else SwingUtilities.invokeAndWait(new Runnable { def run = body })
+ def now[A](body: => A): A = {
+ var result: Option[A] = None
+ if (SwingUtilities.isEventDispatchThread) { result = Some(body) }
+ else SwingUtilities.invokeAndWait(new Runnable { def run = { result = Some(body) } })
+ result.get
}
def later(body: => Unit) {
--- a/src/Pure/IsaMakefile Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/IsaMakefile Wed Mar 04 10:45:52 2009 +0100
@@ -19,9 +19,29 @@
## Pure
+BOOTSTRAP_FILES = ML-Systems/exn.ML ML-Systems/ml_name_space.ML \
+ ML-Systems/mosml.ML ML-Systems/multithreading.ML \
+ ML-Systems/multithreading_polyml.ML ML-Systems/overloading_smlnj.ML \
+ ML-Systems/polyml-4.1.3.ML ML-Systems/polyml-4.1.4.ML \
+ ML-Systems/polyml-4.2.0.ML ML-Systems/polyml-5.0.ML \
+ ML-Systems/polyml-5.1.ML ML-Systems/polyml-experimental.ML \
+ ML-Systems/polyml.ML ML-Systems/polyml_common.ML \
+ ML-Systems/polyml_old_compiler4.ML \
+ ML-Systems/polyml_old_compiler5.ML ML-Systems/proper_int.ML \
+ ML-Systems/smlnj.ML ML-Systems/system_shell.ML \
+ ML-Systems/thread_dummy.ML ML-Systems/time_limit.ML \
+ ML-Systems/universal.ML
+
+RAW: $(OUT)/RAW
+
+$(OUT)/RAW: $(BOOTSTRAP_FILES)
+ @./mk -r
+
+
Pure: $(OUT)/Pure
-$(OUT)/Pure: Concurrent/ROOT.ML Concurrent/future.ML \
+$(OUT)/Pure: $(BOOTSTRAP_FILES) ../Tools/auto_solve.ML \
+ ../Tools/quickcheck.ML Concurrent/ROOT.ML Concurrent/future.ML \
Concurrent/mailbox.ML Concurrent/par_list.ML \
Concurrent/par_list_dummy.ML Concurrent/simple_thread.ML \
Concurrent/synchronized.ML Concurrent/task_queue.ML General/ROOT.ML \
@@ -38,33 +58,21 @@
Isar/attrib.ML Isar/auto_bind.ML Isar/calculation.ML Isar/class.ML \
Isar/class_target.ML Isar/code.ML Isar/code_unit.ML \
Isar/constdefs.ML Isar/context_rules.ML Isar/element.ML \
- Isar/expression.ML Isar/find_theorems.ML Isar/find_consts.ML \
- Isar/isar.ML Isar/isar_document.ML Isar/isar_cmd.ML Isar/isar_syn.ML \
- Isar/local_defs.ML Isar/local_syntax.ML Isar/local_theory.ML \
- Isar/locale.ML Isar/method.ML Isar/net_rules.ML \
+ Isar/expression.ML Isar/isar_cmd.ML Isar/isar_document.ML \
+ Isar/isar_syn.ML Isar/local_defs.ML Isar/local_syntax.ML \
+ Isar/local_theory.ML Isar/locale.ML Isar/method.ML Isar/net_rules.ML \
Isar/object_logic.ML Isar/obtain.ML Isar/outer_keyword.ML \
Isar/outer_lex.ML Isar/outer_parse.ML Isar/outer_syntax.ML \
Isar/overloading.ML Isar/proof.ML Isar/proof_context.ML \
Isar/proof_display.ML Isar/proof_node.ML Isar/rule_cases.ML \
- Isar/rule_insts.ML Isar/session.ML Isar/skip_proof.ML \
- Isar/spec_parse.ML Isar/specification.ML Isar/theory_target.ML \
- Isar/toplevel.ML Isar/value_parse.ML ML-Systems/alice.ML \
- ML-Systems/exn.ML ML-Systems/install_pp_polyml.ML \
- ML-Systems/ml_name_space.ML ML-Systems/multithreading.ML \
- ML-Systems/mosml.ML ML-Systems/multithreading_polyml.ML \
- ML-Systems/overloading_smlnj.ML ML-Systems/polyml-4.1.3.ML \
- ML-Systems/polyml-4.1.4.ML ML-Systems/polyml-4.2.0.ML \
- ML-Systems/polyml-5.0.ML ML-Systems/polyml-5.1.ML \
- ML-Systems/polyml_common.ML ML-Systems/polyml.ML \
- ML-Systems/polyml_old_compiler4.ML \
- ML-Systems/polyml_old_compiler5.ML ML-Systems/proper_int.ML \
- ML-Systems/smlnj.ML ML-Systems/system_shell.ML \
- ML-Systems/time_limit.ML ML-Systems/thread_dummy.ML \
- ML-Systems/universal.ML ML/ml_context.ML ML/ml_antiquote.ML \
- ML/ml_lex.ML ML/ml_parse.ML ML/ml_syntax.ML ML/ml_thms.ML \
- Proof/extraction.ML Proof/proof_rewrite_rules.ML \
- Proof/proof_syntax.ML Proof/proofchecker.ML Proof/reconstruct.ML \
- ProofGeneral/ROOT.ML ProofGeneral/pgip.ML ProofGeneral/pgip_input.ML \
+ Isar/rule_insts.ML Isar/skip_proof.ML Isar/spec_parse.ML \
+ Isar/specification.ML Isar/theory_target.ML Isar/toplevel.ML \
+ Isar/value_parse.ML ML/ml_antiquote.ML ML/ml_context.ML ML/ml_lex.ML \
+ ML/ml_parse.ML ML/ml_syntax.ML ML/ml_thms.ML \
+ ML-Systems/install_pp_polyml.ML Proof/extraction.ML \
+ Proof/proof_rewrite_rules.ML Proof/proof_syntax.ML \
+ Proof/proofchecker.ML Proof/reconstruct.ML ProofGeneral/ROOT.ML \
+ ProofGeneral/pgip.ML ProofGeneral/pgip_input.ML \
ProofGeneral/pgip_isabelle.ML ProofGeneral/pgip_markup.ML \
ProofGeneral/pgip_output.ML ProofGeneral/pgip_parser.ML \
ProofGeneral/pgip_tests.ML ProofGeneral/pgip_types.ML \
@@ -72,24 +80,25 @@
ProofGeneral/proof_general_pgip.ML Pure.thy ROOT.ML Syntax/ast.ML \
Syntax/lexicon.ML Syntax/mixfix.ML Syntax/parser.ML \
Syntax/printer.ML Syntax/simple_syntax.ML Syntax/syn_ext.ML \
- Syntax/syn_trans.ML Syntax/syntax.ML Syntax/type_ext.ML Thy/html.ML \
- Thy/latex.ML Thy/present.ML Thy/term_style.ML Thy/thm_deps.ML \
- Thy/thy_header.ML Thy/thy_info.ML Thy/thy_load.ML Thy/thy_output.ML \
- Thy/thy_syntax.ML Tools/ROOT.ML \
- Tools/isabelle_process.ML Tools/named_thms.ML Tools/xml_syntax.ML \
- assumption.ML axclass.ML codegen.ML config.ML conjunction.ML \
- consts.ML context.ML context_position.ML conv.ML defs.ML display.ML \
- drule.ML envir.ML facts.ML goal.ML interpretation.ML library.ML \
- logic.ML meta_simplifier.ML more_thm.ML morphism.ML name.ML net.ML \
- old_goals.ML old_term.ML pattern.ML primitive_defs.ML proofterm.ML \
- pure_setup.ML pure_thy.ML search.ML sign.ML simplifier.ML sorts.ML \
- subgoal.ML tactic.ML tctical.ML term.ML term_ord.ML term_subst.ML \
- theory.ML thm.ML type.ML type_infer.ML unify.ML variable.ML \
- ../Tools/quickcheck.ML ../Tools/auto_solve.ML
+ Syntax/syn_trans.ML Syntax/syntax.ML Syntax/type_ext.ML \
+ System/isabelle_process.ML System/isar.ML System/session.ML \
+ Thy/html.ML Thy/latex.ML Thy/present.ML Thy/term_style.ML \
+ Thy/thm_deps.ML Thy/thy_header.ML Thy/thy_info.ML Thy/thy_load.ML \
+ Thy/thy_output.ML Thy/thy_syntax.ML Tools/ROOT.ML \
+ Tools/find_consts.ML Tools/find_theorems.ML Tools/named_thms.ML \
+ Tools/xml_syntax.ML assumption.ML axclass.ML codegen.ML config.ML \
+ conjunction.ML consts.ML context.ML context_position.ML conv.ML \
+ defs.ML display.ML drule.ML envir.ML facts.ML goal.ML \
+ interpretation.ML library.ML logic.ML meta_simplifier.ML more_thm.ML \
+ morphism.ML name.ML net.ML old_goals.ML old_term.ML pattern.ML \
+ primitive_defs.ML proofterm.ML pure_setup.ML pure_thy.ML search.ML \
+ sign.ML simplifier.ML sorts.ML subgoal.ML tactic.ML tctical.ML \
+ term.ML term_ord.ML term_subst.ML theory.ML thm.ML type.ML \
+ type_infer.ML unify.ML variable.ML
@./mk
-## special targets
+## Proof General keywords
Pure-ProofGeneral: Pure $(LOG)/Pure-ProofGeneral.gz
@@ -97,28 +106,11 @@
@$(ISABELLE_TOOL) usedir -f proof_general_keywords.ML $(OUT)/Pure ProofGeneral
-RAW: $(OUT)/RAW
-
-$(OUT)/RAW: ML-Systems/alice.ML ML-Systems/exn.ML \
- ML-Systems/ml_name_space.ML ML-Systems/multithreading.ML \
- ML-Systems/mosml.ML ML-Systems/multithreading_polyml.ML \
- ML-Systems/overloading_smlnj.ML ML-Systems/polyml-4.1.3.ML \
- ML-Systems/polyml-4.1.4.ML ML-Systems/polyml-4.2.0.ML \
- ML-Systems/polyml-5.0.ML ML-Systems/polyml-5.1.ML \
- ML-Systems/polyml_common.ML ML-Systems/polyml.ML \
- ML-Systems/polyml_old_compiler4.ML \
- ML-Systems/polyml_old_compiler5.ML ML-Systems/proper_int.ML \
- ML-Systems/smlnj.ML ML-Systems/system_shell.ML \
- ML-Systems/time_limit.ML ML-Systems/thread_dummy.ML \
- ML-Systems/universal.ML
- @./mk -r
-
-
## clean
clean:
- @rm -f $(OUT)/Pure $(LOG)/Pure.gz $(LOG)/Pure-ProofGeneral.gz \
- $(OUT)/RAW $(LOG)/RAW.gz
+ @rm -f $(OUT)/Pure $(LOG)/Pure.gz $(OUT)/RAW $(LOG)/RAW.gz \
+ $(LOG)/Pure-ProofGeneral.gz
## Scala material
@@ -127,8 +119,8 @@
General/position.scala General/swing.scala General/symbol.scala \
General/xml.scala General/yxml.scala Isar/isar.scala \
Isar/isar_document.scala Isar/outer_keyword.scala \
- Thy/thy_header.scala Tools/isabelle_process.scala \
- Tools/isabelle_syntax.scala Tools/isabelle_system.scala
+ System/isabelle_process.scala System/isabelle_system.scala \
+ Thy/thy_header.scala Tools/isabelle_syntax.scala
SCALA_TARGET = $(ISABELLE_HOME)/lib/classes/Pure.jar
--- a/src/Pure/Isar/ROOT.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/ROOT.ML Wed Mar 04 10:45:52 2009 +0100
@@ -82,14 +82,12 @@
use "../old_goals.ML";
use "outer_syntax.ML";
use "../Thy/thy_info.ML";
-use "session.ML";
-use "isar.ML";
use "isar_document.ML";
(*theory and proof operations*)
use "rule_insts.ML";
use "../Thy/thm_deps.ML";
-use "find_theorems.ML";
-use "find_consts.ML";
use "isar_cmd.ML";
use "isar_syn.ML";
+
+
--- a/src/Pure/Isar/args.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/args.ML Wed Mar 04 10:45:52 2009 +0100
@@ -170,7 +170,7 @@
val name_source_position = named >> T.source_position_of;
val name = named >> T.content_of;
-val binding = P.position name >> Binding.name_pos;
+val binding = P.position name >> Binding.make;
val alt_name = alt_string >> T.content_of;
val symbol = symbolic >> T.content_of;
val liberal_name = symbol || name;
--- a/src/Pure/Isar/attrib.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/attrib.ML Wed Mar 04 10:45:52 2009 +0100
@@ -118,8 +118,7 @@
fun attribute thy = attribute_i thy o intern_src thy;
fun eval_thms ctxt args = ProofContext.note_thmss Thm.theoremK
- [((Binding.empty, []),
- map (apsnd (map (attribute (ProofContext.theory_of ctxt)))) args)] ctxt
+ [(Thm.empty_binding, map (apsnd (map (attribute (ProofContext.theory_of ctxt)))) args)] ctxt
|> fst |> maps snd;
@@ -198,7 +197,7 @@
let
val ths = Facts.select thmref fact;
val atts = map (attribute_i thy) srcs;
- val (context', ths') = foldl_map (Library.apply atts) (context, ths);
+ val (context', ths') = Library.foldl_map (Library.apply atts) (context, ths);
in (context', pick name ths') end)
end);
--- a/src/Pure/Isar/calculation.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/calculation.ML Wed Mar 04 10:45:52 2009 +0100
@@ -15,7 +15,7 @@
val symmetric: attribute
val also: (Facts.ref * Attrib.src list) list option -> bool -> Proof.state -> Proof.state Seq.seq
val also_i: thm list option -> bool -> Proof.state -> Proof.state Seq.seq
- val finally_: (Facts.ref * Attrib.src list) list option -> bool ->
+ val finally: (Facts.ref * Attrib.src list) list option -> bool ->
Proof.state -> Proof.state Seq.seq
val finally_i: thm list option -> bool -> Proof.state -> Proof.state Seq.seq
val moreover: bool -> Proof.state -> Proof.state
@@ -150,7 +150,7 @@
val also = calculate Proof.get_thmss false;
val also_i = calculate (K I) false;
-val finally_ = calculate Proof.get_thmss true;
+val finally = calculate Proof.get_thmss true;
val finally_i = calculate (K I) true;
--- a/src/Pure/Isar/class.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/class.ML Wed Mar 04 10:45:52 2009 +0100
@@ -201,7 +201,7 @@
| check_element e = [()];
val _ = map check_element syntax_elems;
fun fork_syn (Element.Fixes xs) =
- fold_map (fn (c, ty, syn) => cons (Binding.base_name c, syn) #> pair (c, ty, NoSyn)) xs
+ fold_map (fn (c, ty, syn) => cons (Binding.name_of c, syn) #> pair (c, ty, NoSyn)) xs
#>> Element.Fixes
| fork_syn x = pair x;
val (elems, global_syntax) = fold_map fork_syn syntax_elems [];
@@ -228,7 +228,7 @@
val raw_params = (snd o chop (length supparams)) all_params;
fun add_const (b, SOME raw_ty, _) thy =
let
- val v = Binding.base_name b;
+ val v = Binding.name_of b;
val c = Sign.full_bname thy v;
val ty = map_atyps (K (TFree (Name.aT, base_sort))) raw_ty;
val ty0 = Type.strip_sorts ty;
@@ -265,8 +265,7 @@
|> add_consts bname class base_sort sups supparams global_syntax
|-> (fn (param_map, params) => AxClass.define_class (bname, supsort)
(map (fst o snd) params)
- [((Binding.empty, []),
- Option.map (globalize param_map) raw_pred |> the_list)]
+ [(Thm.empty_binding, Option.map (globalize param_map) raw_pred |> the_list)]
#> snd
#> `get_axiom
#-> (fn assm_axiom => fold (Sign.add_const_constraint o apsnd SOME o snd) params
--- a/src/Pure/Isar/class_target.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/class_target.ML Wed Mar 04 10:45:52 2009 +0100
@@ -493,7 +493,7 @@
fun init_instantiation (tycos, vs, sort) thy =
let
val _ = if null tycos then error "At least one arity must be given" else ();
- val params = these_params thy sort;
+ val params = these_params thy (filter (can (AxClass.get_info thy)) sort);
fun get_param tyco (param, (_, (c, ty))) =
if can (AxClass.param_of_inst thy) (c, tyco)
then NONE else SOME ((c, tyco),
@@ -513,7 +513,8 @@
| SOME ts' => SOME (ts', ctxt);
fun improve (c, ty) = case AxClass.inst_tyco_of thy (c, ty)
of SOME tyco => (case AList.lookup (op =) inst_params (c, tyco)
- of SOME (_, ty') => if Type.raw_instance (ty', ty) then SOME (ty, ty') else NONE
+ of SOME (_, ty') => if Type.typ_instance (Sign.tsig_of thy) (ty', ty)
+ then SOME (ty, ty') else NONE
| NONE => NONE)
| NONE => NONE;
in
@@ -523,8 +524,7 @@
|> fold (Variable.declare_typ o TFree) vs
|> fold (Variable.declare_names o Free o snd) inst_params
|> (Overloading.map_improvable_syntax o apfst)
- (fn ((_, _), ((_, subst), unchecks)) =>
- ((primary_constraints, []), (((improve, K NONE), false), [])))
+ (K ((primary_constraints, []), (((improve, K NONE), false), [])))
|> Overloading.add_improvable_syntax
|> Context.proof_map (Syntax.add_term_check 0 "resorting" resort_check)
|> synchronize_inst_syntax
--- a/src/Pure/Isar/code.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/code.ML Wed Mar 04 10:45:52 2009 +0100
@@ -35,7 +35,7 @@
val these_raw_eqns: theory -> string -> (thm * bool) list
val get_datatype: theory -> string -> ((string * sort) list * (string * typ list) list)
val get_datatype_of_constr: theory -> string -> string option
- val get_case_data: theory -> string -> (int * string list) option
+ val get_case_scheme: theory -> string -> (int * (int * string list)) option
val is_undefined: theory -> string -> bool
val default_typscheme: theory -> string -> (string * sort) list * typ
@@ -111,7 +111,7 @@
(** logical and syntactical specification of executable code **)
-(* defining equations *)
+(* code equations *)
type eqns = bool * (thm * bool) list lazy;
(*default flag, theorems with linear flag (perhaps lazy)*)
@@ -136,7 +136,7 @@
Pattern.matchess thy (args, (map incr_idx o curry Library.take (length args)) args');
fun drop (thm', linear') = if (linear orelse not linear')
andalso matches_args (args_of thm') then
- (warning ("Code generator: dropping redundant defining equation\n" ^ Display.string_of_thm thm'); true)
+ (warning ("Code generator: dropping redundant code equation\n" ^ Display.string_of_thm thm'); true)
else false;
in (thm, linear) :: filter_out drop thms end;
@@ -157,7 +157,7 @@
(*with explicit history*),
dtyps: ((serial * ((string * sort) list * (string * typ list) list)) list) Symtab.table
(*with explicit history*),
- cases: (int * string list) Symtab.table * unit Symtab.table
+ cases: (int * (int * string list)) Symtab.table * unit Symtab.table
};
fun mk_spec ((concluded_history, eqns), (dtyps, cases)) =
@@ -409,7 +409,7 @@
in
(Pretty.writeln o Pretty.chunks) [
Pretty.block (
- Pretty.str "defining equations:"
+ Pretty.str "code equations:"
:: Pretty.fbrk
:: (Pretty.fbreaks o map pretty_eqn) eqns
),
@@ -452,7 +452,7 @@
val ty1 :: tys = map (snd o Code_Unit.const_typ_eqn) thms';
fun unify ty env = Sign.typ_unify thy (ty1, ty) env
handle Type.TUNIFY =>
- error ("Type unificaton failed, while unifying defining equations\n"
+ error ("Type unificaton failed, while unifying code equations\n"
^ (cat_lines o map Display.string_of_thm) thms
^ "\nwith types\n"
^ (cat_lines o map (Code_Unit.string_of_typ thy)) (ty1 :: tys));
@@ -463,7 +463,7 @@
fun check_linear (eqn as (thm, linear)) =
if linear then eqn else Code_Unit.bad_thm
- ("Duplicate variables on left hand side of defining equation:\n"
+ ("Duplicate variables on left hand side of code equation:\n"
^ Display.string_of_thm thm);
fun mk_eqn thy linear =
@@ -489,7 +489,7 @@
fun retrieve_algebra thy operational =
Sorts.subalgebra (Syntax.pp_global thy) operational
- (arity_constraints thy (Sign.classes_of thy))
+ (SOME o arity_constraints thy (Sign.classes_of thy))
(Sign.classes_of thy);
in
@@ -525,22 +525,13 @@
then SOME tyco else NONE
| _ => NONE;
-fun get_constr_typ thy c =
- case get_datatype_of_constr thy c
- of SOME tyco => let
- val (vs, cos) = get_datatype thy tyco;
- val SOME tys = AList.lookup (op =) cos c;
- val ty = tys ---> Type (tyco, map TFree vs);
- in SOME (Logic.varifyT ty) end
- | NONE => NONE;
-
fun recheck_eqn thy = Code_Unit.error_thm
(Code_Unit.assert_linear (is_some o get_datatype_of_constr thy) o apfst (Code_Unit.assert_eqn thy));
fun recheck_eqns_const thy c eqns =
let
fun cert (eqn as (thm, _)) = if c = Code_Unit.const_eqn thm
- then eqn else error ("Wrong head of defining equation,\nexpected constant "
+ then eqn else error ("Wrong head of code equation,\nexpected constant "
^ Code_Unit.string_of_const thy c ^ "\n" ^ Display.string_of_thm thm)
in map (cert o recheck_eqn thy) eqns end;
@@ -554,11 +545,11 @@
let
val c = Code_Unit.const_eqn thm;
val _ = if not default andalso (is_some o AxClass.class_of_param thy) c
- then error ("Rejected polymorphic equation for overloaded constant:\n"
+ then error ("Rejected polymorphic code equation for overloaded constant:\n"
^ Display.string_of_thm thm)
else ();
val _ = if not default andalso (is_some o get_datatype_of_constr thy) c
- then error ("Rejected equation for datatype constructor:\n"
+ then error ("Rejected code equation for datatype constructor:\n"
^ Display.string_of_thm thm)
else ();
in change_eqns false c (add_thm thy default (thm, linear)) thy end
@@ -583,7 +574,7 @@
fun del_eqns c = change_eqns true c (K (false, Lazy.value []));
-val get_case_data = Symtab.lookup o fst o the_cases o the_exec;
+fun get_case_scheme thy = Symtab.lookup ((fst o the_cases o the_exec) thy);
val is_undefined = Symtab.defined o snd o the_cases o the_exec;
@@ -593,11 +584,17 @@
let
val cs = map (fn c_ty as (_, ty) => (AxClass.unoverload_const thy c_ty, ty)) raw_cs;
val (tyco, vs_cos) = Code_Unit.constrset_of_consts thy cs;
+ val old_cs = (map fst o snd o get_datatype thy) tyco;
+ fun drop_outdated_cases cases = fold Symtab.delete_safe
+ (Symtab.fold (fn (c, (_, (_, cos))) =>
+ if exists (member (op =) old_cs) cos
+ then insert (op =) c else I) cases []) cases;
in
thy
|> map_exec_purge NONE
((map_dtyps o Symtab.map_default (tyco, [])) (cons (serial (), vs_cos))
- #> map_eqns (fold (Symtab.delete_safe o fst) cs))
+ #> map_eqns (fold (Symtab.delete_safe o fst) cs)
+ #> (map_cases o apfst) drop_outdated_cases)
|> TypeInterpretation.data (tyco, serial ())
end;
@@ -611,10 +608,12 @@
fun add_case thm thy =
let
- val entry as (c, _) = Code_Unit.case_cert thm;
- in
- (map_exec_purge (SOME [c]) o map_cases o apfst) (Symtab.update entry) thy
- end;
+ val (c, (k, case_pats)) = Code_Unit.case_cert thm;
+ val _ = case filter (is_none o get_datatype_of_constr thy) case_pats
+ of [] => ()
+ | cs => error ("Non-constructor(s) in case certificate: " ^ commas (map quote cs));
+ val entry = (1 + Int.max (1, length case_pats), (k, case_pats))
+ in (map_exec_purge (SOME [c]) o map_cases o apfst) (Symtab.update (c, entry)) thy end;
fun add_undefined c thy =
(map_exec_purge (SOME [c]) o map_cases o apsnd) (Symtab.update (c, ())) thy;
@@ -727,18 +726,16 @@
fun default_typscheme thy c =
let
- val typscheme = curry (Code_Unit.typscheme thy) c
- val the_const_type = snd o dest_Const o TermSubst.zero_var_indexes
- o curry Const "" o Sign.the_const_type thy;
+ fun the_const_typscheme c = (curry (Code_Unit.typscheme thy) c o snd o dest_Const
+ o TermSubst.zero_var_indexes o curry Const "" o Sign.the_const_type thy) c;
+ fun strip_sorts (vs, ty) = (map (fn (v, _) => (v, [])) vs, ty);
in case AxClass.class_of_param thy c
- of SOME class => the_const_type c
- |> Term.map_type_tvar (K (TVar ((Name.aT, 0), [class])))
- |> typscheme
- | NONE => (case get_constr_typ thy c
- of SOME ty => typscheme ty
- | NONE => (case get_eqns thy c
- of (thm, _) :: _ => snd (Code_Unit.head_eqn thy (Drule.zero_var_indexes thm))
- | [] => typscheme (the_const_type c))) end;
+ of SOME class => ([(Name.aT, [class])], snd (the_const_typscheme c))
+ | NONE => if is_some (get_datatype_of_constr thy c)
+ then strip_sorts (the_const_typscheme c)
+ else case get_eqns thy c
+ of (thm, _) :: _ => snd (Code_Unit.head_eqn thy (Drule.zero_var_indexes thm))
+ | [] => strip_sorts (the_const_typscheme c) end;
end; (*local*)
--- a/src/Pure/Isar/code_unit.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/code_unit.ML Wed Mar 04 10:45:52 2009 +0100
@@ -34,7 +34,7 @@
val constrset_of_consts: theory -> (string * typ) list
-> string * ((string * sort) list * (string * typ list) list)
- (*defining equations*)
+ (*code equations*)
val assert_eqn: theory -> thm -> thm
val mk_eqn: theory -> thm -> thm * bool
val assert_linear: (string -> bool) -> thm * bool -> thm * bool
@@ -76,10 +76,11 @@
fun typscheme thy (c, ty) =
let
- fun dest (TVar ((v, 0), sort)) = (v, sort)
+ val ty' = Logic.unvarifyT ty;
+ fun dest (TFree (v, sort)) = (v, sort)
| dest ty = error ("Illegal type parameter in type scheme: " ^ Syntax.string_of_typ_global thy ty);
- val vs = map dest (Sign.const_typargs thy (c, ty));
- in (vs, ty) end;
+ val vs = map dest (Sign.const_typargs thy (c, ty'));
+ in (vs, Type.strip_sorts ty') end;
fun inst_thm thy tvars' thm =
let
@@ -313,10 +314,10 @@
val ((tyco, sorts), cs'') = fold add cs' (apsnd single c');
val vs = Name.names Name.context Name.aT sorts;
val cs''' = map (inst vs) cs'';
- in (tyco, (vs, cs''')) end;
+ in (tyco, (vs, rev cs''')) end;
-(* defining equations *)
+(* code equations *)
fun assert_eqn thy thm =
let
@@ -351,7 +352,7 @@
^ Display.string_of_thm thm)
| check 0 (Var _) = ()
| check _ (Var _) = bad_thm
- ("Variable with application on left hand side of defining equation\n"
+ ("Variable with application on left hand side of code equation\n"
^ Display.string_of_thm thm)
| check n (t1 $ t2) = (check (n+1) t1; check 0 t2)
| check n (Const (_, ty)) = if n <> (length o fst o strip_type) ty
@@ -363,7 +364,7 @@
val ty_decl = Sign.the_const_type thy c;
val _ = if Sign.typ_equiv thy (Type.strip_sorts ty_decl, Type.strip_sorts ty)
then () else bad_thm ("Type\n" ^ string_of_typ thy ty
- ^ "\nof defining equation\n"
+ ^ "\nof code equation\n"
^ Display.string_of_thm thm
^ "\nis incompatible with declared function type\n"
^ string_of_typ thy ty_decl)
@@ -388,7 +389,7 @@
fun assert_linear is_cons (thm, false) = (thm, false)
| assert_linear is_cons (thm, true) = if snd (add_linear (assert_pat is_cons thm)) then (thm, true)
else bad_thm
- ("Duplicate variables on left hand side of defining equation:\n"
+ ("Duplicate variables on left hand side of code equation:\n"
^ Display.string_of_thm thm);
--- a/src/Pure/Isar/constdefs.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/constdefs.ML Wed Mar 04 10:45:52 2009 +0100
@@ -9,11 +9,9 @@
signature CONSTDEFS =
sig
val add_constdefs: (binding * string option) list *
- ((binding * string option * mixfix) option *
- (Attrib.binding * string)) list -> theory -> theory
+ ((binding * string option * mixfix) option * (Attrib.binding * string)) list -> theory -> theory
val add_constdefs_i: (binding * typ option) list *
- ((binding * typ option * mixfix) option *
- ((binding * attribute list) * term)) list -> theory -> theory
+ ((binding * typ option * mixfix) option * (Thm.binding * term)) list -> theory -> theory
end;
structure Constdefs: CONSTDEFS =
@@ -38,7 +36,7 @@
val prop = prep_prop var_ctxt raw_prop;
val (c, T) = #1 (LocalDefs.cert_def thy_ctxt (Logic.strip_imp_concl prop));
val _ =
- (case Option.map Binding.base_name d of
+ (case Option.map Binding.name_of d of
NONE => ()
| SOME c' =>
if c <> c' then
@@ -46,7 +44,7 @@
else ());
val def = Term.subst_atomic [(Free (c, T), Const (Sign.full_bname thy c, T))] prop;
- val name = Thm.def_name_optional c (Binding.base_name raw_name);
+ val name = Thm.def_name_optional c (Binding.name_of raw_name);
val atts = map (prep_att thy) raw_atts;
val thy' =
--- a/src/Pure/Isar/element.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/element.ML Wed Mar 04 10:45:52 2009 +0100
@@ -96,7 +96,7 @@
fun map_ctxt {binding, typ, term, pattern, fact, attrib} =
fn Fixes fixes => Fixes (fixes |> map (fn (x, T, mx) => (binding x, Option.map typ T, mx)))
| Constrains xs => Constrains (xs |> map (fn (x, T) =>
- (Binding.base_name (binding (Binding.name x)), typ T)))
+ (Binding.name_of (binding (Binding.name x)), typ T)))
| Assumes asms => Assumes (asms |> map (fn ((a, atts), propps) =>
((binding a, map attrib atts), propps |> map (fn (t, ps) => (term t, map pattern ps)))))
| Defines defs => Defines (defs |> map (fn ((a, atts), (t, ps)) =>
@@ -125,11 +125,9 @@
map (fn y => Pretty.block [Pretty.str " ", Pretty.keyword sep, Pretty.brk 1, y]) ys;
fun pretty_name_atts ctxt (b, atts) sep =
- let val name = Binding.display b in
- if name = "" andalso null atts then []
- else [Pretty.block
- (Pretty.breaks (Pretty.str name :: Attrib.pretty_attribs ctxt atts @ [Pretty.str sep]))]
- end;
+ if Binding.is_empty b andalso null atts then []
+ else [Pretty.block (Pretty.breaks
+ (Pretty.str (Binding.str_of b) :: Attrib.pretty_attribs ctxt atts @ [Pretty.str sep]))];
(* pretty_stmt *)
@@ -145,8 +143,8 @@
Pretty.block (Pretty.breaks (prt_name_atts a ":" @ prt_terms (map fst ts)));
fun prt_var (x, SOME T) = Pretty.block
- [Pretty.str (Binding.base_name x ^ " ::"), Pretty.brk 1, prt_typ T]
- | prt_var (x, NONE) = Pretty.str (Binding.base_name x);
+ [Pretty.str (Binding.name_of x ^ " ::"), Pretty.brk 1, prt_typ T]
+ | prt_var (x, NONE) = Pretty.str (Binding.name_of x);
val prt_vars = separate (Pretty.keyword "and") o map prt_var;
fun prt_obtain (_, ([], ts)) = Pretty.block (Pretty.breaks (prt_terms ts))
@@ -170,9 +168,9 @@
fun prt_mixfix NoSyn = []
| prt_mixfix mx = [Pretty.brk 2, Syntax.pretty_mixfix mx];
- fun prt_fix (x, SOME T, mx) = Pretty.block (Pretty.str (Binding.base_name x ^ " ::") ::
+ fun prt_fix (x, SOME T, mx) = Pretty.block (Pretty.str (Binding.name_of x ^ " ::") ::
Pretty.brk 1 :: prt_typ T :: Pretty.brk 1 :: prt_mixfix mx)
- | prt_fix (x, NONE, mx) = Pretty.block (Pretty.str (Binding.base_name x) ::
+ | prt_fix (x, NONE, mx) = Pretty.block (Pretty.str (Binding.name_of x) ::
Pretty.brk 1 :: prt_mixfix mx);
fun prt_constrain (x, T) = prt_fix (Binding.name x, SOME T, NoSyn);
@@ -296,7 +294,7 @@
gen_witness_proof (fn after_qed' => fn propss =>
Proof.map_context (K goal_ctxt)
#> Proof.local_goal (ProofDisplay.print_results int) (K I) ProofContext.bind_propp_i
- cmd NONE after_qed' (map (pair (Binding.empty, [])) propss))
+ cmd NONE after_qed' (map (pair Thm.empty_binding) propss))
(fn wits => fn _ => after_qed wits) wit_propss [];
end;
@@ -504,7 +502,7 @@
val defs' = Attrib.map_specs (Attrib.attribute_i (ProofContext.theory_of ctxt)) defs;
val asms = defs' |> map (fn ((name, atts), (t, ps)) =>
let val ((c, _), t') = LocalDefs.cert_def ctxt t
- in (t', ((Binding.map_base (Thm.def_name_optional c) name, atts), [(t', ps)])) end);
+ in (t', ((Binding.map_name (Thm.def_name_optional c) name, atts), [(t', ps)])) end);
val (_, ctxt') =
ctxt |> fold (Variable.auto_fixes o #1) asms
|> ProofContext.add_assms_i LocalDefs.def_export (map #2 asms);
@@ -529,7 +527,7 @@
fun prep_facts prep_name get intern ctxt =
map_ctxt
- {binding = Binding.map_base prep_name,
+ {binding = Binding.map_name prep_name,
typ = I,
term = I,
pattern = I,
--- a/src/Pure/Isar/expression.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/expression.ML Wed Mar 04 10:45:52 2009 +0100
@@ -88,17 +88,13 @@
if null dups then () else error (message ^ commas dups)
end;
- fun match_bind (n, b) = (n = Binding.base_name b);
+ fun match_bind (n, b) = (n = Binding.name_of b);
fun parm_eq ((b1, mx1: mixfix), (b2, mx2)) =
(* FIXME: cannot compare bindings for equality, instead check for equal name and syntax *)
- (Binding.base_name b1 = Binding.base_name b2) andalso
- (if mx1 = mx2 then true
- else error ("Conflicting syntax for parameter" ^ quote (Binding.display b1) ^
- " in expression."));
+ Binding.name_of b1 = Binding.name_of b2 andalso
+ (mx1 = mx2 orelse
+ error ("Conflicting syntax for parameter " ^ quote (Binding.str_of b1) ^ " in expression"));
- fun bind_eq (b1, b2) = (Binding.base_name b1 = Binding.base_name b2);
- (* FIXME: cannot compare bindings for equality. *)
-
fun params_loc loc =
(Locale.params_of thy loc |> map (fn (p, _, mx) => (p, mx)), loc);
fun params_inst (expr as (loc, (prfx, Positional insts))) =
@@ -133,8 +129,8 @@
val (implicit, expr') = params_expr expr;
- val implicit' = map (#1 #> Binding.base_name) implicit;
- val fixed' = map (#1 #> Binding.base_name) fixed;
+ val implicit' = map (#1 #> Binding.name_of) implicit;
+ val fixed' = map (#1 #> Binding.name_of) fixed;
val _ = reject_dups "Duplicate fixed parameter(s): " fixed';
val implicit'' = if strict then []
else let val _ = reject_dups
@@ -310,14 +306,12 @@
(a, map (fn (t, ps) => (close_frees t, no_binds ps)) propps)))
| Defines defs => Defines (defs |> map (fn ((name, atts), (t, ps)) =>
let val ((c, _), t') = LocalDefs.cert_def ctxt (close_frees t)
- in
- ((Binding.map_base (Thm.def_name_optional c) name, atts), (t', no_binds ps))
- end))
+ in ((Binding.map_name (Thm.def_name_optional c) name, atts), (t', no_binds ps)) end))
| e => e)
end;
fun finish_primitive parms _ (Fixes fixes) = Fixes (map (fn (binding, _, mx) =>
- let val x = Binding.base_name binding
+ let val x = Binding.name_of binding
in (binding, AList.lookup (op =) parms x, mx) end) fixes)
| finish_primitive _ _ (Constrains _) = Constrains []
| finish_primitive _ close (Assumes asms) = close (Assumes asms)
@@ -328,7 +322,7 @@
let
val thy = ProofContext.theory_of ctxt;
val (parm_names, parm_types) = Locale.params_of thy loc |>
- map_split (fn (b, SOME T, _) => (Binding.base_name b, T));
+ map_split (fn (b, SOME T, _) => (Binding.name_of b, T));
val (morph, _) = inst_morph (parm_names, parm_types) (prfx, inst) ctxt;
in (loc, morph) end;
@@ -360,7 +354,7 @@
fun prep_insts (loc, (prfx, inst)) (i, insts, ctxt) =
let
val (parm_names, parm_types) = Locale.params_of thy loc |>
- map_split (fn (b, SOME T, _) => (Binding.base_name b, T))
+ map_split (fn (b, SOME T, _) => (Binding.name_of b, T))
(*FIXME return value of Locale.params_of has strange type*)
val inst' = prep_inst ctxt parm_names inst;
val parm_types' = map (TypeInfer.paramify_vars o
@@ -394,7 +388,7 @@
prep_concl raw_concl (insts', elems, ctxt5);
(* Retrieve parameter types *)
- val xs = fold (fn Fixes fixes => (fn ps => ps @ map (Binding.base_name o #1) fixes)
+ val xs = fold (fn Fixes fixes => (fn ps => ps @ map (Binding.name_of o #1) fixes)
| _ => fn ps => ps) (Fixes fors :: elems') [];
val (Ts, ctxt7) = fold_map ProofContext.inferred_param xs ctxt6;
val parms = xs ~~ Ts; (* params from expression and elements *)
@@ -726,14 +720,14 @@
| defines_to_notes _ e = e;
fun gen_add_locale prep_decl
- bname raw_predicate_bname raw_imprt raw_body thy =
+ bname raw_predicate_bname raw_import raw_body thy =
let
val name = Sign.full_bname thy bname;
val _ = Locale.defined thy name andalso
error ("Duplicate definition of locale " ^ quote name);
val ((fixed, deps, body_elems), (parms, ctxt')) =
- prep_decl raw_imprt I raw_body (ProofContext.init thy);
+ prep_decl raw_import I raw_body (ProofContext.init thy);
val text as (((_, exts'), _), defs) = eval ctxt' deps body_elems;
val predicate_bname = if raw_predicate_bname = "" then bname
--- a/src/Pure/Isar/isar_cmd.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/isar_cmd.ML Wed Mar 04 10:45:52 2009 +0100
@@ -32,7 +32,6 @@
val skip_proof: Toplevel.transition -> Toplevel.transition
val init_theory: string * string list * (string * bool) list ->
Toplevel.transition -> Toplevel.transition
- val welcome: Toplevel.transition -> Toplevel.transition
val exit: Toplevel.transition -> Toplevel.transition
val quit: Toplevel.transition -> Toplevel.transition
val pr: string list * (int option * int option) -> Toplevel.transition -> Toplevel.transition
@@ -62,10 +61,6 @@
val class_deps: Toplevel.transition -> Toplevel.transition
val thy_deps: Toplevel.transition -> Toplevel.transition
val thm_deps: (Facts.ref * Attrib.src list) list -> Toplevel.transition -> Toplevel.transition
- val find_theorems: (int option * bool) * (bool * string FindTheorems.criterion) list
- -> Toplevel.transition -> Toplevel.transition
- val find_consts: (bool * FindConsts.criterion) list ->
- Toplevel.transition -> Toplevel.transition
val unused_thms: (string list * string list option) option ->
Toplevel.transition -> Toplevel.transition
val print_binds: Toplevel.transition -> Toplevel.transition
@@ -166,7 +161,7 @@
(* axioms *)
fun add_axms f args thy =
- f (map (fn ((b, ax), srcs) => ((Binding.base_name b, ax), map (Attrib.attribute thy) srcs)) args) thy;
+ f (map (fn ((b, ax), srcs) => ((Binding.name_of b, ax), map (Attrib.attribute thy) srcs)) args) thy;
val add_axioms = add_axms (snd oo PureThy.add_axioms_cmd);
@@ -269,8 +264,6 @@
if ThyInfo.check_known_thy (Context.theory_name thy)
then ThyInfo.end_theory thy else ());
-val welcome = Toplevel.imperative (writeln o Session.welcome);
-
val exit = Toplevel.keep (fn state =>
(Context.set_thread_data (try Toplevel.generic_theory_of state);
raise Toplevel.TERMINATE));
@@ -403,20 +396,9 @@
|> sort (int_ord o pairself #1) |> map #2;
in Present.display_graph gr end);
-
-(* retrieve theorems *)
-
fun thm_deps args = Toplevel.unknown_theory o Toplevel.keep (fn state =>
ThmDeps.thm_deps (Proof.get_thmss (Toplevel.enter_proof_body state) args));
-fun find_theorems ((opt_lim, rem_dups), spec) =
- Toplevel.unknown_theory o Toplevel.keep (fn state =>
- let
- val proof_state = Toplevel.enter_proof_body state;
- val ctxt = Proof.context_of proof_state;
- val opt_goal = try Proof.get_goal proof_state |> Option.map (#2 o #2);
- in FindTheorems.print_theorems ctxt opt_goal opt_lim rem_dups spec end);
-
(* find unused theorems *)
@@ -434,12 +416,6 @@
|> map pretty_thm |> Pretty.chunks |> Pretty.writeln
end);
-(* retrieve constants *)
-
-fun find_consts spec =
- Toplevel.unknown_theory o Toplevel.keep (fn state =>
- let val ctxt = (Proof.context_of o Toplevel.enter_proof_body) state
- in FindConsts.find_consts ctxt spec end);
(* print proof context contents *)
--- a/src/Pure/Isar/isar_syn.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/isar_syn.ML Wed Mar 04 10:45:52 2009 +0100
@@ -37,6 +37,7 @@
(Scan.succeed (Toplevel.exit o Toplevel.end_local_theory));
+
(** markup commands **)
val _ = OuterSyntax.markup_command ThyOutput.Markup "header" "theory header" K.diag
@@ -79,7 +80,7 @@
-(** theory sections **)
+(** theory commands **)
(* classes and sorts *)
@@ -692,7 +693,7 @@
val _ =
OuterSyntax.command "finally" "combine calculation and current facts, exhibit result"
(K.tag_proof K.prf_chain)
- (calc_args >> (Toplevel.proofs' o Calculation.finally_));
+ (calc_args >> (Toplevel.proofs' o Calculation.finally));
val _ =
OuterSyntax.command "moreover" "augment calculation by current facts"
@@ -728,39 +729,6 @@
handle ERROR msg => Scan.fail_with (K msg)));
-(* global history commands *)
-
-val _ =
- OuterSyntax.improper_command "init_toplevel" "init toplevel point-of-interest" K.control
- (Scan.succeed (Toplevel.no_timing o Toplevel.imperative Isar.init));
-
-val _ =
- OuterSyntax.improper_command "linear_undo" "undo commands" K.control
- (Scan.optional P.nat 1 >>
- (fn n => Toplevel.no_timing o Toplevel.imperative (fn () => Isar.linear_undo n)));
-
-val _ =
- OuterSyntax.improper_command "undo" "undo commands (skipping closed proofs)" K.control
- (Scan.optional P.nat 1 >>
- (fn n => Toplevel.no_timing o Toplevel.imperative (fn () => Isar.undo n)));
-
-val _ =
- OuterSyntax.improper_command "undos_proof" "undo commands (skipping closed proofs)" K.control
- (Scan.optional P.nat 1 >> (fn n => Toplevel.no_timing o
- Toplevel.keep (fn state =>
- if Toplevel.is_proof state then (Isar.undo n; Isar.print ()) else raise Toplevel.UNDEF)));
-
-val _ =
- OuterSyntax.improper_command "cannot_undo" "partial undo -- Proof General legacy" K.control
- (P.name >>
- (fn "end" => Toplevel.no_timing o Toplevel.imperative (fn () => Isar.undo 1)
- | txt => Toplevel.imperative (fn () => error ("Cannot undo " ^ quote txt))));
-
-val _ =
- OuterSyntax.improper_command "kill" "kill partial proof or theory development" K.control
- (Scan.succeed (Toplevel.no_timing o Toplevel.imperative Isar.kill));
-
-
(** diagnostic commands (for interactive mode only) **)
@@ -853,47 +821,6 @@
OuterSyntax.improper_command "thm_deps" "visualize theorem dependencies"
K.diag (SpecParse.xthms1 >> (Toplevel.no_timing oo IsarCmd.thm_deps));
-local
-
-val criterion =
- P.reserved "name" |-- P.!!! (P.$$$ ":" |-- P.xname) >> FindTheorems.Name ||
- P.reserved "intro" >> K FindTheorems.Intro ||
- P.reserved "elim" >> K FindTheorems.Elim ||
- P.reserved "dest" >> K FindTheorems.Dest ||
- P.reserved "solves" >> K FindTheorems.Solves ||
- P.reserved "simp" |-- P.!!! (P.$$$ ":" |-- P.term) >> FindTheorems.Simp ||
- P.term >> FindTheorems.Pattern;
-
-val options =
- Scan.optional
- (P.$$$ "(" |--
- P.!!! (Scan.option P.nat -- Scan.optional (P.reserved "with_dups" >> K false) true
- --| P.$$$ ")")) (NONE, true);
-in
-
-val _ =
- OuterSyntax.improper_command "find_theorems" "print theorems meeting specified criteria" K.diag
- (options -- Scan.repeat (((Scan.option P.minus >> is_none) -- criterion))
- >> (Toplevel.no_timing oo IsarCmd.find_theorems));
-
-end;
-
-local
-
-val criterion =
- P.reserved "strict" |-- P.!!! (P.$$$ ":" |-- P.xname) >> FindConsts.Strict ||
- P.reserved "name" |-- P.!!! (P.$$$ ":" |-- P.xname) >> FindConsts.Name ||
- P.xname >> FindConsts.Loose;
-
-in
-
-val _ =
- OuterSyntax.improper_command "find_consts" "search constants by type pattern"
- K.diag (Scan.repeat (((Scan.option P.minus >> is_none) -- criterion))
- >> (Toplevel.no_timing oo IsarCmd.find_consts));
-
-end;
-
val _ =
OuterSyntax.improper_command "print_binds" "print term bindings of proof context" K.diag
(Scan.succeed (Toplevel.no_timing o IsarCmd.print_binds));
@@ -948,6 +875,7 @@
(Toplevel.no_timing oo IsarCmd.unused_thms));
+
(** system commands (for interactive mode only) **)
val _ =
@@ -1013,9 +941,5 @@
OuterSyntax.improper_command "exit" "exit Isar loop" K.control
(Scan.succeed (Toplevel.no_timing o IsarCmd.exit));
-val _ =
- OuterSyntax.improper_command "welcome" "print welcome message" K.diag
- (Scan.succeed (Toplevel.no_timing o IsarCmd.welcome));
-
end;
--- a/src/Pure/Isar/local_defs.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/local_defs.ML Wed Mar 04 10:45:52 2009 +0100
@@ -11,8 +11,8 @@
val mk_def: Proof.context -> (string * term) list -> term list
val expand: cterm list -> thm -> thm
val def_export: Assumption.export
- val add_defs: ((binding * mixfix) * ((binding * attribute list) * term)) list ->
- Proof.context -> (term * (string * thm)) list * Proof.context
+ val add_defs: ((binding * mixfix) * (Thm.binding * term)) list -> Proof.context ->
+ (term * (string * thm)) list * Proof.context
val add_def: (binding * mixfix) * term -> Proof.context -> (term * thm) * Proof.context
val fixed_abbrev: (binding * mixfix) * term -> Proof.context ->
(term * term) * Proof.context
@@ -90,8 +90,8 @@
let
val ((bvars, mxs), specs) = defs |> split_list |>> split_list;
val ((bfacts, atts), rhss) = specs |> split_list |>> split_list;
- val xs = map Binding.base_name bvars;
- val names = map2 (Binding.map_base o Thm.def_name_optional) xs bfacts;
+ val xs = map Binding.name_of bvars;
+ val names = map2 (Binding.map_name o Thm.def_name_optional) xs bfacts;
val eqs = mk_def ctxt (xs ~~ rhss);
val lhss = map (fst o Logic.dest_equals) eqs;
in
@@ -104,7 +104,7 @@
end;
fun add_def (var, rhs) ctxt =
- let val ([(lhs, (_, th))], ctxt') = add_defs [(var, ((Binding.empty, []), rhs))] ctxt
+ let val ([(lhs, (_, th))], ctxt') = add_defs [(var, (Thm.empty_binding, rhs))] ctxt
in ((lhs, th), ctxt') end;
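The Binding.base_name to Binding.name_of and Binding.map_base to Binding.map_name renamings above recur in most of the Isar hunks below, as does Thm.empty_binding as the replacement for the literal pair (Binding.empty, []). A minimal sketch of the renamed operations, using only functions that appear in this changeset:

  (* build a binding, read back its base name, and transform it *)
  val b  = Binding.name "foo";
  val x  = Binding.name_of b;                    (* "foo" *)
  val b' = Binding.map_name (suffix "_def") b;   (* binding for "foo_def" *)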
--- a/src/Pure/Isar/locale.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/locale.ML Wed Mar 04 10:45:52 2009 +0100
@@ -194,7 +194,7 @@
fun axioms_of thy = #axioms o the_locale thy;
fun instance_of thy name morph = params_of thy name |>
- map ((fn (b, T, _) => Free (Binding.base_name b, the T)) #> Morphism.term morph);
+ map ((fn (b, T, _) => Free (Binding.name_of b, the T)) #> Morphism.term morph);
fun specification_of thy = #spec o the_locale thy;
@@ -464,8 +464,7 @@
fun decl_attrib decl phi = Thm.declaration_attribute (K (decl phi));
fun add_decls add loc decl =
- ProofContext.theory ((change_locale loc o apfst o apfst) (add (decl, stamp ())))
- #>
+ ProofContext.theory ((change_locale loc o apfst o apfst) (add (decl, stamp ()))) #>
add_thmss loc Thm.internalK
[((Binding.empty, [Attrib.internal (decl_attrib decl)]), [([Drule.dummy_thm], [])])];
--- a/src/Pure/Isar/method.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/method.ML Wed Mar 04 10:45:52 2009 +0100
@@ -38,7 +38,7 @@
val atomize: bool -> method
val this: method
val fact: thm list -> Proof.context -> method
- val assumption_tac: Proof.context -> int -> tactic
+ val assm_tac: Proof.context -> int -> tactic
val assumption: Proof.context -> method
val close: bool -> Proof.context -> method
val trace: Proof.context -> thm list -> unit
@@ -49,7 +49,6 @@
val erule: int -> thm list -> method
val drule: int -> thm list -> method
val frule: int -> thm list -> method
- val iprover_tac: Proof.context -> int option -> int -> tactic
val set_tactic: (thm list -> tactic) -> Proof.context -> Proof.context
val tactic: string * Position.T -> Proof.context -> method
val raw_tactic: string * Position.T -> Proof.context -> method
@@ -225,20 +224,20 @@
in
-fun assumption_tac ctxt =
+fun assm_tac ctxt =
assume_tac APPEND'
Goal.assume_rule_tac ctxt APPEND'
cond_rtac (can Logic.dest_equals) Drule.reflexive_thm APPEND'
cond_rtac (can Logic.dest_term) Drule.termI;
fun assumption ctxt = METHOD (HEADGOAL o
- (fn [] => assumption_tac ctxt
+ (fn [] => assm_tac ctxt
| [fact] => solve_tac [fact]
| _ => K no_tac));
fun close immed ctxt = METHOD (K
(FILTER Thm.no_prems
- ((if immed then ALLGOALS (assumption_tac ctxt) else all_tac) THEN flexflex_tac)));
+ ((if immed then ALLGOALS (assm_tac ctxt) else all_tac) THEN flexflex_tac)));
end;
@@ -296,55 +295,6 @@
THEN Tactic.distinct_subgoals_tac;
-(* iprover -- intuitionistic proof search *)
-
-local
-
-val remdups_tac = SUBGOAL (fn (g, i) =>
- let val prems = Logic.strip_assums_hyp g in
- REPEAT_DETERM_N (length prems - length (distinct op aconv prems))
- (Tactic.ematch_tac [Drule.remdups_rl] i THEN Tactic.eq_assume_tac i)
- end);
-
-fun REMDUPS tac = tac THEN_ALL_NEW remdups_tac;
-
-val bires_tac = Tactic.biresolution_from_nets_tac ContextRules.orderlist;
-
-fun safe_step_tac ctxt =
- ContextRules.Swrap ctxt
- (eq_assume_tac ORELSE'
- bires_tac true (ContextRules.netpair_bang ctxt));
-
-fun unsafe_step_tac ctxt =
- ContextRules.wrap ctxt
- (assume_tac APPEND'
- bires_tac false (ContextRules.netpair_bang ctxt) APPEND'
- bires_tac false (ContextRules.netpair ctxt));
-
-fun step_tac ctxt i =
- REPEAT_DETERM1 (REMDUPS (safe_step_tac ctxt) i) ORELSE
- REMDUPS (unsafe_step_tac ctxt) i;
-
-fun intprover_tac ctxt gs d lim = SUBGOAL (fn (g, i) => if d > lim then no_tac else
- let
- val ps = Logic.strip_assums_hyp g;
- val c = Logic.strip_assums_concl g;
- in
- if member (fn ((ps1, c1), (ps2, c2)) =>
- c1 aconv c2 andalso
- length ps1 = length ps2 andalso
- gen_eq_set (op aconv) (ps1, ps2)) gs (ps, c) then no_tac
- else (step_tac ctxt THEN_ALL_NEW intprover_tac ctxt ((ps, c) :: gs) (d + 1) lim) i
- end);
-
-in
-
-fun iprover_tac ctxt opt_lim =
- SELECT_GOAL (DEEPEN (2, the_default 20 opt_lim) (intprover_tac ctxt [] 0) 4 1);
-
-end;
-
-
(* ML tactics *)
structure TacticData = ProofDataFun
@@ -486,7 +436,7 @@
local
fun thms ss = Scan.repeat (Scan.unless (Scan.lift (Scan.first ss)) Attrib.multi_thm) >> flat;
-fun app (f, att) (context, ths) = foldl_map att (Context.map_proof f context, ths);
+fun app (f, att) (context, ths) = Library.foldl_map att (Context.map_proof f context, ths);
fun section ss = Scan.depend (fn context => (Scan.first ss -- Scan.pass context (thms ss)) :|--
(fn (m, ths) => Scan.succeed (app m (context, ths))));
@@ -511,39 +461,6 @@
end;
-(* iprover syntax *)
-
-local
-
-val introN = "intro";
-val elimN = "elim";
-val destN = "dest";
-val ruleN = "rule";
-
-fun modifier name kind kind' att =
- Args.$$$ name |-- (kind >> K NONE || kind' |-- P.nat --| Args.colon >> SOME)
- >> (pair (I: Proof.context -> Proof.context) o att);
-
-val iprover_modifiers =
- [modifier destN Args.bang_colon Args.bang ContextRules.dest_bang,
- modifier destN Args.colon (Scan.succeed ()) ContextRules.dest,
- modifier elimN Args.bang_colon Args.bang ContextRules.elim_bang,
- modifier elimN Args.colon (Scan.succeed ()) ContextRules.elim,
- modifier introN Args.bang_colon Args.bang ContextRules.intro_bang,
- modifier introN Args.colon (Scan.succeed ()) ContextRules.intro,
- Args.del -- Args.colon >> K (I, ContextRules.rule_del)];
-
-in
-
-val iprover_meth =
- bang_sectioned_args' iprover_modifiers (Scan.lift (Scan.option P.nat))
- (fn n => fn prems => fn ctxt => METHOD (fn facts =>
- HEADGOAL (insert_tac (prems @ facts) THEN'
- ObjectLogic.atomize_prems_tac THEN' iprover_tac ctxt n)));
-
-end;
-
-
(* tactic syntax *)
fun nat_thms_args f = uncurry f oo
@@ -599,7 +516,6 @@
("fold", thms_ctxt_args fold_meth, "fold definitions"),
("atomize", (atomize o fst) oo syntax (Args.mode "full"),
"present local premises as object-level statements"),
- ("iprover", iprover_meth, "intuitionistic proof search"),
("rule", thms_ctxt_args some_rule, "apply some intro/elim rule"),
("erule", nat_thms_args erule, "apply rule in elimination manner (improper)"),
("drule", nat_thms_args drule, "apply rule in destruct manner (improper)"),
--- a/src/Pure/Isar/obtain.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/obtain.ML Wed Mar 04 10:45:52 2009 +0100
@@ -40,11 +40,9 @@
sig
val thatN: string
val obtain: string -> (binding * string option * mixfix) list ->
- (Attrib.binding * (string * string list) list) list ->
- bool -> Proof.state -> Proof.state
+ (Attrib.binding * (string * string list) list) list -> bool -> Proof.state -> Proof.state
val obtain_i: string -> (binding * typ option * mixfix) list ->
- ((binding * attribute list) * (term * term list) list) list ->
- bool -> Proof.state -> Proof.state
+ (Thm.binding * (term * term list) list) list -> bool -> Proof.state -> Proof.state
val result: (Proof.context -> tactic) -> thm list -> Proof.context ->
(cterm list * thm list) * Proof.context
val guess: (binding * string option * mixfix) list -> bool -> Proof.state -> Proof.state
@@ -121,7 +119,7 @@
(*obtain vars*)
val (vars, vars_ctxt) = prep_vars raw_vars ctxt;
val (_, fix_ctxt) = vars_ctxt |> ProofContext.add_fixes_i vars;
- val xs = map (Binding.base_name o #1) vars;
+ val xs = map (Binding.name_of o #1) vars;
(*obtain asms*)
val (asms_ctxt, proppss) = prep_propp (fix_ctxt, map snd raw_asms);
@@ -155,14 +153,14 @@
in
state
|> Proof.enter_forward
- |> Proof.have_i NONE (K I) [((Binding.empty, []), [(obtain_prop, [])])] int
+ |> Proof.have_i NONE (K I) [(Thm.empty_binding, [(obtain_prop, [])])] int
|> Proof.proof (SOME Method.succeed_text) |> Seq.hd
|> Proof.fix_i [(Binding.name thesisN, NONE, NoSyn)]
|> Proof.assume_i
[((Binding.name that_name, [ContextRules.intro_query NONE]), [(that_prop, [])])]
|> `Proof.the_facts
||> Proof.chain_facts chain_facts
- ||> Proof.show_i NONE after_qed [((Binding.empty, []), [(thesis, [])])] false
+ ||> Proof.show_i NONE after_qed [(Thm.empty_binding, [(thesis, [])])] false
|-> Proof.refine_insert
end;
@@ -260,7 +258,7 @@
fun inferred_type (binding, _, mx) ctxt =
let
- val x = Binding.base_name binding;
+ val x = Binding.name_of binding;
val (T, ctxt') = ProofContext.inferred_param x ctxt
in ((x, T, mx), ctxt') end;
@@ -295,7 +293,7 @@
|> Proof.map_context (K ctxt')
|> Proof.fix_i (map (fn ((x, T), mx) => (Binding.name x, SOME T, mx)) parms)
|> `Proof.context_of |-> (fn fix_ctxt => Proof.assm_i
- (obtain_export fix_ctxt rule (map cert ts)) [((Binding.empty, []), asms)])
+ (obtain_export fix_ctxt rule (map cert ts)) [(Thm.empty_binding, asms)])
|> Proof.add_binds_i AutoBind.no_facts
end;
@@ -313,7 +311,7 @@
|> Proof.fix_i [(Binding.name AutoBind.thesisN, NONE, NoSyn)]
|> Proof.chain_facts chain_facts
|> Proof.local_goal print_result (K I) (apsnd (rpair I))
- "guess" before_qed after_qed [((Binding.empty, []), [Logic.mk_term goal, goal])]
+ "guess" before_qed after_qed [(Thm.empty_binding, [Logic.mk_term goal, goal])]
|> Proof.refine (Method.primitive_text (K (Goal.init (cert thesis)))) |> Seq.hd
end;
--- a/src/Pure/Isar/outer_parse.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/outer_parse.ML Wed Mar 04 10:45:52 2009 +0100
@@ -228,7 +228,7 @@
(* names and text *)
val name = group "name declaration" (short_ident || sym_ident || string || number);
-val binding = position name >> Binding.name_pos;
+val binding = position name >> Binding.make;
val xname = group "name reference" (short_ident || long_ident || sym_ident || string || number);
val text = group "text" (short_ident || long_ident || sym_ident || string || number || verbatim);
val path = group "file name/path specification" name >> Path.explode;
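Binding.name_pos gives way to Binding.make, which takes the name paired with its source position (the same constructor the new ML antiquotation further down serializes). A sketch, using Position.none as the neutral position seen elsewhere in this changeset:

  (* a binding that records where it was written *)
  val b = Binding.make ("foo", Position.none);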
--- a/src/Pure/Isar/proof.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/proof.ML Wed Mar 04 10:45:52 2009 +0100
@@ -48,23 +48,18 @@
val assm: Assumption.export ->
(Attrib.binding * (string * string list) list) list -> state -> state
val assm_i: Assumption.export ->
- ((binding * attribute list) * (term * term list) list) list -> state -> state
+ (Thm.binding * (term * term list) list) list -> state -> state
val assume: (Attrib.binding * (string * string list) list) list -> state -> state
- val assume_i: ((binding * attribute list) * (term * term list) list) list ->
- state -> state
+ val assume_i: (Thm.binding * (term * term list) list) list -> state -> state
val presume: (Attrib.binding * (string * string list) list) list -> state -> state
- val presume_i: ((binding * attribute list) * (term * term list) list) list ->
- state -> state
- val def: (Attrib.binding * ((binding * mixfix) * (string * string list))) list ->
- state -> state
- val def_i: ((binding * attribute list) *
- ((binding * mixfix) * (term * term list))) list -> state -> state
+ val presume_i: (Thm.binding * (term * term list) list) list -> state -> state
+ val def: (Attrib.binding * ((binding * mixfix) * (string * string list))) list -> state -> state
+ val def_i: (Thm.binding * ((binding * mixfix) * (term * term list))) list -> state -> state
val chain: state -> state
val chain_facts: thm list -> state -> state
val get_thmss: state -> (Facts.ref * Attrib.src list) list -> thm list
val note_thmss: (Attrib.binding * (Facts.ref * Attrib.src list) list) list -> state -> state
- val note_thmss_i: ((binding * attribute list) *
- (thm list * attribute list) list) list -> state -> state
+ val note_thmss_i: (Thm.binding * (thm list * attribute list) list) list -> state -> state
val from_thmss: ((Facts.ref * Attrib.src list) list) list -> state -> state
val from_thmss_i: ((thm list * attribute list) list) list -> state -> state
val with_thmss: ((Facts.ref * Attrib.src list) list) list -> state -> state
@@ -107,11 +102,11 @@
val have: Method.text option -> (thm list list -> state -> state) ->
(Attrib.binding * (string * string list) list) list -> bool -> state -> state
val have_i: Method.text option -> (thm list list -> state -> state) ->
- ((binding * attribute list) * (term * term list) list) list -> bool -> state -> state
+ (Thm.binding * (term * term list) list) list -> bool -> state -> state
val show: Method.text option -> (thm list list -> state -> state) ->
(Attrib.binding * (string * string list) list) list -> bool -> state -> state
val show_i: Method.text option -> (thm list list -> state -> state) ->
- ((binding * attribute list) * (term * term list) list) list -> bool -> state -> state
+ (Thm.binding * (term * term list) list) list -> bool -> state -> state
val schematic_goal: state -> bool
val is_relevant: state -> bool
val local_future_proof: (state -> ('a * state) Future.future) ->
--- a/src/Pure/Isar/proof_context.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/proof_context.ML Wed Mar 04 10:45:52 2009 +0100
@@ -103,12 +103,10 @@
val sticky_prefix: string -> Proof.context -> Proof.context
val restore_naming: Proof.context -> Proof.context -> Proof.context
val reset_naming: Proof.context -> Proof.context
- val note_thmss: string ->
- ((binding * attribute list) * (Facts.ref * attribute list) list) list ->
- Proof.context -> (string * thm list) list * Proof.context
- val note_thmss_i: string ->
- ((binding * attribute list) * (thm list * attribute list) list) list ->
- Proof.context -> (string * thm list) list * Proof.context
+ val note_thmss: string -> (Thm.binding * (Facts.ref * attribute list) list) list ->
+ Proof.context -> (string * thm list) list * Proof.context
+ val note_thmss_i: string -> (Thm.binding * (thm list * attribute list) list) list ->
+ Proof.context -> (string * thm list) list * Proof.context
val put_thms: bool -> string * thm list option -> Proof.context -> Proof.context
val read_vars: (binding * string option * mixfix) list -> Proof.context ->
(binding * typ option * mixfix) list * Proof.context
@@ -121,10 +119,10 @@
val auto_fixes: Proof.context * (term list list * 'a) -> Proof.context * (term list list * 'a)
val bind_fixes: string list -> Proof.context -> (term -> term) * Proof.context
val add_assms: Assumption.export ->
- ((binding * attribute list) * (string * string list) list) list ->
+ (Thm.binding * (string * string list) list) list ->
Proof.context -> (string * thm list) list * Proof.context
val add_assms_i: Assumption.export ->
- ((binding * attribute list) * (term * term list) list) list ->
+ (Thm.binding * (term * term list) list) list ->
Proof.context -> (string * thm list) list * Proof.context
val add_cases: bool -> (string * RuleCases.T option) list -> Proof.context -> Proof.context
val apply_case: RuleCases.T -> Proof.context -> (string * term list) list * Proof.context
@@ -975,7 +973,7 @@
val facts = PureThy.name_thmss false pos name (map (apfst (get ctxt)) raw_facts);
fun app (th, attrs) x =
- swap (foldl_map (Thm.proof_attributes (surround (Thm.kind k) (attrs @ more_attrs))) (x, th));
+ swap (Library.foldl_map (Thm.proof_attributes (surround (Thm.kind k) (attrs @ more_attrs))) (x, th));
val (res, ctxt') = fold_map app facts ctxt;
val thms = PureThy.name_thms false false pos name (flat res);
val Mode {stmt, ...} = get_mode ctxt;
@@ -1010,7 +1008,7 @@
fun prep_vars prep_typ internal =
fold_map (fn (raw_b, raw_T, raw_mx) => fn ctxt =>
let
- val raw_x = Binding.base_name raw_b;
+ val raw_x = Binding.name_of raw_b;
val (x, mx) = Syntax.const_mixfix raw_x raw_mx;
val _ = Syntax.is_identifier (no_skolem internal x) orelse
error ("Illegal variable name: " ^ quote x);
@@ -1019,7 +1017,7 @@
if internal then T
else Type.no_tvars T handle TYPE (msg, _, _) => error msg;
val opt_T = Option.map (cond_tvars o cert_typ ctxt o prep_typ ctxt) raw_T;
- val var = (Binding.map_base (K x) raw_b, opt_T, mx);
+ val var = (Binding.map_name (K x) raw_b, opt_T, mx);
in (var, ctxt |> declare_var (x, opt_T, mx) |> #2) end);
in
@@ -1093,7 +1091,7 @@
fun add_abbrev mode tags (b, raw_t) ctxt =
let
val t0 = cert_term (ctxt |> set_mode mode_abbrev) raw_t
- handle ERROR msg => cat_error msg ("in constant abbreviation " ^ quote (Binding.display b));
+ handle ERROR msg => cat_error msg ("in constant abbreviation " ^ quote (Binding.str_of b));
val [t] = Variable.exportT_terms (Variable.declare_term t0 ctxt) ctxt [t0];
val ((lhs, rhs), consts') = consts_of ctxt
|> Consts.abbreviate (Syntax.pp ctxt) (tsig_of ctxt) (naming_of ctxt) mode tags (b, t);
@@ -1120,7 +1118,7 @@
fun gen_fixes prep raw_vars ctxt =
let
val (vars, _) = prep raw_vars ctxt;
- val (xs', ctxt') = Variable.add_fixes (map (Binding.base_name o #1) vars) ctxt;
+ val (xs', ctxt') = Variable.add_fixes (map (Binding.name_of o #1) vars) ctxt;
val ctxt'' =
ctxt'
|> fold_map declare_var (map2 (fn x' => fn (_, T, mx) => (x', T, mx)) xs' vars)
--- a/src/Pure/Isar/specification.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/specification.ML Wed Mar 04 10:45:52 2009 +0100
@@ -140,7 +140,7 @@
fun gen_axioms do_print prep raw_vars raw_specs thy =
let
val ((vars, specs), _) = prep raw_vars [raw_specs] (ProofContext.init thy);
- val xs = map (fn ((b, T), _) => (Binding.base_name b, T)) vars;
+ val xs = map (fn ((b, T), _) => (Binding.name_of b, T)) vars;
(*consts*)
val (consts, consts_thy) = thy |> fold_map (Theory.specify_const []) vars;
@@ -148,8 +148,8 @@
(*axioms*)
val (axioms, axioms_thy) = consts_thy |> fold_map (fn ((b, atts), props) =>
- fold_map Thm.add_axiom
- ((map o apfst) Binding.name (PureThy.name_multi (Binding.base_name b) (map subst props)))
+ fold_map Thm.add_axiom (* FIXME proper use of binding!? *)
+ ((map o apfst) Binding.name (PureThy.name_multi (Binding.name_of b) (map subst props)))
#>> (fn ths => ((b, atts), [(map Drule.standard' ths, [])]))) specs;
val (facts, thy') = axioms_thy |> PureThy.note_thmss Thm.axiomK
(Attrib.map_facts (Attrib.attribute_i axioms_thy) axioms);
@@ -169,19 +169,19 @@
val (vars, [((raw_name, atts), [prop])]) =
fst (prep (the_list raw_var) [(raw_a, [raw_prop])] lthy);
val (((x, T), rhs), prove) = LocalDefs.derived_def lthy true prop;
- val name = Binding.map_base (Thm.def_name_optional x) raw_name;
+ val name = Binding.map_name (Thm.def_name_optional x) raw_name;
val var =
(case vars of
[] => (Binding.name x, NoSyn)
| [((b, _), mx)] =>
let
- val y = Binding.base_name b;
+ val y = Binding.name_of b;
val _ = x = y orelse
error ("Head of definition " ^ quote x ^ " differs from declaration " ^ quote y ^
Position.str_of (Binding.pos_of b));
in (b, mx) end);
val ((lhs, (_, th)), lthy2) = lthy |> LocalTheory.define Thm.definitionK
- (var, ((Binding.map_base (suffix "_raw") name, []), rhs));
+ (var, ((Binding.map_name (suffix "_raw") name, []), rhs));
val ((def_name, [th']), lthy3) = lthy2 |> LocalTheory.note Thm.definitionK
((name, Code.add_default_eqn_attrib :: atts), [prove lthy2 th]);
@@ -208,7 +208,7 @@
[] => (Binding.name x, NoSyn)
| [((b, _), mx)] =>
let
- val y = Binding.base_name b;
+ val y = Binding.name_of b;
val _ = x = y orelse
error ("Head of abbreviation " ^ quote x ^ " differs from declaration " ^ quote y ^
Position.str_of (Binding.pos_of b));
@@ -269,11 +269,10 @@
| Element.Obtains obtains =>
let
val case_names = obtains |> map_index (fn (i, (b, _)) =>
- let val name = Binding.base_name b
- in if name = "" then string_of_int (i + 1) else name end);
+ if Binding.is_empty b then string_of_int (i + 1) else Binding.name_of b);
val constraints = obtains |> map (fn (_, (vars, _)) =>
Element.Constrains
- (vars |> map_filter (fn (x, SOME T) => SOME (Binding.base_name x, T) | _ => NONE)));
+ (vars |> map_filter (fn (x, SOME T) => SOME (Binding.name_of x, T) | _ => NONE)));
val raw_propp = obtains |> map (fn (_, (_, props)) => map (rpair []) props);
val (propp, elems_ctxt) = prep_stmt (elems @ constraints) raw_propp ctxt;
@@ -283,7 +282,7 @@
fun assume_case ((name, (vars, _)), asms) ctxt' =
let
val bs = map fst vars;
- val xs = map Binding.base_name bs;
+ val xs = map Binding.name_of bs;
val props = map fst asms;
val (Ts, _) = ctxt'
|> fold Variable.declare_term props
--- a/src/Pure/Isar/theory_target.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Isar/theory_target.ML Wed Mar 04 10:45:52 2009 +0100
@@ -13,7 +13,7 @@
val begin: string -> Proof.context -> local_theory
val context: xstring -> theory -> local_theory
val instantiation: string list * (string * sort) list * sort -> theory -> local_theory
- val instantiation_cmd: xstring list * sort * xstring -> theory -> local_theory
+ val instantiation_cmd: xstring list * xstring list * xstring -> theory -> local_theory
val overloading: (string * (string * typ) * bool) list -> theory -> local_theory
val overloading_cmd: (string * string * bool) list -> theory -> local_theory
end;
@@ -188,8 +188,8 @@
val arg = (b', Term.close_schematic_term rhs');
val similar_body = Type.similar_types (rhs, rhs');
(* FIXME workaround based on educated guess *)
- val (prefix', _) = Binding.dest b';
- val class_global = Binding.base_name b = Binding.base_name b'
+ val (prefix', _, _) = Binding.dest b';
+ val class_global = Binding.name_of b = Binding.name_of b'
andalso not (null prefix')
andalso (fst o snd o split_last) prefix' = Class_Target.class_prefix target;
in
@@ -206,14 +206,15 @@
Morphism.form (ProofContext.target_notation true prmode [(lhs', mx)]))))
end;
+fun syntax_error c = error ("Illegal mixfix syntax for overloaded constant " ^ quote c);
+
fun declare_const (ta as Target {target, is_locale, is_class, ...}) depends ((b, T), mx) lthy =
let
- val c = Binding.base_name b;
+ val c = Binding.name_of b;
val tags = LocalTheory.group_position_of lthy;
val xs = filter depends (#1 (ProofContext.inferred_fixes (LocalTheory.target_of lthy)));
val U = map #2 xs ---> T;
val (mx1, mx2, mx3) = fork_mixfix ta mx;
- fun syntax_error c = error ("Illegal mixfix syntax for overloaded constant " ^ quote c);
val declare_const =
(case Class_Target.instantiation_param lthy c of
SOME c' =>
@@ -241,7 +242,7 @@
fun abbrev (ta as Target {target, is_locale, is_class, ...}) prmode ((b, mx), t) lthy =
let
- val c = Binding.base_name b;
+ val c = Binding.name_of b;
val tags = LocalTheory.group_position_of lthy;
val thy_ctxt = ProofContext.init (ProofContext.theory_of lthy);
val target_ctxt = LocalTheory.target_of lthy;
@@ -278,8 +279,8 @@
val thy = ProofContext.theory_of lthy;
val thy_ctxt = ProofContext.init thy;
- val c = Binding.base_name b;
- val name' = Binding.map_base (Thm.def_name_optional c) name;
+ val c = Binding.name_of b;
+ val name' = Binding.map_name (Thm.def_name_optional c) name;
val (rhs', rhs_conv) =
LocalDefs.export_cterm lthy thy_ctxt (Thm.cterm_of thy rhs) |>> Thm.term_of;
val xs = Variable.add_fixed (LocalTheory.target_of lthy) rhs' [];
@@ -299,7 +300,7 @@
then (fn name => fn eq => Thm.add_def false false (Binding.name name, Logic.mk_equals eq))
else (fn name => fn (Const (c, _), rhs) => AxClass.define_overloaded name (c, rhs)));
val (global_def, lthy3) = lthy2
- |> LocalTheory.theory_result (define_const (Binding.base_name name') (lhs', rhs'));
+ |> LocalTheory.theory_result (define_const (Binding.name_of name') (lhs', rhs'));
val def = LocalDefs.trans_terms lthy3
[(*c == global.c xs*) local_def,
(*global.c xs == rhs'*) global_def,
--- a/src/Pure/ML-Systems/mosml.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/ML-Systems/mosml.ML Wed Mar 04 10:45:52 2009 +0100
@@ -141,19 +141,19 @@
load "Timer";
fun start_timing () =
- let val CPUtimer = Timer.startCPUTimer();
- val time = Timer.checkCPUTimer(CPUtimer)
- in (CPUtimer,time) end;
+ let
+ val timer = Timer.startCPUTimer ();
+ val time = Timer.checkCPUTimer timer;
+ in (timer, time) end;
-fun end_timing (CPUtimer, {gc,sys,usr}) =
- let open Time (*...for Time.toString, Time.+ and Time.- *)
- val {gc=gc2,sys=sys2,usr=usr2} = Timer.checkCPUTimer(CPUtimer)
- in "User " ^ toString (usr2-usr) ^
- " GC " ^ toString (gc2-gc) ^
- " All "^ toString (sys2-sys + usr2-usr + gc2-gc) ^
- " secs"
- handle Time => ""
- end;
+fun end_timing (timer, {gc, sys, usr}) =
+ let
+ open Time; (*...for Time.toString, Time.+ and Time.- *)
+ val {gc = gc2, sys = sys2, usr = usr2} = Timer.checkCPUTimer timer;
+ val user = usr2 - usr + gc2 - gc;
+ val all = user + sys2 - sys;
+ val message = "User " ^ toString user ^ " All "^ toString all ^ " secs" handle Time => "";
+ in {message = message, user = user, all = all} end;
fun check_timer timer =
let val {sys, usr, gc} = Timer.checkCPUTimer timer
--- a/src/Pure/ML-Systems/polyml-experimental.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/ML-Systems/polyml-experimental.ML Wed Mar 04 10:45:52 2009 +0100
@@ -49,16 +49,17 @@
| c :: cs =>
(in_buffer := cs; if c = #"\n" then current_line := ! current_line + 1 else (); SOME c));
fun put s = out_buffer := s :: ! out_buffer;
- fun put_message (prt, is_err, {file, line, offset}) =
- (put (if is_err then "Error: " else "Warning: ");
- PolyML.prettyPrint (put, 76) prt;
+ fun put_message {message, hard, location = {startLine = line, ...}, context} =
+ (put (if hard then "Error: " else "Warning: ");
+ PolyML.prettyPrint (put, 76) message;
put (str_of_pos line name ^ "\n"));
val parameters =
[PolyML.Compiler.CPOutStream put,
PolyML.Compiler.CPLineNo (fn () => ! current_line),
PolyML.Compiler.CPErrorMessageProc put_message,
- PolyML.Compiler.CPNameSpace name_space];
+ PolyML.Compiler.CPNameSpace name_space,
+ PolyML.Compiler.CPPrintInAlphabeticalOrder false];
val _ =
(while not (List.null (! in_buffer)) do
PolyML.compiler (get, parameters) ())
--- a/src/Pure/ML-Systems/polyml_common.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/ML-Systems/polyml_common.ML Wed Mar 04 10:45:52 2009 +0100
@@ -47,18 +47,19 @@
(* compiler-independent timing functions *)
fun start_timing () =
- let val CPUtimer = Timer.startCPUTimer();
- val time = Timer.checkCPUTimer(CPUtimer)
- in (CPUtimer,time) end;
+ let
+ val timer = Timer.startCPUTimer ();
+ val time = Timer.checkCPUTimer timer;
+ in (timer, time) end;
-fun end_timing (CPUtimer, {sys,usr}) =
- let open Time (*...for Time.toString, Time.+ and Time.- *)
- val {sys=sys2,usr=usr2} = Timer.checkCPUTimer(CPUtimer)
- in "User " ^ toString (usr2-usr) ^
- " All "^ toString (sys2-sys + usr2-usr) ^
- " secs"
- handle Time => ""
- end;
+fun end_timing (timer, {sys, usr}) =
+ let
+ open Time; (*...for Time.toString, Time.+ and Time.- *)
+ val {sys = sys2, usr = usr2} = Timer.checkCPUTimer timer;
+ val user = usr2 - usr;
+ val all = user + sys2 - sys;
+ val message = "User " ^ toString user ^ " All "^ toString all ^ " secs" handle Time => "";
+ in {message = message, user = user, all = all} end;
fun check_timer timer =
let
--- a/src/Pure/ML-Systems/smlnj.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/ML-Systems/smlnj.ML Wed Mar 04 10:45:52 2009 +0100
@@ -59,18 +59,19 @@
(* compiler-independent timing functions *)
fun start_timing () =
- let val CPUtimer = Timer.startCPUTimer();
- val time = Timer.checkCPUTimer(CPUtimer)
- in (CPUtimer,time) end;
+ let
+ val timer = Timer.startCPUTimer ();
+ val time = Timer.checkCPUTimer timer;
+ in (timer, time) end;
-fun end_timing (CPUtimer, {sys,usr}) =
- let open Time (*...for Time.toString, Time.+ and Time.- *)
- val {sys=sys2,usr=usr2} = Timer.checkCPUTimer(CPUtimer)
- in "User " ^ toString (usr2-usr) ^
- " All "^ toString (sys2-sys + usr2-usr) ^
- " secs"
- handle Time => ""
- end;
+fun end_timing (timer, {sys, usr}) =
+ let
+ open Time; (*...for Time.toString, Time.+ and Time.- *)
+ val {sys = sys2, usr = usr2} = Timer.checkCPUTimer timer;
+ val user = usr2 - usr;
+ val all = user + sys2 - sys;
+ val message = "User " ^ toString user ^ " All "^ toString all ^ " secs" handle Time => "";
+ in {message = message, user = user, all = all} end;
fun check_timer timer =
let
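In all three ML-system files above, end_timing now returns a record {message, user, all} instead of a flat string; callers such as report.ML further down select #message. A minimal sketch of the new calling convention, assuming only the fields introduced here:

  (* time a computation and print the human-readable part of the result *)
  fun timed f =
    let
      val start = start_timing ();
      val result = f ();
      val {message, user, all} = end_timing start;
    in (writeln message; result) end;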
--- a/src/Pure/ML/ml_antiquote.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/ML/ml_antiquote.ML Wed Mar 04 10:45:52 2009 +0100
@@ -59,12 +59,13 @@
-(** concrete antiquotations **)
+(** misc antiquotations **)
structure P = OuterParse;
-
-(* misc *)
+val _ = inline "binding" (Scan.lift (P.position Args.name) >> (fn b =>
+ ML_Syntax.atomic ("Binding.make " ^
+ ML_Syntax.print_pair ML_Syntax.print_string ML_Syntax.print_position b)));
val _ = value "theory"
(Scan.lift Args.name >> (fn name => "ThyInfo.get_theory " ^ ML_Syntax.print_string name)
--- a/src/Pure/ML/ml_syntax.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/ML/ml_syntax.ML Wed Mar 04 10:45:52 2009 +0100
@@ -18,6 +18,8 @@
val print_char: string -> string
val print_string: string -> string
val print_strings: string list -> string
+ val print_properties: Properties.T -> string
+ val print_position: Position.T -> string
val print_indexname: indexname -> string
val print_class: class -> string
val print_sort: sort -> string
@@ -68,6 +70,9 @@
val print_string = quote o translate_string print_char;
val print_strings = print_list print_string;
+val print_properties = print_list (print_pair print_string print_string);
+fun print_position pos = "Position.of_properties " ^ print_properties (Position.properties_of pos);
+
val print_indexname = print_pair print_string print_int;
val print_class = print_string;
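The new ML_Syntax.print_properties and print_position functions exist to support the binding antiquotation added to ml_antiquote.ML above: the antiquotation is inlined as a Binding.make call whose position argument is rendered via print_position. A hypothetical use inside an ML block (the fact name is made up for illustration; PureThy.add_thm is as declared further down in this changeset):

  (* store a dummy theorem under a position-aware binding *)
  fun store_dummy thy =
    PureThy.add_thm ((@{binding my_dummy}, Drule.dummy_thm), []) thy;
  (* @{binding my_dummy} expands to roughly
     Binding.make ("my_dummy", Position.of_properties [...the actual source position...]) *)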
--- a/src/Pure/Proof/proofchecker.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Proof/proofchecker.ML Wed Mar 04 10:45:52 2009 +0100
@@ -56,7 +56,7 @@
| thm_of _ _ (PAxm (name, _, SOME Ts)) =
thm_of_atom (Thm.axiom thy name) Ts
- | thm_of _ Hs (PBound i) = List.nth (Hs, i)
+ | thm_of _ Hs (PBound i) = nth Hs i
| thm_of (vs, names) Hs (Abst (s, SOME T, prf)) =
let
--- a/src/Pure/Proof/reconstruct.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Proof/reconstruct.ML Wed Mar 04 10:45:52 2009 +0100
@@ -98,7 +98,7 @@
let val (env3, V) = mk_tvar (env2, [])
in (t' $ u', V, vTs2, unifyT thy env3 T (U --> V)) end)
end
- | infer_type thy env Ts vTs (t as Bound i) = ((t, List.nth (Ts, i), vTs, env)
+ | infer_type thy env Ts vTs (t as Bound i) = ((t, nth Ts i, vTs, env)
handle Subscript => error ("infer_type: bad variable index " ^ string_of_int i));
fun cantunify thy (t, u) = error ("Non-unifiable terms:\n" ^
@@ -106,7 +106,7 @@
fun decompose thy Ts (env, p as (t, u)) =
let fun rigrig (a, T) (b, U) uT ts us = if a <> b then cantunify thy p
- else apsnd flat (foldl_map (decompose thy Ts) (uT env T U, ts ~~ us))
+ else apsnd flat (Library.foldl_map (decompose thy Ts) (uT env T U, ts ~~ us))
in case pairself (strip_comb o Envir.head_norm env) p of
((Const c, ts), (Const d, us)) => rigrig c d (unifyT thy) ts us
| ((Free c, ts), (Free d, us)) => rigrig c d (unifyT thy) ts us
@@ -141,7 +141,7 @@
val tvars = OldTerm.term_tvars prop;
val tfrees = OldTerm.term_tfrees prop;
val (env', Ts) = (case opTs of
- NONE => foldl_map mk_tvar (env, map snd tvars @ map snd tfrees)
+ NONE => Library.foldl_map mk_tvar (env, map snd tvars @ map snd tfrees)
| SOME Ts => (env, Ts));
val prop' = subst_atomic_types (map TVar tvars @ map TFree tfrees ~~ Ts)
(forall_intr_vfs prop) handle Library.UnequalLengths =>
@@ -152,7 +152,7 @@
fun head_norm (prop, prf, cnstrts, env, vTs) =
(Envir.head_norm env prop, prf, cnstrts, env, vTs);
- fun mk_cnstrts env _ Hs vTs (PBound i) = ((List.nth (Hs, i), PBound i, [], env, vTs)
+ fun mk_cnstrts env _ Hs vTs (PBound i) = ((nth Hs i, PBound i, [], env, vTs)
handle Subscript => error ("mk_cnstrts: bad variable index " ^ string_of_int i))
| mk_cnstrts env Ts Hs vTs (Abst (s, opT, cprf)) =
let
@@ -304,7 +304,7 @@
val head_norm = Envir.head_norm (Envir.empty 0);
-fun prop_of0 Hs (PBound i) = List.nth (Hs, i)
+fun prop_of0 Hs (PBound i) = nth Hs i
| prop_of0 Hs (Abst (s, SOME T, prf)) =
Term.all T $ (Abs (s, T, prop_of0 Hs prf))
| prop_of0 Hs (AbsP (s, SOME t, prf)) =
--- a/src/Pure/ProofGeneral/README Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/ProofGeneral/README Wed Mar 04 10:45:52 2009 +0100
@@ -34,4 +34,4 @@
http://proofgeneral.inf.ed.ac.uk/wiki/Main/PGIP
David Aspinall, Dec. 2006.
-$Id$
+
--- a/src/Pure/README Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/README Wed Mar 04 10:45:52 2009 +0100
@@ -19,5 +19,3 @@
See ROOT.ML for further information.
-
-$Id$
--- a/src/Pure/ROOT.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/ROOT.ML Wed Mar 04 10:45:52 2009 +0100
@@ -81,12 +81,18 @@
use "goal.ML";
use "axclass.ML";
-(*the main Isar system*)
+(*main Isar stuff*)
cd "Isar"; use "ROOT.ML"; cd "..";
use "subgoal.ML";
use "Proof/extraction.ML";
+(*Isabelle/Isar system*)
+use "System/session.ML";
+use "System/isar.ML";
+use "System/isabelle_process.ML";
+
+(*additional tools*)
cd "Tools"; use "ROOT.ML"; cd "..";
use "codegen.ML";
--- a/src/Pure/Syntax/parser.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Syntax/parser.ML Wed Mar 04 10:45:52 2009 +0100
@@ -73,10 +73,10 @@
val chain_from = case (pri, rhs) of (~1, [Nonterminal (id, ~1)]) => SOME id | _ => NONE;
(*store chain if it does not already exist*)
- val (new_chain, chains') = case chain_from of NONE => (NONE, chains) | SOME from_ =>
- let val old_tos = these (AList.lookup (op =) chains from_) in
+ val (new_chain, chains') = case chain_from of NONE => (NONE, chains) | SOME from =>
+ let val old_tos = these (AList.lookup (op =) chains from) in
if member (op =) old_tos lhs then (NONE, chains)
- else (SOME from_, AList.update (op =) (from_, insert (op =) lhs old_tos) chains)
+ else (SOME from, AList.update (op =) (from, insert (op =) lhs old_tos) chains)
end;
(*propagate new chain in lookahead and lambda lists;
@@ -410,7 +410,7 @@
fun pretty_nt (name, tag) =
let
- fun prod_of_chain from_ = ([Nonterminal (from_, ~1)], "", ~1);
+ fun prod_of_chain from = ([Nonterminal (from, ~1)], "", ~1);
val nt_prods =
Library.foldl (gen_union op =) ([], map snd (snd (Array.sub (prods, tag)))) @
@@ -552,8 +552,8 @@
val to_tag = convert_tag to;
fun make [] result = result
- | make (from_ :: froms) result = make froms ((to_tag,
- ([Nonterminal (convert_tag from_, ~1)], "", ~1)) :: result);
+ | make (from :: froms) result = make froms ((to_tag,
+ ([Nonterminal (convert_tag from, ~1)], "", ~1)) :: result);
in mk_chain_prods cs (make froms [] @ result) end;
val chain_prods = mk_chain_prods chains2 [];
--- a/src/Pure/Syntax/syn_ext.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Syntax/syn_ext.ML Wed Mar 04 10:45:52 2009 +0100
@@ -26,7 +26,7 @@
val logic: string
val args: string
val cargs: string
- val any_: string
+ val any: string
val sprop: string
val typ_to_nonterm: typ -> string
datatype xsymb =
@@ -108,8 +108,8 @@
val sprop = "#prop";
val spropT = Type (sprop, []);
-val any_ = "any";
-val anyT = Type (any_, []);
+val any = "any";
+val anyT = Type (any, []);
@@ -181,7 +181,7 @@
| typ_to_nt default _ = default;
(*get nonterminal for rhs*)
-val typ_to_nonterm = typ_to_nt any_;
+val typ_to_nonterm = typ_to_nt any;
(*get nonterminal for lhs*)
val typ_to_nonterm1 = typ_to_nt logic;
--- a/src/Pure/Syntax/syn_trans.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Syntax/syn_trans.ML Wed Mar 04 10:45:52 2009 +0100
@@ -222,7 +222,7 @@
(* implicit structures *)
fun the_struct structs i =
- if 1 <= i andalso i <= length structs then List.nth (structs, i - 1)
+ if 1 <= i andalso i <= length structs then nth structs (i - 1)
else raise error ("Illegal reference to implicit structure #" ^ string_of_int i);
fun struct_tr structs (*"_struct"*) [Const ("_indexdefault", _)] =
--- a/src/Pure/Syntax/syntax.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Syntax/syntax.ML Wed Mar 04 10:45:52 2009 +0100
@@ -390,7 +390,7 @@
val basic_nonterms =
(Lexicon.terminals @ [SynExt.logic, "type", "types", "sort", "classes",
SynExt.args, SynExt.cargs, "pttrn", "pttrns", "idt", "idts", "aprop",
- "asms", SynExt.any_, SynExt.sprop, "num_const", "float_const",
+ "asms", SynExt.any, SynExt.sprop, "num_const", "float_const",
"index", "struct"]);
--- a/src/Pure/Thy/thy_output.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Thy/thy_output.ML Wed Mar 04 10:45:52 2009 +0100
@@ -519,9 +519,9 @@
fun ml_type txt = "val _ = NONE : (" ^ txt ^ ") option;";
fun ml_struct txt = "functor DUMMY_FUNCTOR() = struct structure DUMMY = " ^ txt ^ " end;"
-fun output_ml ml src ctxt (txt, pos) =
+fun output_ml ml _ ctxt (txt, pos) =
(ML_Context.eval_in (SOME ctxt) false pos (ml txt);
- (if ! source then str_of_source src else SymbolPos.content (SymbolPos.explode (txt, pos)))
+ SymbolPos.content (SymbolPos.explode (txt, pos))
|> (if ! quotes then quote else I)
|> (if ! display then enclose "\\begin{verbatim}\n" "\n\\end{verbatim}"
else
--- a/src/Pure/Tools/ROOT.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/Tools/ROOT.ML Wed Mar 04 10:45:52 2009 +0100
@@ -4,11 +4,13 @@
*)
use "named_thms.ML";
-use "isabelle_process.ML";
(*basic XML support*)
use "xml_syntax.ML";
+use "find_theorems.ML";
+use "find_consts.ML";
+
(*quickcheck/autosolve needed here because of pg preferences*)
use "../../Tools/quickcheck.ML";
use "../../Tools/auto_solve.ML";
--- a/src/Pure/axclass.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/axclass.ML Wed Mar 04 10:45:52 2009 +0100
@@ -8,7 +8,7 @@
signature AX_CLASS =
sig
val define_class: bstring * class list -> string list ->
- ((binding * attribute list) * term list) list -> theory -> class * theory
+ (Thm.binding * term list) list -> theory -> class * theory
val add_classrel: thm -> theory -> theory
val add_arity: thm -> theory -> theory
val prove_classrel: class * class -> tactic -> theory -> theory
--- a/src/Pure/conv.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/conv.ML Wed Mar 04 10:45:52 2009 +0100
@@ -7,12 +7,17 @@
infix 1 then_conv;
infix 0 else_conv;
+signature BASIC_CONV =
+sig
+ val then_conv: conv * conv -> conv
+ val else_conv: conv * conv -> conv
+end;
+
signature CONV =
sig
+ include BASIC_CONV
val no_conv: conv
val all_conv: conv
- val then_conv: conv * conv -> conv
- val else_conv: conv * conv -> conv
val first_conv: conv list -> conv
val every_conv: conv list -> conv
val try_conv: conv -> conv
@@ -171,3 +176,6 @@
| NONE => raise THM ("gconv_rule", i, [th]));
end;
+
+structure BasicConv: BASIC_CONV = Conv;
+open BasicConv;
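Moving then_conv and else_conv into a separate BASIC_CONV signature and opening BasicConv at the end keeps these infix combinators available unqualified at top level, while the rest of the interface stays behind the Conv structure. A small sketch of what that buys, using only combinators from the signature above:

  (* with BasicConv opened, the infixes work unqualified *)
  fun try_conv' cv = cv else_conv Conv.all_conv;   (* essentially Conv.try_conv *)
  fun twice cv = cv then_conv cv;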
--- a/src/Pure/display.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/display.ML Wed Mar 04 10:45:52 2009 +0100
@@ -20,7 +20,6 @@
val pretty_thm_aux: Pretty.pp -> bool -> bool -> term list -> thm -> Pretty.T
val pretty_thm: thm -> Pretty.T
val string_of_thm: thm -> string
- val pretty_fact: Facts.ref * thm -> Pretty.T
val pretty_thms: thm list -> Pretty.T
val pretty_thm_sg: theory -> thm -> Pretty.T
val pretty_thms_sg: theory -> thm list -> Pretty.T
@@ -93,10 +92,6 @@
val string_of_thm = Pretty.string_of o pretty_thm;
-fun pretty_fact (thmref, thm) = Pretty.block
- [Pretty.str (Facts.string_of_ref thmref), Pretty.str ":", Pretty.brk 1,
- pretty_thm thm];
-
fun pretty_thms [th] = pretty_thm th
| pretty_thms ths = Pretty.block (Pretty.fbreaks (map pretty_thm ths));
--- a/src/Pure/envir.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/envir.ML Wed Mar 04 10:45:52 2009 +0100
@@ -265,7 +265,7 @@
| fast Ts (Const (_, T)) = T
| fast Ts (Free (_, T)) = T
| fast Ts (Bound i) =
- (List.nth (Ts, i)
+ (nth Ts i
handle Subscript => raise TERM ("fastype: Bound", [Bound i]))
| fast Ts (Var (_, T)) = T
| fast Ts (Abs (_, T, u)) = T --> fast (T :: Ts) u
--- a/src/Pure/library.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/library.ML Wed Mar 04 10:45:52 2009 +0100
@@ -76,7 +76,6 @@
val perhaps_loop: ('a -> 'a option) -> 'a -> 'a option
val foldl1: ('a * 'a -> 'a) -> 'a list -> 'a
val foldr1: ('a * 'a -> 'a) -> 'a list -> 'a
- val foldl_map: ('a * 'b -> 'a * 'c) -> 'a * 'b list -> 'a * 'c list
val flat: 'a list list -> 'a list
val unflat: 'a list list -> 'b list -> 'b list list
val burrow: ('a list -> 'b list) -> 'a list list -> 'b list list
@@ -238,6 +237,7 @@
include BASIC_LIBRARY
val foldl: ('a * 'b -> 'a) -> 'a * 'b list -> 'a
val foldr: ('a * 'b -> 'b) -> 'a list * 'b -> 'b
+ val foldl_map: ('a * 'b -> 'a * 'c) -> 'a * 'b list -> 'a * 'c list
val take: int * 'a list -> 'a list
val drop: int * 'a list -> 'a list
val last_elem: 'a list -> 'a
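foldl_map is demoted from BASIC_LIBRARY to the full LIBRARY signature, which is why call sites earlier in this changeset now write Library.foldl_map explicitly; presumably the curried fold_map is the preferred style for new code. A sketch of the correspondence between the two, under that assumption:

  (* pair-based: state is the first component, results come second *)
  val (sum, doubled) = Library.foldl_map (fn (s, x) => (s + x, 2 * x)) (0, [1, 2, 3]);
  (* curried: results come first, final state second *)
  val (doubled', sum') = fold_map (fn x => fn s => (2 * x, s + x)) [1, 2, 3] 0;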
--- a/src/Pure/mk Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/mk Wed Mar 04 10:45:52 2009 +0100
@@ -1,6 +1,5 @@
#!/usr/bin/env bash
#
-# $Id$
# Author: Markus Wenzel, TU Muenchen
#
# mk - build Pure Isabelle.
--- a/src/Pure/more_thm.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/more_thm.ML Wed Mar 04 10:45:52 2009 +0100
@@ -40,6 +40,8 @@
val close_derivation: thm -> thm
val add_axiom: binding * term -> theory -> thm * theory
val add_def: bool -> bool -> binding * term -> theory -> thm * theory
+ type binding = binding * attribute list
+ val empty_binding: binding
val rule_attribute: (Context.generic -> thm -> thm) -> attribute
val declaration_attribute: (thm -> Context.generic -> Context.generic) -> attribute
val theory_attributes: attribute list -> theory * thm -> theory * thm
@@ -301,6 +303,9 @@
(** attributes **)
+type binding = binding * attribute list;
+val empty_binding: binding = (Binding.empty, []);
+
fun rule_attribute f (x, th) = (x, f x th);
fun declaration_attribute f (x, th) = (f th x, th);
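The new Thm.binding type abbreviation (binding * attribute list) and Thm.empty_binding are what let the long attribute-pair tuples in the Isar signatures above shrink. A sketch of the two ways of writing the same value, grounded in the declarations just added:

  (* an anonymous fact binding, formerly spelled out as (Binding.empty, []) *)
  val anon: Thm.binding = Thm.empty_binding;
  (* a named binding without attributes *)
  val named: Thm.binding = (Binding.name "foo", []);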
--- a/src/Pure/proofterm.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/proofterm.ML Wed Mar 04 10:45:52 2009 +0100
@@ -470,8 +470,8 @@
val n = length args;
fun subst' lev (Bound i) =
(if i<lev then raise SAME (*var is locally bound*)
- else incr_boundvars lev (List.nth (args, i-lev))
- handle Subscript => Bound (i-n) (*loose: change it*))
+ else incr_boundvars lev (nth args (i-lev))
+ handle Subscript => Bound (i-n)) (*loose: change it*)
| subst' lev (Abs (a, T, body)) = Abs (a, T, subst' (lev+1) body)
| subst' lev (f $ t) = (subst' lev f $ substh' lev t
handle SAME => f $ subst' lev t)
@@ -494,7 +494,7 @@
val n = length args;
fun subst (PBound i) Plev tlev =
(if i < Plev then raise SAME (*var is locally bound*)
- else incr_pboundvars Plev tlev (List.nth (args, i-Plev))
+ else incr_pboundvars Plev tlev (nth args (i-Plev))
handle Subscript => PBound (i-n) (*loose: change it*))
| subst (AbsP (a, t, body)) Plev tlev = AbsP (a, t, subst body (Plev+1) tlev)
| subst (Abst (a, T, body)) Plev tlev = Abst (a, T, subst body Plev (tlev+1))
@@ -935,7 +935,7 @@
in (is, ch orelse ch', ts',
if ch orelse ch' then prf' % t' else prf) end
| shrink' ls lev ts prfs (prf as PBound i) =
- (if exists (fn SOME (Bound j) => lev-j <= List.nth (ls, i) | _ => true) ts
+ (if exists (fn SOME (Bound j) => lev-j <= nth ls i | _ => true) ts
orelse has_duplicates (op =)
(Library.foldl (fn (js, SOME (Bound j)) => j :: js | (js, _) => js) ([], ts))
orelse exists #1 prfs then [i] else [], false, map (pair false) ts, prf)
--- a/src/Pure/pure_setup.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/pure_setup.ML Wed Mar 04 10:45:52 2009 +0100
@@ -33,7 +33,7 @@
map (fn (x, y) => Pretty.str (x ^ "=" ^ y)) o Position.properties_of));
install_pp (make_pp ["Thm", "thm"] ProofDisplay.pprint_thm);
install_pp (make_pp ["Thm", "cterm"] ProofDisplay.pprint_cterm);
-install_pp (make_pp ["Binding", "binding"] (Pretty.pprint o Pretty.str o Binding.display));
+install_pp (make_pp ["Binding", "binding"] (Pretty.pprint o Pretty.str o Binding.str_of));
install_pp (make_pp ["Thm", "ctyp"] ProofDisplay.pprint_ctyp);
install_pp (make_pp ["Context", "theory"] Context.pprint_thy);
install_pp (make_pp ["Context", "theory_ref"] Context.pprint_thy_ref);
--- a/src/Pure/pure_thy.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/pure_thy.ML Wed Mar 04 10:45:52 2009 +0100
@@ -31,10 +31,10 @@
val add_thm: (binding * thm) * attribute list -> theory -> thm * theory
val add_thmss: ((binding * thm list) * attribute list) list -> theory -> thm list list * theory
val add_thms_dynamic: binding * (Context.generic -> thm list) -> theory -> theory
- val note_thmss: string -> ((binding * attribute list) *
- (thm list * attribute list) list) list -> theory -> (string * thm list) list * theory
- val note_thmss_grouped: string -> string -> ((binding * attribute list) *
- (thm list * attribute list) list) list -> theory -> (string * thm list) list * theory
+ val note_thmss: string -> (Thm.binding *
+ (thm list * attribute list) list) list -> theory -> (string * thm list) list * theory
+ val note_thmss_grouped: string -> string -> (Thm.binding *
+ (thm list * attribute list) list) list -> theory -> (string * thm list) list * theory
val add_axioms: ((binding * term) * attribute list) list -> theory -> thm list * theory
val add_axioms_cmd: ((bstring * string) * attribute list) list -> theory -> thm list * theory
val add_defs: bool -> ((binding * term) * attribute list) list ->
@@ -151,14 +151,15 @@
fun enter_thms pre_name post_name app_att (b, thms) thy =
if Binding.is_empty b
then swap (enter_proofs (app_att (thy, thms)))
- else let
- val naming = Sign.naming_of thy;
- val name = NameSpace.full_name naming b;
- val (thy', thms') =
- enter_proofs (apsnd (post_name name) (app_att (thy, pre_name name thms)));
- val thms'' = map (Thm.transfer thy') thms';
- val thy'' = thy' |> (FactsData.map o apfst) (Facts.add_global naming (b, thms'') #> snd);
- in (thms'', thy'') end;
+ else
+ let
+ val naming = Sign.naming_of thy;
+ val name = NameSpace.full_name naming b;
+ val (thy', thms') =
+ enter_proofs (apsnd (post_name name) (app_att (thy, pre_name name thms)));
+ val thms'' = map (Thm.transfer thy') thms';
+ val thy'' = thy' |> (FactsData.map o apfst) (Facts.add_global naming (b, thms'') #> snd);
+ in (thms'', thy'') end;
(* store_thm(s) *)
@@ -177,7 +178,7 @@
fun add_thms_atts pre_name ((b, thms), atts) =
enter_thms pre_name (name_thms false true Position.none)
- (foldl_map (Thm.theory_attributes atts)) (b, thms);
+ (Library.foldl_map (Thm.theory_attributes atts)) (b, thms);
fun gen_add_thmss pre_name =
fold_map (add_thms_atts pre_name);
@@ -207,9 +208,9 @@
val name = Sign.full_name thy b;
val _ = Position.report (Markup.fact_decl name) pos;
- fun app (x, (ths, atts)) = foldl_map (Thm.theory_attributes atts) (x, ths);
+ fun app (x, (ths, atts)) = Library.foldl_map (Thm.theory_attributes atts) (x, ths);
val (thms, thy') = thy |> enter_thms
- (name_thmss true pos) (name_thms false true pos) (apsnd flat o foldl_map app)
+ (name_thmss true pos) (name_thms false true pos) (apsnd flat o Library.foldl_map app)
(b, map (fn (ths, atts) => (ths, surround tag (atts @ more_atts))) ths_atts);
in ((name, thms), thy') end);
--- a/src/Pure/sign.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/sign.ML Wed Mar 04 10:45:52 2009 +0100
@@ -338,7 +338,7 @@
fun typ_of (_, Const (_, T)) = T
| typ_of (_, Free (_, T)) = T
| typ_of (_, Var (_, T)) = T
- | typ_of (bs, Bound i) = snd (List.nth (bs, i) handle Subscript =>
+ | typ_of (bs, Bound i) = snd (nth bs i handle Subscript =>
raise TYPE ("Loose bound variable: B." ^ string_of_int i, [], [Bound i]))
| typ_of (bs, Abs (x, T, body)) = T --> typ_of ((x, T) :: bs, body)
| typ_of (bs, t $ u) =
@@ -507,12 +507,12 @@
val prepT = Type.no_tvars o Term.no_dummyT o certify_typ thy o parse_typ ctxt;
fun prep (raw_b, raw_T, raw_mx) =
let
- val (mx_name, mx) = Syntax.const_mixfix (Binding.base_name raw_b) raw_mx;
- val b = Binding.map_base (K mx_name) raw_b;
+ val (mx_name, mx) = Syntax.const_mixfix (Binding.name_of raw_b) raw_mx;
+ val b = Binding.map_name (K mx_name) raw_b;
val c = full_name thy b;
- val c_syn = if authentic then Syntax.constN ^ c else Binding.base_name b;
+ val c_syn = if authentic then Syntax.constN ^ c else Binding.name_of b;
val T = (prepT raw_T handle TYPE (msg, _, _) => error msg) handle ERROR msg =>
- cat_error msg ("in declaration of constant " ^ quote (Binding.display b));
+ cat_error msg ("in declaration of constant " ^ quote (Binding.str_of b));
val T' = Logic.varifyT T;
in ((b, T'), (c_syn, T', mx), Const (c, T)) end;
val args = map prep raw_args;
@@ -549,7 +549,7 @@
val pp = Syntax.pp_global thy;
val prep_tm = no_frees pp o Term.no_dummy_patterns o cert_term_abbrev thy;
val t = (prep_tm raw_t handle TYPE (msg, _, _) => error msg | TERM (msg, _) => error msg)
- handle ERROR msg => cat_error msg ("in constant abbreviation " ^ quote (Binding.display b));
+ handle ERROR msg => cat_error msg ("in constant abbreviation " ^ quote (Binding.str_of b));
val (res, consts') = consts_of thy
|> Consts.abbreviate pp (tsig_of thy) (naming_of thy) mode tags (b, t);
in (res, thy |> map_consts (K consts')) end;
--- a/src/Pure/sorts.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/sorts.ML Wed Mar 04 10:45:52 2009 +0100
@@ -46,9 +46,7 @@
val add_arities: Pretty.pp -> string * (class * sort list) list -> algebra -> algebra
val empty_algebra: algebra
val merge_algebra: Pretty.pp -> algebra * algebra -> algebra
- val classrels_of: algebra -> (class * class list) list
- val instances_of: algebra -> (string * class) list
- val subalgebra: Pretty.pp -> (class -> bool) -> (class * string -> sort list)
+ val subalgebra: Pretty.pp -> (class -> bool) -> (class * string -> sort list option)
-> algebra -> (sort -> sort) * algebra
type class_error
val class_error: Pretty.pp -> class_error -> string
@@ -302,19 +300,14 @@
(* algebra projections *)
-fun classrels_of (Algebra {classes, ...}) =
- map (fn [c] => (c, Graph.imm_succs classes c)) (rev (Graph.strong_conn classes));
-
-fun instances_of (Algebra {arities, ...}) =
- Symtab.fold (fn (a, cs) => append (map (pair a o fst) cs)) arities [];
-
fun subalgebra pp P sargs (algebra as Algebra {classes, arities}) =
let
val restrict_sort = minimize_sort algebra o filter P o Graph.all_succs classes;
fun restrict_arity tyco (c, (_, Ss)) =
- if P c then
- SOME (c, (c, Ss |> map2 (curry (inter_sort algebra)) (sargs (c, tyco))
+ if P c then case sargs (c, tyco)
+ of SOME sorts => SOME (c, (c, Ss |> map2 (curry (inter_sort algebra)) sorts
|> map restrict_sort))
+ | NONE => NONE
else NONE;
val classes' = classes |> Graph.subgraph P;
val arities' = arities |> Symtab.map' (map_filter o restrict_arity);
--- a/src/Pure/tctical.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/tctical.ML Wed Mar 04 10:45:52 2009 +0100
@@ -349,15 +349,13 @@
(*Returns all states that have changed in subgoal i, counted from the LAST
subgoal. For stac, for example.*)
fun CHANGED_GOAL tac i st =
- let val np = nprems_of st
+ let val np = Thm.nprems_of st
val d = np-i (*distance from END*)
- val t = List.nth(prems_of st, i-1)
+ val t = Thm.term_of (Thm.cprem_of st i)
fun diff st' =
- nprems_of st' - d <= 0 (*the subgoal no longer exists*)
+ Thm.nprems_of st' - d <= 0 (*the subgoal no longer exists*)
orelse
- not (Pattern.aeconv (t,
- List.nth(prems_of st',
- nprems_of st' - d - 1)))
+ not (Pattern.aeconv (t, Thm.term_of (Thm.cprem_of st' (Thm.nprems_of st' - d))))
in Seq.filter diff (tac i st) end
handle Subscript => Seq.empty (*no subgoal i*);
--- a/src/Pure/term.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/term.ML Wed Mar 04 10:45:52 2009 +0100
@@ -297,7 +297,7 @@
Ts = [T0,T1,...] holds types of bound variables 0, 1, ...*)
fun type_of1 (Ts, Const (_,T)) = T
| type_of1 (Ts, Free (_,T)) = T
- | type_of1 (Ts, Bound i) = (List.nth (Ts,i)
+ | type_of1 (Ts, Bound i) = (nth Ts i
handle Subscript => raise TYPE("type_of: bound variable", [], [Bound i]))
| type_of1 (Ts, Var (_,T)) = T
| type_of1 (Ts, Abs (_,T,body)) = T --> type_of1(T::Ts, body)
@@ -322,7 +322,7 @@
| _ => raise TERM("fastype_of: expected function type", [f$u]))
| fastype_of1 (_, Const (_,T)) = T
| fastype_of1 (_, Free (_,T)) = T
- | fastype_of1 (Ts, Bound i) = (List.nth(Ts,i)
+ | fastype_of1 (Ts, Bound i) = (nth Ts i
handle Subscript => raise TERM("fastype_of: Bound", [Bound i]))
| fastype_of1 (_, Var (_,T)) = T
| fastype_of1 (Ts, Abs (_,T,u)) = T --> fastype_of1 (T::Ts, u);
@@ -387,17 +387,17 @@
(*number of atoms and abstractions in a term*)
fun size_of_term tm =
let
- fun add_size (t $ u, n) = add_size (t, add_size (u, n))
- | add_size (Abs (_ ,_, t), n) = add_size (t, n + 1)
- | add_size (_, n) = n + 1;
- in add_size (tm, 0) end;
+ fun add_size (t $ u) n = add_size t (add_size u n)
+ | add_size (Abs (_ ,_, t)) n = add_size t (n + 1)
+ | add_size _ n = n + 1;
+ in add_size tm 0 end;
-(*number of tfrees, tvars, and constructors in a type*)
+(*number of atoms and constructors in a type*)
fun size_of_typ ty =
let
- fun add_size (Type (_, ars), n) = foldl add_size (n + 1) ars
- | add_size (_, n) = n + 1;
- in add_size (ty, 0) end;
+ fun add_size (Type (_, tys)) n = fold add_size tys (n + 1)
+ | add_size _ n = n + 1;
+ in add_size ty 0 end;
fun map_atyps f (Type (a, Ts)) = Type (a, map (map_atyps f) Ts)
| map_atyps f T = f T;
@@ -638,7 +638,7 @@
val n = length args;
fun subst (t as Bound i, lev) =
(if i < lev then raise SAME (*var is locally bound*)
- else incr_boundvars lev (List.nth (args, i - lev))
+ else incr_boundvars lev (nth args (i - lev))
handle Subscript => Bound (i - n)) (*loose: change it*)
| subst (Abs (a, T, body), lev) = Abs (a, T, subst (body, lev + 1))
| subst (f $ t, lev) =
--- a/src/Pure/theory.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/theory.ML Wed Mar 04 10:45:52 2009 +0100
@@ -258,7 +258,7 @@
val _ = check_overloading thy overloaded lhs_const;
in defs |> dependencies thy unchecked true name lhs_const rhs_consts end
handle ERROR msg => cat_error msg (Pretty.string_of (Pretty.block
- [Pretty.str ("The error(s) above occurred in definition " ^ quote (Binding.display b) ^ ":"),
+ [Pretty.str ("The error(s) above occurred in definition " ^ quote (Binding.str_of b) ^ ":"),
Pretty.fbrk, Pretty.quote (Syntax.pretty_term_global thy tm)]));
--- a/src/Pure/type_infer.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Pure/type_infer.ML Wed Mar 04 10:45:52 2009 +0100
@@ -369,7 +369,7 @@
fun inf _ (PConst (_, T)) = T
| inf _ (PFree (_, T)) = T
| inf _ (PVar (_, T)) = T
- | inf bs (PBound i) = snd (List.nth (bs, i) handle Subscript => err_loose i)
+ | inf bs (PBound i) = snd (nth bs i handle Subscript => err_loose i)
| inf bs (PAbs (x, T, t)) = PType ("fun", [T, inf ((x, T) :: bs) t])
| inf bs (PAppl (t, u)) =
let
--- a/src/Tools/Compute_Oracle/Compute_Oracle.thy Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/Compute_Oracle/Compute_Oracle.thy Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/Compute_Oracle/Compute_Oracle.thy
- ID: $Id$
Author: Steven Obua, TU Munich
Steven Obua's evaluator.
--- a/src/Tools/Compute_Oracle/am_compiler.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/Compute_Oracle/am_compiler.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/Compute_Oracle/am_compiler.ML
- ID: $Id$
Author: Steven Obua
*)
--- a/src/Tools/Compute_Oracle/am_ghc.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/Compute_Oracle/am_ghc.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/Compute_Oracle/am_ghc.ML
- ID: $Id$
Author: Steven Obua
*)
--- a/src/Tools/Compute_Oracle/am_interpreter.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/Compute_Oracle/am_interpreter.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/Compute_Oracle/am_interpreter.ML
- ID: $Id$
Author: Steven Obua
*)
--- a/src/Tools/Compute_Oracle/am_sml.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/Compute_Oracle/am_sml.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/Compute_Oracle/am_sml.ML
- ID: $Id$
Author: Steven Obua
ToDO: "parameterless rewrite cannot be used in pattern": In a lot of cases it CAN be used, and these cases should be handled properly;
--- a/src/Tools/Compute_Oracle/report.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/Compute_Oracle/report.ML Wed Mar 04 10:45:52 2009 +0100
@@ -13,7 +13,7 @@
let
val t1 = start_timing ()
val x = f ()
- val t2 = end_timing t1
+ val t2 = #message (end_timing t1)
val _ = writeln ((report_space ()) ^ "--> "^t2)
in
x
--- a/src/Tools/IsaPlanner/README Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/IsaPlanner/README Wed Mar 04 10:45:52 2009 +0100
@@ -1,4 +1,3 @@
-ID: $Id$
Author: Lucas Dixon, University of Edinburgh
Support files for IsaPlanner (see http://isaplanner.sourceforge.net).
--- a/src/Tools/IsaPlanner/isand.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/IsaPlanner/isand.ML Wed Mar 04 10:45:52 2009 +0100
@@ -132,7 +132,7 @@
fun allify_prem_var (vt as (n,ty),t) =
(Term.all ty) $ (Abs(n,ty,Term.abstract_over (Free vt, t)))
- fun allify_prem Ts p = foldr allify_prem_var p Ts
+ fun allify_prem Ts p = List.foldr allify_prem_var p Ts
val cTs = map (ctermify o Free) Ts
val cterm_asms = map (ctermify o allify_prem Ts) premts
@@ -306,7 +306,7 @@
in (Term.all ty) $ (Abs(n,ty,Term.abstract_over (vt, t))) end;
fun allify_for_sg_term ctermify vs t =
- let val t_alls = foldr allify_term t vs;
+ let val t_alls = List.foldr allify_term t vs;
val ct_alls = ctermify t_alls;
in
(ct_alls, Drule.forall_elim_list vs (Thm.assume ct_alls))
@@ -394,7 +394,7 @@
|> Drule.forall_intr_list cfvs
in Drule.compose_single (solth', i, gth) end;
-fun export_solutions (xs,th) = foldr (uncurry export_solution) th xs;
+fun export_solutions (xs,th) = List.foldr (uncurry export_solution) th xs;
(* fix parameters of a subgoal "i", as free variables, and create an
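
The isand.ML changes above, like the rw_inst.ML and ZF package hunks further down, systematically rename the old top-level foldr alias to the Standard Basis List.foldr; since the call sites change only the name, the curried argument order is evidently identical. A minimal sketch of the Basis function:

  val sum  = List.foldr (op +) 0 [1, 2, 3];        (* 6 *)
  val copy = List.foldr (op ::) [] ["a", "b"];     (* ["a", "b"] *)
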
--- a/src/Tools/IsaPlanner/rw_inst.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/IsaPlanner/rw_inst.ML Wed Mar 04 10:45:52 2009 +0100
@@ -136,7 +136,7 @@
fun mk_renamings tgt rule_inst =
let
val rule_conds = Thm.prems_of rule_inst
- val names = foldr OldTerm.add_term_names [] (tgt :: rule_conds);
+ val names = List.foldr OldTerm.add_term_names [] (tgt :: rule_conds);
val (conds_tyvs,cond_vs) =
Library.foldl (fn ((tyvs, vs), t) =>
(Library.union
@@ -147,7 +147,7 @@
val termvars = map Term.dest_Var (OldTerm.term_vars tgt);
val vars_to_fix = Library.union (termvars, cond_vs);
val (renamings, names2) =
- foldr (fn (((n,i),ty), (vs, names')) =>
+ List.foldr (fn (((n,i),ty), (vs, names')) =>
let val n' = Name.variant names' n in
((((n,i),ty), Free (n', ty)) :: vs, n'::names')
end)
@@ -166,13 +166,13 @@
let
val ignore_ixs = map fst ignore_insts;
val (tvars, tfrees) =
- foldr (fn (t, (varixs, tfrees)) =>
+ List.foldr (fn (t, (varixs, tfrees)) =>
(OldTerm.add_term_tvars (t,varixs),
OldTerm.add_term_tfrees (t,tfrees)))
([],[]) ts;
val unfixed_tvars =
List.filter (fn (ix,s) => not (member (op =) ignore_ixs ix)) tvars;
- val (fixtyinsts, _) = foldr new_tfree ([], map fst tfrees) unfixed_tvars
+ val (fixtyinsts, _) = List.foldr new_tfree ([], map fst tfrees) unfixed_tvars
in (fixtyinsts, tfrees) end;
@@ -248,7 +248,7 @@
Ts;
(* type-instantiate the var instantiations *)
- val insts_tyinst = foldr (fn ((ix,(ty,t)),insts_tyinst) =>
+ val insts_tyinst = List.foldr (fn ((ix,(ty,t)),insts_tyinst) =>
(ix, (Term.typ_subst_TVars term_typ_inst ty,
Term.subst_TVars term_typ_inst t))
:: insts_tyinst)
--- a/src/Tools/IsaPlanner/rw_tools.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/IsaPlanner/rw_tools.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/IsaPlanner/rw_tools.ML
- ID: $Id$
Author: Lucas Dixon, University of Edinburgh
Term related tools used for rewriting.
--- a/src/Tools/IsaPlanner/zipper.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/IsaPlanner/zipper.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/IsaPlanner/zipper.ML
- ID: $Id$
Author: Lucas Dixon, University of Edinburgh
A notion roughly based on Huet's Zippers for Isabelle terms.
--- a/src/Tools/Metis/make-metis Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/Metis/make-metis Wed Mar 04 10:45:52 2009 +0100
@@ -1,7 +1,5 @@
#!/usr/bin/env bash
#
-# $Id$
-#
# make-metis - turn original Metis files into Isabelle ML source.
#
# Structure declarations etc. are made local by wrapping into a
@@ -11,8 +9,6 @@
THIS=$(cd "$(dirname "$0")"; echo $PWD)
(
- echo -n '(* $'
- echo 'Id$ *)'
cat <<EOF
(******************************************************************)
(* GENERATED FILE -- DO NOT EDIT -- GENERATED FILE -- DO NOT EDIT *)
--- a/src/Tools/Metis/metis.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/Metis/metis.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,4 +1,3 @@
-(* $Id$ *)
(******************************************************************)
(* GENERATED FILE -- DO NOT EDIT -- GENERATED FILE -- DO NOT EDIT *)
(* GENERATED FILE -- DO NOT EDIT -- GENERATED FILE -- DO NOT EDIT *)
--- a/src/Tools/README Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/README Wed Mar 04 10:45:52 2009 +0100
@@ -4,5 +4,3 @@
This directory contains ML sources of generic tools. Typically, they
can be applied to various logics.
-
-$Id$
--- a/src/Tools/atomize_elim.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/atomize_elim.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/atomize_elim.ML
- ID: $Id$
Author: Alexander Krauss, TU Muenchen
Turn elimination rules into atomic expressions in the object logic.
--- a/src/Tools/auto_solve.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/auto_solve.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,89 +1,91 @@
-(* Title: auto_solve.ML
+(* Title: Pure/Tools/auto_solve.ML
Author: Timothy Bourke and Gerwin Klein, NICTA
- Check whether a newly stated theorem can be solved directly
- by an existing theorem. Duplicate lemmas can be detected in
- this way.
+Check whether a newly stated theorem can be solved directly by an
+existing theorem. Duplicate lemmas can be detected in this way.
- The implemenation is based in part on Berghofer and
- Haftmann's Pure/codegen.ML. It relies critically on
- the FindTheorems solves feature.
+The implementation is based in part on Berghofer and Haftmann's
+Pure/codegen.ML. It relies critically on the FindTheorems solves
+feature.
*)
signature AUTO_SOLVE =
sig
- val auto : bool ref;
- val auto_time_limit : int ref;
+ val auto : bool ref
+ val auto_time_limit : int ref
- val seek_solution : bool -> Proof.state -> Proof.state;
+ val seek_solution : bool -> Proof.state -> Proof.state
end;
structure AutoSolve : AUTO_SOLVE =
struct
- structure FT = FindTheorems;
- val auto = ref false;
- val auto_time_limit = ref 5000;
+val auto = ref false;
+val auto_time_limit = ref 2500;
- fun seek_solution int state = let
- val ctxt = Proof.context_of state;
+fun seek_solution int state =
+ let
+ val ctxt = Proof.context_of state;
- fun conj_to_list [] = []
- | conj_to_list (t::ts) =
- (Conjunction.dest_conjunction t
- |> (fn (t1, t2) => conj_to_list (t1::t2::ts)))
- handle TERM _ => t::conj_to_list ts;
+ fun conj_to_list [] = []
+ | conj_to_list (t::ts) =
+ (Conjunction.dest_conjunction t
+ |> (fn (t1, t2) => conj_to_list (t1::t2::ts)))
+ handle TERM _ => t::conj_to_list ts;
- val crits = [(true, FT.Solves)];
- fun find g = (NONE, FT.find_theorems ctxt g true crits);
- fun find_cterm g = (SOME g, FT.find_theorems ctxt
- (SOME (Goal.init g)) true crits);
+ val crits = [(true, FindTheorems.Solves)];
+ fun find g = (NONE, FindTheorems.find_theorems ctxt g true crits);
+ fun find_cterm g = (SOME g, FindTheorems.find_theorems ctxt
+ (SOME (Goal.init g)) true crits);
- fun prt_result (goal, results) = let
- val msg = case goal of
- NONE => "The current goal"
- | SOME g => Syntax.string_of_term ctxt (term_of g);
- in
- Pretty.big_list (msg ^ " could be solved directly with:")
- (map Display.pretty_fact results)
- end;
+ fun prt_result (goal, results) =
+ let
+ val msg = case goal of
+ NONE => "The current goal"
+ | SOME g => Syntax.string_of_term ctxt (term_of g);
+ in
+ Pretty.big_list (msg ^ " could be solved directly with:")
+ (map (FindTheorems.pretty_thm ctxt) results)
+ end;
- fun seek_against_goal () = let
- val goal = try Proof.get_goal state
- |> Option.map (#2 o #2);
+ fun seek_against_goal () =
+ let
+ val goal = try Proof.get_goal state
+ |> Option.map (#2 o #2);
- val goals = goal
- |> Option.map (fn g => cprem_of g 1)
- |> the_list
- |> conj_to_list;
+ val goals = goal
+ |> Option.map (fn g => cprem_of g 1)
+ |> the_list
+ |> conj_to_list;
- val rs = if length goals = 1
- then [find goal]
- else map find_cterm goals;
- val frs = filter_out (null o snd) rs;
+ val rs = if length goals = 1
+ then [find goal]
+ else map find_cterm goals;
+ val frs = filter_out (null o snd) rs;
- in if null frs then NONE else SOME frs end;
+ in if null frs then NONE else SOME frs end;
- fun go () = let
- val res = TimeLimit.timeLimit
- (Time.fromMilliseconds (!auto_time_limit))
- (try seek_against_goal) ();
- in
- case Option.join res of
- NONE => state
- | SOME results => (Proof.goal_message
- (fn () => Pretty.chunks [Pretty.str "",
- Pretty.markup Markup.hilite
- (Library.separate (Pretty.brk 0)
- (map prt_result results))])
- state)
- end handle TimeLimit.TimeOut => (warning "AutoSolve: timeout."; state);
- in
- if int andalso !auto andalso not (!Toplevel.quiet)
- then go ()
- else state
- end;
-
+ fun go () =
+ let
+ val res = TimeLimit.timeLimit
+ (Time.fromMilliseconds (! auto_time_limit))
+ (try seek_against_goal) ();
+ in
+ case Option.join res of
+ NONE => state
+ | SOME results => (Proof.goal_message
+ (fn () => Pretty.chunks [Pretty.str "",
+ Pretty.markup Markup.hilite
+ (Library.separate (Pretty.brk 0)
+ (map prt_result results))])
+ state)
+ end handle TimeLimit.TimeOut => (warning "AutoSolve: timeout."; state);
+ in
+ if int andalso ! auto andalso not (! Toplevel.quiet)
+ then go ()
+ else state
+ end;
+
end;
val _ = Context.>> (Specification.add_theorem_hook AutoSolve.seek_solution);
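
The rewritten auto_solve.ML keeps its two configuration references in the AUTO_SOLVE signature while lowering the default time limit from 5000 to 2500 ms. A minimal usage sketch of those refs (the values are illustrative only):

  AutoSolve.auto := true;              (* enable the theorem hook installed above *)
  AutoSolve.auto_time_limit := 10000;  (* allow more than the 2500 ms default *)
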
--- a/src/Tools/code/code_funcgr.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/code/code_funcgr.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,12 +1,13 @@
(* Title: Tools/code/code_funcgr.ML
- ID: $Id$
Author: Florian Haftmann, TU Muenchen
-Retrieving, normalizing and structuring defining equations in graph
+Retrieving, normalizing and structuring code equations in graph
with explicit dependencies.
+
+Legacy. To be replaced by Tools/code/code_wellsorted.ML
*)
-signature CODE_FUNCGR =
+signature CODE_WELLSORTED =
sig
type T
val eqns: T -> string -> (thm * bool) list
@@ -22,7 +23,7 @@
val timing: bool ref
end
-structure Code_Funcgr : CODE_FUNCGR =
+structure Code_Wellsorted : CODE_WELLSORTED =
struct
(** the graph type **)
@@ -318,13 +319,13 @@
in
val _ =
- OuterSyntax.improper_command "code_thms" "print system of defining equations for code" OuterKeyword.diag
+ OuterSyntax.improper_command "code_thms" "print system of code equations for code" OuterKeyword.diag
(Scan.repeat P.term_group
>> (fn cs => Toplevel.no_timing o Toplevel.unknown_theory
o Toplevel.keep ((fn thy => code_thms_cmd thy cs) o Toplevel.theory_of)));
val _ =
- OuterSyntax.improper_command "code_deps" "visualize dependencies of defining equations for code" OuterKeyword.diag
+ OuterSyntax.improper_command "code_deps" "visualize dependencies of code equations for code" OuterKeyword.diag
(Scan.repeat P.term_group
>> (fn cs => Toplevel.no_timing o Toplevel.unknown_theory
o Toplevel.keep ((fn thy => code_deps_cmd thy cs) o Toplevel.theory_of)));
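
code_funcgr.ML now provides structure Code_Wellsorted with signature CODE_WELLSORTED and is marked as legacy; the interface itself is unchanged, so clients only rename the structure, as the code_thingol.ML hunks below do. A minimal sketch of an updated call site, with funcgr and c standing for a function graph and a constant name:

  fun equations_of funcgr c = Code_Wellsorted.eqns funcgr c;   (* was Code_Funcgr.eqns *)
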
--- a/src/Tools/code/code_haskell.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/code/code_haskell.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/code/code_haskell.ML
- ID: $Id$
Author: Florian Haftmann, TU Muenchen
Serializer for Haskell.
--- a/src/Tools/code/code_name.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/code/code_name.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/code/code_name.ML
- ID: $Id$
Author: Florian Haftmann, TU Muenchen
Some code generator infrastructure concerning names.
--- a/src/Tools/code/code_printer.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/code/code_printer.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/code/code_printer.ML
- ID: $Id$
Author: Florian Haftmann, TU Muenchen
Generic operations for pretty printing of target language code.
--- a/src/Tools/code/code_target.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/code/code_target.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/code/code_target.ML
- ID: $Id$
Author: Florian Haftmann, TU Muenchen
Serializer from intermediate language ("Thin-gol") to target languages.
@@ -418,7 +417,7 @@
val program4 = Graph.subgraph (member (op =) names_all) program3;
val empty_funs = filter_out (member (op =) abortable)
(Code_Thingol.empty_funs program3);
- val _ = if null empty_funs then () else error ("No defining equations for "
+ val _ = if null empty_funs then () else error ("No code equations for "
^ commas (map (Sign.extern_const thy) empty_funs));
in
serializer module args (labelled_name thy program2) reserved includes
--- a/src/Tools/code/code_thingol.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/code/code_thingol.ML Wed Mar 04 10:45:52 2009 +0100
@@ -109,7 +109,7 @@
let val (xs', x') = unfoldr dest x2 in (x1::xs', x') end;
-(** language core - types, patterns, expressions **)
+(** language core - types, terms **)
type vname = string;
@@ -131,31 +131,6 @@
| ICase of ((iterm * itype) * (iterm * iterm) list) * iterm;
(*see also signature*)
-(*
- variable naming conventions
-
- bare names:
- variable names v
- class names class
- type constructor names tyco
- datatype names dtco
- const names (general) c (const)
- constructor names co
- class parameter names classparam
- arbitrary name s
-
- v, c, co, classparam also annotated with types etc.
-
- constructs:
- sort sort
- type parameters vs
- type ty
- type schemes tysm
- term t
- (term as pattern) p
- instance (class, tyco) inst
- *)
-
val op `$$ = Library.foldl (op `$);
val op `|--> = Library.foldr (op `|->);
@@ -478,7 +453,7 @@
let
val err_class = Sorts.class_error (Syntax.pp_global thy) e;
val err_thm = case thm
- of SOME thm => "\n(in defining equation " ^ Display.string_of_thm thm ^ ")" | NONE => "";
+ of SOME thm => "\n(in code equation " ^ Display.string_of_thm thm ^ ")" | NONE => "";
val err_typ = "Type " ^ Syntax.string_of_typ_global thy ty ^ " not of sort "
^ Syntax.string_of_sort_global thy sort;
in error ("Wellsortedness error" ^ err_thm ^ ":\n" ^ err_typ ^ "\n" ^ err_class) end;
@@ -486,12 +461,6 @@
(* translation *)
-(*FIXME move to code(_unit).ML*)
-fun get_case_scheme thy c = case Code.get_case_data thy c
- of SOME (proto_case_scheme as (_, case_pats)) =>
- SOME (1 + (if null case_pats then 1 else length case_pats), proto_case_scheme)
- | NONE => NONE
-
fun ensure_class thy (algbr as (_, algebra)) funcgr class =
let
val superclasses = (Sorts.minimize_sort algebra o Sorts.super_classes algebra) class;
@@ -526,9 +495,8 @@
and translate_tyvar_sort thy (algbr as (proj_sort, _)) funcgr (v, sort) =
fold_map (ensure_class thy algbr funcgr) (proj_sort sort)
#>> (fn sort => (unprefix "'" v, sort))
-and translate_typ thy algbr funcgr (TFree v_sort) =
- translate_tyvar_sort thy algbr funcgr v_sort
- #>> (fn (v, sort) => ITyVar v)
+and translate_typ thy algbr funcgr (TFree (v, _)) =
+ pair (ITyVar (unprefix "'" v))
| translate_typ thy algbr funcgr (Type (tyco, tys)) =
ensure_tyco thy algbr funcgr tyco
##>> fold_map (translate_typ thy algbr funcgr) tys
@@ -543,16 +511,8 @@
Global ((class, tyco), yss)
| class_relation (Local (classrels, v), subclass) superclass =
Local ((subclass, superclass) :: classrels, v);
- fun norm_typargs ys =
- let
- val raw_sort = map snd ys;
- val sort = Sorts.minimize_sort algebra raw_sort;
- in
- map_filter (fn (y, class) =>
- if member (op =) sort class then SOME y else NONE) ys
- end;
fun type_constructor tyco yss class =
- Global ((class, tyco), map norm_typargs yss);
+ Global ((class, tyco), (map o map) fst yss);
fun type_variable (TFree (v, sort)) =
let
val sort' = proj_sort sort;
@@ -622,9 +582,8 @@
fun stmt_classparam class =
ensure_class thy algbr funcgr class
#>> (fn class => Classparam (c, class));
- fun stmt_fun ((vs, raw_ty), raw_thms) =
+ fun stmt_fun ((vs, ty), raw_thms) =
let
- val ty = Logic.unvarifyT raw_ty;
val thms = if null (Term.add_tfreesT ty []) orelse (null o fst o strip_type) ty
then raw_thms
else (map o apfst) (Code_Unit.expand_eta thy 1) raw_thms;
@@ -638,7 +597,7 @@
of SOME tyco => stmt_datatypecons tyco
| NONE => (case AxClass.class_of_param thy c
of SOME class => stmt_classparam class
- | NONE => stmt_fun (Code_Funcgr.typ funcgr c, Code_Funcgr.eqns funcgr c))
+ | NONE => stmt_fun (Code_Wellsorted.typ funcgr c, Code_Wellsorted.eqns funcgr c))
in ensure_stmt lookup_const (declare_const thy) stmt_const c end
and translate_term thy algbr funcgr thm (Const (c, ty)) =
translate_app thy algbr funcgr thm ((c, ty), [])
@@ -663,7 +622,7 @@
and translate_const thy algbr funcgr thm (c, ty) =
let
val tys = Sign.const_typargs thy (c, ty);
- val sorts = (map snd o fst o Code_Funcgr.typ funcgr) c;
+ val sorts = (map snd o fst o Code_Wellsorted.typ funcgr) c;
val tys_args = (fst o Term.strip_type) ty;
in
ensure_const thy algbr funcgr c
@@ -671,7 +630,7 @@
##>> fold_map (translate_typ thy algbr funcgr) tys_args
#>> (fn ((c, iss), tys) => IConst (c, (iss, tys)))
end
-and translate_app_default thy algbr funcgr thm (c_ty, ts) =
+and translate_app_const thy algbr funcgr thm (c_ty, ts) =
translate_const thy algbr funcgr thm c_ty
##>> fold_map (translate_term thy algbr funcgr thm) ts
#>> (fn (t, ts) => t `$$ ts)
@@ -683,11 +642,6 @@
val ts_clause = nth_drop t_pos ts;
fun mk_clause (co, num_co_args) t =
let
- val _ = if (is_some o Code.get_datatype_of_constr thy) co then ()
- else error ("Non-constructor " ^ quote co
- ^ " encountered in case pattern"
- ^ (case thm of NONE => ""
- | SOME thm => ", in equation\n" ^ Display.string_of_thm thm))
val (vs, body) = Term.strip_abs_eta num_co_args t;
val not_undefined = case body
of (Const (c, _)) => not (Code.is_undefined thy c)
@@ -722,26 +676,28 @@
#>> pair b) clauses
#>> (fn (((const, t), ty), ds) => mk_icase const t ty ds)
end
-and translate_app thy algbr funcgr thm ((c, ty), ts) = case get_case_scheme thy c
- of SOME (case_scheme as (num_args, _)) =>
- if length ts < num_args then
- let
- val k = length ts;
- val tys = (curry Library.take (num_args - k) o curry Library.drop k o fst o strip_type) ty;
- val ctxt = (fold o fold_aterms) Term.declare_term_frees ts Name.context;
- val vs = Name.names ctxt "a" tys;
- in
- fold_map (translate_typ thy algbr funcgr) tys
- ##>> translate_case thy algbr funcgr thm case_scheme ((c, ty), ts @ map Free vs)
- #>> (fn (tys, t) => map2 (fn (v, _) => pair v) vs tys `|--> t)
- end
- else if length ts > num_args then
- translate_case thy algbr funcgr thm case_scheme ((c, ty), Library.take (num_args, ts))
- ##>> fold_map (translate_term thy algbr funcgr thm) (Library.drop (num_args, ts))
- #>> (fn (t, ts) => t `$$ ts)
- else
- translate_case thy algbr funcgr thm case_scheme ((c, ty), ts)
- | NONE => translate_app_default thy algbr funcgr thm ((c, ty), ts);
+and translate_app_case thy algbr funcgr thm (case_scheme as (num_args, _)) ((c, ty), ts) =
+ if length ts < num_args then
+ let
+ val k = length ts;
+ val tys = (curry Library.take (num_args - k) o curry Library.drop k o fst o strip_type) ty;
+ val ctxt = (fold o fold_aterms) Term.declare_term_frees ts Name.context;
+ val vs = Name.names ctxt "a" tys;
+ in
+ fold_map (translate_typ thy algbr funcgr) tys
+ ##>> translate_case thy algbr funcgr thm case_scheme ((c, ty), ts @ map Free vs)
+ #>> (fn (tys, t) => map2 (fn (v, _) => pair v) vs tys `|--> t)
+ end
+ else if length ts > num_args then
+ translate_case thy algbr funcgr thm case_scheme ((c, ty), Library.take (num_args, ts))
+ ##>> fold_map (translate_term thy algbr funcgr thm) (Library.drop (num_args, ts))
+ #>> (fn (t, ts) => t `$$ ts)
+ else
+ translate_case thy algbr funcgr thm case_scheme ((c, ty), ts)
+and translate_app thy algbr funcgr thm (c_ty_ts as ((c, _), _)) =
+ case Code.get_case_scheme thy c
+ of SOME case_scheme => translate_app_case thy algbr funcgr thm case_scheme c_ty_ts
+ | NONE => translate_app_const thy algbr funcgr thm c_ty_ts;
(* store *)
@@ -779,7 +735,7 @@
fun generate_consts thy algebra funcgr =
fold_map (ensure_const thy algebra funcgr);
in
- invoke_generation thy (Code_Funcgr.make thy cs) generate_consts cs
+ invoke_generation thy (Code_Wellsorted.make thy cs) generate_consts cs
|-> project_consts
end;
@@ -822,8 +778,8 @@
in evaluator'' naming program vs_ty_t deps end;
in (t', evaluator') end
-fun eval_conv thy = Code_Funcgr.eval_conv thy o eval thy;
-fun eval_term thy = Code_Funcgr.eval_term thy o eval thy;
+fun eval_conv thy = Code_Wellsorted.eval_conv thy o eval thy;
+fun eval_term thy = Code_Wellsorted.eval_term thy o eval thy;
end; (*struct*)
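
The central code_thingol.ML change splits application translation into translate_app_const and translate_app_case and dispatches on Code.get_case_scheme instead of the removed local get_case_scheme helper. The dispatch shape, restated from the hunk above as a sketch:

  fun translate_app thy algbr funcgr thm (c_ty_ts as ((c, _), _)) =
    case Code.get_case_scheme thy c of
      SOME case_scheme => translate_app_case thy algbr funcgr thm case_scheme c_ty_ts
    | NONE => translate_app_const thy algbr funcgr thm c_ty_ts;
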
--- a/src/Tools/float.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/float.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/float.ML
- ID: $Id$
Author: Steven Obua, Florian Haftmann, TU Muenchen
Implementation of real numbers as mantisse-exponent pairs.
--- a/src/Tools/induct.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/induct.ML Wed Mar 04 10:45:52 2009 +0100
@@ -552,7 +552,7 @@
let
fun add (SOME (SOME x, t)) ctxt =
let val ([(lhs, (_, th))], ctxt') =
- LocalDefs.add_defs [((x, NoSyn), ((Binding.empty, []), t))] ctxt
+ LocalDefs.add_defs [((x, NoSyn), (Thm.empty_binding, t))] ctxt
in ((SOME lhs, [th]), ctxt') end
| add (SOME (NONE, t)) ctxt = ((SOME t, []), ctxt)
| add NONE ctxt = ((NONE, []), ctxt);
--- a/src/Tools/induct_tacs.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/induct_tacs.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/induct_tacs.ML
- ID: $Id$
Author: Makarius
Unstructured induction and cases analysis.
--- a/src/Tools/nbe.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/nbe.ML Wed Mar 04 10:45:52 2009 +0100
@@ -389,8 +389,8 @@
val ts' = take_until is_dict ts;
val c = const_of_idx idx;
val (_, T) = Code.default_typscheme thy c;
- val T' = map_type_tvar (fn ((v, i), S) => TypeInfer.param (typidx + i) (v, [])) T;
- val typidx' = typidx + maxidx_of_typ T' + 1;
+ val T' = map_type_tfree (fn (v, _) => TypeInfer.param typidx (v, [])) T;
+ val typidx' = typidx + 1;
in of_apps bounds (Term.Const (c, T'), ts') typidx' end
| of_univ bounds (Free (name, ts)) typidx =
of_apps bounds (Term.Free (name, dummyT), ts) typidx
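
The nbe.ML hunk changes how type-inference parameters are produced: map_type_tfree turns every free type variable of the constant's default type scheme into a parameter at the current index, and the index now advances once per constant rather than past the maximum index of the produced type. A minimal sketch of that step, using only the functions visible in the hunk:

  fun paramify_typ typidx T =
    (map_type_tfree (fn (v, _) => TypeInfer.param typidx (v, [])) T, typidx + 1);
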
--- a/src/Tools/random_word.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/random_word.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/random_word.ML
- ID: $Id$
Author: Makarius
Simple generator for pseudo-random numbers, using unboxed word
--- a/src/Tools/rat.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/Tools/rat.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Tools/rat.ML
- ID: $Id$
Author: Tobias Nipkow, Florian Haftmann, TU Muenchen
Canonical implementation of exact rational numbers.
--- a/src/ZF/Tools/datatype_package.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/ZF/Tools/datatype_package.ML Wed Mar 04 10:45:52 2009 +0100
@@ -1,5 +1,4 @@
(* Title: ZF/Tools/datatype_package.ML
- ID: $Id$
Author: Lawrence C Paulson, Cambridge University Computer Laboratory
Copyright 1994 University of Cambridge
@@ -140,11 +139,11 @@
(*Treatment of a list of constructors, for one part
Result adds a list of terms, each a function variable with arguments*)
fun add_case_list (con_ty_list, (opno, case_lists)) =
- let val (opno', case_list) = foldr add_case (opno, []) con_ty_list
+ let val (opno', case_list) = List.foldr add_case (opno, []) con_ty_list
in (opno', case_list :: case_lists) end;
(*Treatment of all parts*)
- val (_, case_lists) = foldr add_case_list (1,[]) con_ty_lists;
+ val (_, case_lists) = List.foldr add_case_list (1,[]) con_ty_lists;
(*extract the types of all the variables*)
val case_typ = List.concat (map (map (#2 o #1)) con_ty_lists) ---> @{typ "i => i"};
@@ -184,7 +183,7 @@
val rec_args = map (make_rec_call (rev case_args,0))
(List.drop(recursor_args, ncase_args))
in
- foldr add_abs
+ List.foldr add_abs
(list_comb (recursor_var,
bound_args @ rec_args)) case_args
end
@@ -216,7 +215,7 @@
val rec_ty_lists = (map (map rec_ty_elem) con_ty_lists);
(*Treatment of all parts*)
- val (_, recursor_lists) = foldr add_case_list (1,[]) rec_ty_lists;
+ val (_, recursor_lists) = List.foldr add_case_list (1,[]) rec_ty_lists;
(*extract the types of all the variables*)
val recursor_typ = List.concat (map (map (#2 o #1)) rec_ty_lists) ---> @{typ "i => i"};
--- a/src/ZF/Tools/inductive_package.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/ZF/Tools/inductive_package.ML Wed Mar 04 10:45:52 2009 +0100
@@ -65,7 +65,7 @@
val _ = Theory.requires thy "Inductive_ZF" "(co)inductive definitions";
val ctxt = ProofContext.init thy;
- val intr_specs = map (apfst (apfst Binding.base_name)) raw_intr_specs;
+ val intr_specs = map (apfst (apfst Binding.name_of)) raw_intr_specs;
val (intr_names, intr_tms) = split_list (map fst intr_specs);
val case_names = RuleCases.case_names intr_names;
@@ -99,7 +99,7 @@
Syntax.string_of_term ctxt t);
(*** Construct the fixedpoint definition ***)
- val mk_variant = Name.variant (foldr OldTerm.add_term_names [] intr_tms);
+ val mk_variant = Name.variant (List.foldr OldTerm.add_term_names [] intr_tms);
val z' = mk_variant"z" and X' = mk_variant"X" and w' = mk_variant"w";
@@ -113,7 +113,7 @@
val dummy = List.app (fn rec_hd => List.app (chk_prem rec_hd) prems) rec_hds
val exfrees = OldTerm.term_frees intr \\ rec_params
val zeq = FOLogic.mk_eq (Free(z',iT), #1 (rule_concl intr))
- in foldr FOLogic.mk_exists
+ in List.foldr FOLogic.mk_exists
(BalancedTree.make FOLogic.mk_conj (zeq::prems)) exfrees
end;
@@ -303,7 +303,7 @@
(*Make a premise of the induction rule.*)
fun induct_prem ind_alist intr =
let val quantfrees = map dest_Free (OldTerm.term_frees intr \\ rec_params)
- val iprems = foldr (add_induct_prem ind_alist) []
+ val iprems = List.foldr (add_induct_prem ind_alist) []
(Logic.strip_imp_prems intr)
val (t,X) = Ind_Syntax.rule_concl intr
val (SOME pred) = AList.lookup (op aconv) ind_alist X
@@ -380,7 +380,7 @@
val pfree = Free(pred_name ^ "_" ^ Sign.base_name rec_name,
elem_factors ---> FOLogic.oT)
val qconcl =
- foldr FOLogic.mk_all
+ List.foldr FOLogic.mk_all
(FOLogic.imp $
(@{const mem} $ elem_tuple $ rec_tm)
$ (list_comb (pfree, elem_frees))) elem_frees
--- a/src/ZF/Tools/primrec_package.ML Wed Mar 04 10:43:39 2009 +0100
+++ b/src/ZF/Tools/primrec_package.ML Wed Mar 04 10:45:52 2009 +0100
@@ -120,7 +120,7 @@
| SOME (rhs, cargs', eq) =>
(rhs, inst_recursor (recursor_pair, cargs'), eq)
val allowed_terms = map use_fabs (#2 (strip_comb recursor_rhs))
- val abs = foldr absterm rhs allowed_terms
+ val abs = List.foldr absterm rhs allowed_terms
in
if !Ind_Syntax.trace then
writeln ("recursor_rhs = " ^
@@ -145,7 +145,7 @@
val def_tm = Logic.mk_equals
(subst_bound (rec_arg, fabs),
list_comb (recursor,
- foldr add_case [] (cnames ~~ recursor_pairs))
+ List.foldr add_case [] (cnames ~~ recursor_pairs))
$ rec_arg)
in
@@ -164,7 +164,7 @@
let
val ((eqn_names, eqn_terms), eqn_atts) = apfst split_list (split_list args);
val SOME (fname, ftype, ls, rs, con_info, eqns) =
- foldr (process_eqn thy) NONE eqn_terms;
+ List.foldr (process_eqn thy) NONE eqn_terms;
val def = process_fun thy (fname, ftype, ls, rs, con_info, eqns);
val ([def_thm], thy1) = thy