# HG changeset patch # User wenzelm # Date 1236216276 -3600 # Node ID dcf30c9861c3e24998f1c6ce97c84854f8e5aca2 # Parent 61811c9224a60e6550e87ff76e9a48bf504fa43b# Parent 2fab27ea2a1fba1814f0c56d641a53abb12b1094 merge with dummy changeset, to recover files in doc-src/IsarImplementation/ which got lost in aea5d7fa7ef5 (potentially due to insensitive file system on Mac OS); diff -r 61811c9224a6 -r dcf30c9861c3 NEWS --- a/NEWS Thu Mar 05 02:20:06 2009 +0100 +++ b/NEWS Thu Mar 05 02:24:36 2009 +0100 @@ -501,7 +501,7 @@ Suc_not_Zero Zero_not_Suc ~> nat.distinct * The option datatype has been moved to a new theory HOL/Option.thy. -Renamed option_map to Option.map. +Renamed option_map to Option.map, and o2s to Option.set. * Library/Nat_Infinity: added addition, numeral syntax and more instantiations for algebraic structures. Removed some duplicate diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarImplementation/Thy/ROOT.ML diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarImplementation/Thy/document/Base.tex --- a/doc-src/IsarImplementation/Thy/document/Base.tex Thu Mar 05 02:20:06 2009 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,29 +0,0 @@ -% -\begin{isabellebody}% -\def\isabellecontext{Base}% -% -\isadelimtheory -% -\endisadelimtheory -% -\isatagtheory -\isacommand{theory}\isamarkupfalse% -\ Base\isanewline -\isakeyword{imports}\ Pure\isanewline -\isakeyword{uses}\ {\isachardoublequoteopen}{\isachardot}{\isachardot}{\isacharslash}{\isachardot}{\isachardot}{\isacharslash}antiquote{\isacharunderscore}setup{\isachardot}ML{\isachardoublequoteclose}\isanewline -\isakeyword{begin}\isanewline -\isanewline -\isacommand{end}\isamarkupfalse% -% -\endisatagtheory -{\isafoldtheory}% -% -\isadelimtheory -\isanewline -% -\endisadelimtheory -\end{isabellebody}% -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "root" -%%% End: diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarImplementation/Thy/document/Integration.tex --- a/doc-src/IsarImplementation/Thy/document/Integration.tex Thu Mar 05 02:20:06 2009 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,520 +0,0 @@ -% -\begin{isabellebody}% -\def\isabellecontext{Integration}% -% -\isadelimtheory -% -\endisadelimtheory -% -\isatagtheory -\isacommand{theory}\isamarkupfalse% -\ Integration\isanewline -\isakeyword{imports}\ Base\isanewline -\isakeyword{begin}% -\endisatagtheory -{\isafoldtheory}% -% -\isadelimtheory -% -\endisadelimtheory -% -\isamarkupchapter{System integration% -} -\isamarkuptrue% -% -\isamarkupsection{Isar toplevel \label{sec:isar-toplevel}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -The Isar toplevel may be considered the centeral hub of the - Isabelle/Isar system, where all key components and sub-systems are - integrated into a single read-eval-print loop of Isar commands. We - shall even incorporate the existing {\ML} toplevel of the compiler - and run-time system (cf.\ \secref{sec:ML-toplevel}). - - Isabelle/Isar departs from the original ``LCF system architecture'' - where {\ML} was really The Meta Language for defining theories and - conducting proofs. Instead, {\ML} now only serves as the - implementation language for the system (and user extensions), while - the specific Isar toplevel supports the concepts of theory and proof - development natively. This includes the graph structure of theories - and the block structure of proofs, support for unlimited undo, - facilities for tracing, debugging, timing, profiling etc. 
- - \medskip The toplevel maintains an implicit state, which is - transformed by a sequence of transitions -- either interactively or - in batch-mode. In interactive mode, Isar state transitions are - encapsulated as safe transactions, such that both failure and undo - are handled conveniently without destroying the underlying draft - theory (cf.~\secref{sec:context-theory}). In batch mode, - transitions operate in a linear (destructive) fashion, such that - error conditions abort the present attempt to construct a theory or - proof altogether. - - The toplevel state is a disjoint sum of empty \isa{toplevel}, or - \isa{theory}, or \isa{proof}. On entering the main Isar loop we - start with an empty toplevel. A theory is commenced by giving a - \isa{{\isasymTHEORY}} header; within a theory we may issue theory - commands such as \isa{{\isasymDEFINITION}}, or state a \isa{{\isasymTHEOREM}} to be proven. Now we are within a proof state, with a - rich collection of Isar proof commands for structured proof - composition, or unstructured proof scripts. When the proof is - concluded we get back to the theory, which is then updated by - storing the resulting fact. Further theory declarations or theorem - statements with proofs may follow, until we eventually conclude the - theory development by issuing \isa{{\isasymEND}}. The resulting theory - is then stored within the theory database and we are back to the - empty toplevel. - - In addition to these proper state transformations, there are also - some diagnostic commands for peeking at the toplevel state without - modifying it (e.g.\ \isakeyword{thm}, \isakeyword{term}, - \isakeyword{print-cases}).% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML type}{Toplevel.state}\verb|type Toplevel.state| \\ - \indexdef{}{ML}{Toplevel.UNDEF}\verb|Toplevel.UNDEF: exn| \\ - \indexdef{}{ML}{Toplevel.is\_toplevel}\verb|Toplevel.is_toplevel: Toplevel.state -> bool| \\ - \indexdef{}{ML}{Toplevel.theory\_of}\verb|Toplevel.theory_of: Toplevel.state -> theory| \\ - \indexdef{}{ML}{Toplevel.proof\_of}\verb|Toplevel.proof_of: Toplevel.state -> Proof.state| \\ - \indexdef{}{ML}{Toplevel.debug}\verb|Toplevel.debug: bool ref| \\ - \indexdef{}{ML}{Toplevel.timing}\verb|Toplevel.timing: bool ref| \\ - \indexdef{}{ML}{Toplevel.profiling}\verb|Toplevel.profiling: int ref| \\ - \end{mldecls} - - \begin{description} - - \item \verb|Toplevel.state| represents Isar toplevel states, - which are normally manipulated through the concept of toplevel - transitions only (\secref{sec:toplevel-transition}). Also note that - a raw toplevel state is subject to the same linearity restrictions - as a theory context (cf.~\secref{sec:context-theory}). - - \item \verb|Toplevel.UNDEF| is raised for undefined toplevel - operations. Many operations work only partially for certain cases, - since \verb|Toplevel.state| is a sum type. - - \item \verb|Toplevel.is_toplevel|~\isa{state} checks for an empty - toplevel state. - - \item \verb|Toplevel.theory_of|~\isa{state} selects the theory of - a theory or proof (!), otherwise raises \verb|Toplevel.UNDEF|. - - \item \verb|Toplevel.proof_of|~\isa{state} selects the Isar proof - state if available, otherwise raises \verb|Toplevel.UNDEF|. - - \item \verb|set Toplevel.debug| makes the toplevel print further - details about internal error conditions, exceptions being raised - etc. 
- - \item \verb|set Toplevel.timing| makes the toplevel print timing - information for each Isar command being executed. - - \item \verb|Toplevel.profiling|~\verb|:=|~\isa{n} controls - low-level profiling of the underlying {\ML} runtime system. For - Poly/ML, \isa{n\ {\isacharequal}\ {\isadigit{1}}} means time and \isa{n\ {\isacharequal}\ {\isadigit{2}}} space - profiling. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsubsection{Toplevel transitions \label{sec:toplevel-transition}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -An Isar toplevel transition consists of a partial function on the - toplevel state, with additional information for diagnostics and - error reporting: there are fields for command name, source position, - optional source text, as well as flags for interactive-only commands - (which issue a warning in batch-mode), printing of result state, - etc. - - The operational part is represented as the sequential union of a - list of partial functions, which are tried in turn until the first - one succeeds. This acts like an outer case-expression for various - alternative state transitions. For example, \isakeyword{qed} acts - differently for a local proofs vs.\ the global ending of the main - proof. - - Toplevel transitions are composed via transition transformers. - Internally, Isar commands are put together from an empty transition - extended by name and source position (and optional source text). It - is then left to the individual command parser to turn the given - concrete syntax into a suitable transition transformer that adjoins - actual operations on a theory or proof state etc.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML}{Toplevel.print}\verb|Toplevel.print: Toplevel.transition -> Toplevel.transition| \\ - \indexdef{}{ML}{Toplevel.no\_timing}\verb|Toplevel.no_timing: Toplevel.transition -> Toplevel.transition| \\ - \indexdef{}{ML}{Toplevel.keep}\verb|Toplevel.keep: (Toplevel.state -> unit) ->|\isasep\isanewline% -\verb| Toplevel.transition -> Toplevel.transition| \\ - \indexdef{}{ML}{Toplevel.theory}\verb|Toplevel.theory: (theory -> theory) ->|\isasep\isanewline% -\verb| Toplevel.transition -> Toplevel.transition| \\ - \indexdef{}{ML}{Toplevel.theory\_to\_proof}\verb|Toplevel.theory_to_proof: (theory -> Proof.state) ->|\isasep\isanewline% -\verb| Toplevel.transition -> Toplevel.transition| \\ - \indexdef{}{ML}{Toplevel.proof}\verb|Toplevel.proof: (Proof.state -> Proof.state) ->|\isasep\isanewline% -\verb| Toplevel.transition -> Toplevel.transition| \\ - \indexdef{}{ML}{Toplevel.proofs}\verb|Toplevel.proofs: (Proof.state -> Proof.state Seq.seq) ->|\isasep\isanewline% -\verb| Toplevel.transition -> Toplevel.transition| \\ - \indexdef{}{ML}{Toplevel.end\_proof}\verb|Toplevel.end_proof: (bool -> Proof.state -> Proof.context) ->|\isasep\isanewline% -\verb| Toplevel.transition -> Toplevel.transition| \\ - \end{mldecls} - - \begin{description} - - \item \verb|Toplevel.print|~\isa{tr} sets the print flag, which - causes the toplevel loop to echo the result state (in interactive - mode). - - \item \verb|Toplevel.no_timing|~\isa{tr} indicates that the - transition should never show timing information, e.g.\ because it is - a diagnostic command. - - \item \verb|Toplevel.keep|~\isa{tr} adjoins a diagnostic - function. 
- - \item \verb|Toplevel.theory|~\isa{tr} adjoins a theory - transformer. - - \item \verb|Toplevel.theory_to_proof|~\isa{tr} adjoins a global - goal function, which turns a theory into a proof state. The theory - may be changed before entering the proof; the generic Isar goal - setup includes an argument that specifies how to apply the proven - result to the theory, when the proof is finished. - - \item \verb|Toplevel.proof|~\isa{tr} adjoins a deterministic - proof command, with a singleton result. - - \item \verb|Toplevel.proofs|~\isa{tr} adjoins a general proof - command, with zero or more result states (represented as a lazy - list). - - \item \verb|Toplevel.end_proof|~\isa{tr} adjoins a concluding - proof command, that returns the resulting theory, after storing the - resulting facts in the context etc. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsubsection{Toplevel control% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -There are a few special control commands that modify the behavior - the toplevel itself, and only make sense in interactive mode. Under - normal circumstances, the user encounters these only implicitly as - part of the protocol between the Isabelle/Isar system and a - user-interface such as ProofGeneral. - - \begin{description} - - \item \isacommand{undo} follows the three-level hierarchy of empty - toplevel vs.\ theory vs.\ proof: undo within a proof reverts to the - previous proof context, undo after a proof reverts to the theory - before the initial goal statement, undo of a theory command reverts - to the previous theory value, undo of a theory header discontinues - the current theory development and removes it from the theory - database (\secref{sec:theory-database}). - - \item \isacommand{kill} aborts the current level of development: - kill in a proof context reverts to the theory before the initial - goal statement, kill in a theory context aborts the current theory - development, removing it from the database. - - \item \isacommand{exit} drops out of the Isar toplevel into the - underlying {\ML} toplevel (\secref{sec:ML-toplevel}). The Isar - toplevel state is preserved and may be continued later. - - \item \isacommand{quit} terminates the Isabelle/Isar process without - saving. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isamarkupsection{ML toplevel \label{sec:ML-toplevel}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -The {\ML} toplevel provides a read-compile-eval-print loop for {\ML} - values, types, structures, and functors. {\ML} declarations operate - on the global system state, which consists of the compiler - environment plus the values of {\ML} reference variables. There is - no clean way to undo {\ML} declarations, except for reverting to a - previously saved state of the whole Isabelle process. {\ML} input - is either read interactively from a TTY, or from a string (usually - within a theory text), or from a source file (usually loaded from a - theory). - - Whenever the {\ML} toplevel is active, the current Isabelle theory - context is passed as an internal reference variable. 
Thus {\ML} - code may access the theory context during compilation, it may even - change the value of a theory being under construction --- while - observing the usual linearity restrictions - (cf.~\secref{sec:context-theory}).% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML}{the\_context}\verb|the_context: unit -> theory| \\ - \indexdef{}{ML}{Context.$>$$>$ }\verb|Context.>> : (Context.generic -> Context.generic) -> unit| \\ - \end{mldecls} - - \begin{description} - - \item \verb|the_context ()| refers to the theory context of the - {\ML} toplevel --- at compile time! {\ML} code needs to take care - to refer to \verb|the_context ()| correctly. Recall that - evaluation of a function body is delayed until actual runtime. - Moreover, persistent {\ML} toplevel bindings to an unfinished theory - should be avoided: code should either project out the desired - information immediately, or produce an explicit \verb|theory_ref| (cf.\ \secref{sec:context-theory}). - - \item \verb|Context.>>|~\isa{f} applies context transformation - \isa{f} to the implicit context of the {\ML} toplevel. - - \end{description} - - It is very important to note that the above functions are really - restricted to the compile time, even though the {\ML} compiler is - invoked at runtime! The majority of {\ML} code uses explicit - functional arguments of a theory or proof context instead. Thus it - may be invoked for an arbitrary context later on, without having to - worry about any operational details. - - \bigskip - - \begin{mldecls} - \indexdef{}{ML}{Isar.main}\verb|Isar.main: unit -> unit| \\ - \indexdef{}{ML}{Isar.loop}\verb|Isar.loop: unit -> unit| \\ - \indexdef{}{ML}{Isar.state}\verb|Isar.state: unit -> Toplevel.state| \\ - \indexdef{}{ML}{Isar.exn}\verb|Isar.exn: unit -> (exn * string) option| \\ - \indexdef{}{ML}{Isar.context}\verb|Isar.context: unit -> Proof.context| \\ - \indexdef{}{ML}{Isar.goal}\verb|Isar.goal: unit -> thm| \\ - \end{mldecls} - - \begin{description} - - \item \verb|Isar.main ()| invokes the Isar toplevel from {\ML}, - initializing an empty toplevel state. - - \item \verb|Isar.loop ()| continues the Isar toplevel with the - current state, after having dropped out of the Isar toplevel loop. - - \item \verb|Isar.state ()| and \verb|Isar.exn ()| get current - toplevel state and error condition, respectively. This only works - after having dropped out of the Isar toplevel loop. - - \item \verb|Isar.context ()| produces the proof context from \verb|Isar.state ()|, analogous to \verb|Context.proof_of| - (\secref{sec:generic-context}). - - \item \verb|Isar.goal ()| picks the tactical goal from \verb|Isar.state ()|, represented as a theorem according to - \secref{sec:tactical-goals}. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsection{Theory database \label{sec:theory-database}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -The theory database maintains a collection of theories, together - with some administrative information about their original sources, - which are held in an external store (i.e.\ some directory within the - regular file system). - - The theory database is organized as a directed acyclic graph; - entries are referenced by theory name. 
Although some additional - interfaces allow to include a directory specification as well, this - is only a hint to the underlying theory loader. The internal theory - name space is flat! - - Theory \isa{A} is associated with the main theory file \isa{A}\verb,.thy,, which needs to be accessible through the theory - loader path. Any number of additional {\ML} source files may be - associated with each theory, by declaring these dependencies in the - theory header as \isa{{\isasymUSES}}, and loading them consecutively - within the theory context. The system keeps track of incoming {\ML} - sources and associates them with the current theory. The file - \isa{A}\verb,.ML, is loaded after a theory has been concluded, in - order to support legacy proof {\ML} proof scripts. - - The basic internal actions of the theory database are \isa{update}, \isa{outdate}, and \isa{remove}: - - \begin{itemize} - - \item \isa{update\ A} introduces a link of \isa{A} with a - \isa{theory} value of the same name; it asserts that the theory - sources are now consistent with that value; - - \item \isa{outdate\ A} invalidates the link of a theory database - entry to its sources, but retains the present theory value; - - \item \isa{remove\ A} deletes entry \isa{A} from the theory - database. - - \end{itemize} - - These actions are propagated to sub- or super-graphs of a theory - entry as expected, in order to preserve global consistency of the - state of all loaded theories with the sources of the external store. - This implies certain causalities between actions: \isa{update} - or \isa{outdate} of an entry will \isa{outdate} all - descendants; \isa{remove} will \isa{remove} all descendants. - - \medskip There are separate user-level interfaces to operate on the - theory database directly or indirectly. The primitive actions then - just happen automatically while working with the system. In - particular, processing a theory header \isa{{\isasymTHEORY}\ A\ {\isasymIMPORTS}\ B\isactrlsub {\isadigit{1}}\ {\isasymdots}\ B\isactrlsub n\ {\isasymBEGIN}} ensures that the - sub-graph of the collective imports \isa{B\isactrlsub {\isadigit{1}}\ {\isasymdots}\ B\isactrlsub n} - is up-to-date, too. Earlier theories are reloaded as required, with - \isa{update} actions proceeding in topological order according to - theory dependencies. There may be also a wave of implied \isa{outdate} actions for derived theory nodes until a stable situation - is achieved eventually.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML}{theory}\verb|theory: string -> theory| \\ - \indexdef{}{ML}{use\_thy}\verb|use_thy: string -> unit| \\ - \indexdef{}{ML}{use\_thys}\verb|use_thys: string list -> unit| \\ - \indexdef{}{ML}{ThyInfo.touch\_thy}\verb|ThyInfo.touch_thy: string -> unit| \\ - \indexdef{}{ML}{ThyInfo.remove\_thy}\verb|ThyInfo.remove_thy: string -> unit| \\[1ex] - \indexdef{}{ML}{ThyInfo.begin\_theory}\verb|ThyInfo.begin_theory|\verb|: ... 
-> bool -> theory| \\ - \indexdef{}{ML}{ThyInfo.end\_theory}\verb|ThyInfo.end_theory: theory -> unit| \\ - \indexdef{}{ML}{ThyInfo.register\_theory}\verb|ThyInfo.register_theory: theory -> unit| \\[1ex] - \verb|datatype action = Update |\verb,|,\verb| Outdate |\verb,|,\verb| Remove| \\ - \indexdef{}{ML}{ThyInfo.add\_hook}\verb|ThyInfo.add_hook: (ThyInfo.action -> string -> unit) -> unit| \\ - \end{mldecls} - - \begin{description} - - \item \verb|theory|~\isa{A} retrieves the theory value presently - associated with name \isa{A}. Note that the result might be - outdated. - - \item \verb|use_thy|~\isa{A} ensures that theory \isa{A} is fully - up-to-date wrt.\ the external file store, reloading outdated - ancestors as required. - - \item \verb|use_thys| is similar to \verb|use_thy|, but handles - several theories simultaneously. Thus it acts like processing the - import header of a theory, without performing the merge of the - result, though. - - \item \verb|ThyInfo.touch_thy|~\isa{A} performs and \isa{outdate} action - on theory \isa{A} and all descendants. - - \item \verb|ThyInfo.remove_thy|~\isa{A} deletes theory \isa{A} and all - descendants from the theory database. - - \item \verb|ThyInfo.begin_theory| is the basic operation behind a - \isa{{\isasymTHEORY}} header declaration. This is {\ML} functions is - normally not invoked directly. - - \item \verb|ThyInfo.end_theory| concludes the loading of a theory - proper and stores the result in the theory database. - - \item \verb|ThyInfo.register_theory|~\isa{text\ thy} registers an - existing theory value with the theory loader database. There is no - management of associated sources. - - \item \verb|ThyInfo.add_hook|~\isa{f} registers function \isa{f} as a hook for theory database actions. The function will be - invoked with the action and theory name being involved; thus derived - actions may be performed in associated system components, e.g.\ - maintaining the state of an editor for the theory sources. - - The kind and order of actions occurring in practice depends both on - user interactions and the internal process of resolving theory - imports. Hooks should not rely on a particular policy here! Any - exceptions raised by the hook are ignored. 
- - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isadelimtheory -% -\endisadelimtheory -% -\isatagtheory -\isacommand{end}\isamarkupfalse% -% -\endisatagtheory -{\isafoldtheory}% -% -\isadelimtheory -% -\endisadelimtheory -\isanewline -\end{isabellebody}% -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "root" -%%% End: diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarImplementation/Thy/document/Isar.tex --- a/doc-src/IsarImplementation/Thy/document/Isar.tex Thu Mar 05 02:20:06 2009 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,86 +0,0 @@ -% -\begin{isabellebody}% -\def\isabellecontext{Isar}% -% -\isadelimtheory -% -\endisadelimtheory -% -\isatagtheory -\isacommand{theory}\isamarkupfalse% -\ Isar\isanewline -\isakeyword{imports}\ Base\isanewline -\isakeyword{begin}% -\endisatagtheory -{\isafoldtheory}% -% -\isadelimtheory -% -\endisadelimtheory -% -\isamarkupchapter{Isar language elements% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -The primary Isar language consists of three main categories of - language elements: - - \begin{enumerate} - - \item Proof commands - - \item Proof methods - - \item Attributes - - \end{enumerate}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isamarkupsection{Proof commands% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -FIXME% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isamarkupsection{Proof methods% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -FIXME% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isamarkupsection{Attributes% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -FIXME% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimtheory -% -\endisadelimtheory -% -\isatagtheory -\isacommand{end}\isamarkupfalse% -% -\endisatagtheory -{\isafoldtheory}% -% -\isadelimtheory -% -\endisadelimtheory -\isanewline -\end{isabellebody}% -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "root" -%%% End: diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarImplementation/Thy/document/Logic.tex --- a/doc-src/IsarImplementation/Thy/document/Logic.tex Thu Mar 05 02:20:06 2009 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,959 +0,0 @@ -% -\begin{isabellebody}% -\def\isabellecontext{Logic}% -% -\isadelimtheory -% -\endisadelimtheory -% -\isatagtheory -\isacommand{theory}\isamarkupfalse% -\ Logic\isanewline -\isakeyword{imports}\ Base\isanewline -\isakeyword{begin}% -\endisatagtheory -{\isafoldtheory}% -% -\isadelimtheory -% -\endisadelimtheory -% -\isamarkupchapter{Primitive logic \label{ch:logic}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -The logical foundations of Isabelle/Isar are that of the Pure logic, - which has been introduced as a Natural Deduction framework in - \cite{paulson700}. This is essentially the same logic as ``\isa{{\isasymlambda}HOL}'' in the more abstract setting of Pure Type Systems (PTS) - \cite{Barendregt-Geuvers:2001}, although there are some key - differences in the specific treatment of simple types in - Isabelle/Pure. - - Following type-theoretic parlance, the Pure logic consists of three - levels of \isa{{\isasymlambda}}-calculus with corresponding arrows, \isa{{\isasymRightarrow}} for syntactic function space (terms depending on terms), \isa{{\isasymAnd}} for universal quantification (proofs depending on terms), and - \isa{{\isasymLongrightarrow}} for implication (proofs depending on proofs). - - Derivations are relative to a logical theory, which declares type - constructors, constants, and axioms. 
Theory declarations support - schematic polymorphism, which is strictly speaking outside the - logic.\footnote{This is the deeper logical reason, why the theory - context \isa{{\isasymTheta}} is separate from the proof context \isa{{\isasymGamma}} - of the core calculus.}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isamarkupsection{Types \label{sec:types}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -The language of types is an uninterpreted order-sorted first-order - algebra; types are qualified by ordered type classes. - - \medskip A \emph{type class} is an abstract syntactic entity - declared in the theory context. The \emph{subclass relation} \isa{c\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ c\isactrlisub {\isadigit{2}}} is specified by stating an acyclic - generating relation; the transitive closure is maintained - internally. The resulting relation is an ordering: reflexive, - transitive, and antisymmetric. - - A \emph{sort} is a list of type classes written as \isa{s\ {\isacharequal}\ {\isacharbraceleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ c\isactrlisub m{\isacharbraceright}}, which represents symbolic - intersection. Notationally, the curly braces are omitted for - singleton intersections, i.e.\ any class \isa{c} may be read as - a sort \isa{{\isacharbraceleft}c{\isacharbraceright}}. The ordering on type classes is extended to - sorts according to the meaning of intersections: \isa{{\isacharbraceleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}\ c\isactrlisub m{\isacharbraceright}\ {\isasymsubseteq}\ {\isacharbraceleft}d\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ d\isactrlisub n{\isacharbraceright}} iff - \isa{{\isasymforall}j{\isachardot}\ {\isasymexists}i{\isachardot}\ c\isactrlisub i\ {\isasymsubseteq}\ d\isactrlisub j}. The empty intersection - \isa{{\isacharbraceleft}{\isacharbraceright}} refers to the universal sort, which is the largest - element wrt.\ the sort order. The intersections of all (finitely - many) classes declared in the current theory are the minimal - elements wrt.\ the sort order. - - \medskip A \emph{fixed type variable} is a pair of a basic name - (starting with a \isa{{\isacharprime}} character) and a sort constraint, e.g.\ - \isa{{\isacharparenleft}{\isacharprime}a{\isacharcomma}\ s{\isacharparenright}} which is usually printed as \isa{{\isasymalpha}\isactrlisub s}. - A \emph{schematic type variable} is a pair of an indexname and a - sort constraint, e.g.\ \isa{{\isacharparenleft}{\isacharparenleft}{\isacharprime}a{\isacharcomma}\ {\isadigit{0}}{\isacharparenright}{\isacharcomma}\ s{\isacharparenright}} which is usually - printed as \isa{{\isacharquery}{\isasymalpha}\isactrlisub s}. - - Note that \emph{all} syntactic components contribute to the identity - of type variables, including the sort constraint. The core logic - handles type variables with the same name but different sorts as - different, although some outer layers of the system make it hard to - produce anything like this. - - A \emph{type constructor} \isa{{\isasymkappa}} is a \isa{k}-ary operator - on types declared in the theory. Type constructor application is - written postfix as \isa{{\isacharparenleft}{\isasymalpha}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymalpha}\isactrlisub k{\isacharparenright}{\isasymkappa}}. 
For - \isa{k\ {\isacharequal}\ {\isadigit{0}}} the argument tuple is omitted, e.g.\ \isa{prop} - instead of \isa{{\isacharparenleft}{\isacharparenright}prop}. For \isa{k\ {\isacharequal}\ {\isadigit{1}}} the parentheses - are omitted, e.g.\ \isa{{\isasymalpha}\ list} instead of \isa{{\isacharparenleft}{\isasymalpha}{\isacharparenright}list}. - Further notation is provided for specific constructors, notably the - right-associative infix \isa{{\isasymalpha}\ {\isasymRightarrow}\ {\isasymbeta}} instead of \isa{{\isacharparenleft}{\isasymalpha}{\isacharcomma}\ {\isasymbeta}{\isacharparenright}fun}. - - A \emph{type} is defined inductively over type variables and type - constructors as follows: \isa{{\isasymtau}\ {\isacharequal}\ {\isasymalpha}\isactrlisub s\ {\isacharbar}\ {\isacharquery}{\isasymalpha}\isactrlisub s\ {\isacharbar}\ {\isacharparenleft}{\isasymtau}\isactrlsub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlsub k{\isacharparenright}{\isasymkappa}}. - - A \emph{type abbreviation} is a syntactic definition \isa{{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharparenright}{\isasymkappa}\ {\isacharequal}\ {\isasymtau}} of an arbitrary type expression \isa{{\isasymtau}} over - variables \isa{\isactrlvec {\isasymalpha}}. Type abbreviations appear as type - constructors in the syntax, but are expanded before entering the - logical core. - - A \emph{type arity} declares the image behavior of a type - constructor wrt.\ the algebra of sorts: \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}s\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ s\isactrlisub k{\isacharparenright}s} means that \isa{{\isacharparenleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub k{\isacharparenright}{\isasymkappa}} is - of sort \isa{s} if every argument type \isa{{\isasymtau}\isactrlisub i} is - of sort \isa{s\isactrlisub i}. Arity declarations are implicitly - completed, i.e.\ \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s{\isacharparenright}c} entails \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s{\isacharparenright}c{\isacharprime}} for any \isa{c{\isacharprime}\ {\isasymsupseteq}\ c}. - - \medskip The sort algebra is always maintained as \emph{coregular}, - which means that type arities are consistent with the subclass - relation: for any type constructor \isa{{\isasymkappa}}, and classes \isa{c\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ c\isactrlisub {\isadigit{2}}}, and arities \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s\isactrlisub {\isadigit{1}}{\isacharparenright}c\isactrlisub {\isadigit{1}}} and \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s\isactrlisub {\isadigit{2}}{\isacharparenright}c\isactrlisub {\isadigit{2}}} holds \isa{\isactrlvec s\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ \isactrlvec s\isactrlisub {\isadigit{2}}} component-wise. 
- - The key property of a coregular order-sorted algebra is that sort - constraints can be solved in a most general fashion: for each type - constructor \isa{{\isasymkappa}} and sort \isa{s} there is a most general - vector of argument sorts \isa{{\isacharparenleft}s\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ s\isactrlisub k{\isacharparenright}} such - that a type scheme \isa{{\isacharparenleft}{\isasymalpha}\isactrlbsub s\isactrlisub {\isadigit{1}}\isactrlesub {\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymalpha}\isactrlbsub s\isactrlisub k\isactrlesub {\isacharparenright}{\isasymkappa}} is of sort \isa{s}. - Consequently, type unification has most general solutions (modulo - equivalence of sorts), so type-inference produces primary types as - expected \cite{nipkow-prehofer}.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML type}{class}\verb|type class| \\ - \indexdef{}{ML type}{sort}\verb|type sort| \\ - \indexdef{}{ML type}{arity}\verb|type arity| \\ - \indexdef{}{ML type}{typ}\verb|type typ| \\ - \indexdef{}{ML}{map\_atyps}\verb|map_atyps: (typ -> typ) -> typ -> typ| \\ - \indexdef{}{ML}{fold\_atyps}\verb|fold_atyps: (typ -> 'a -> 'a) -> typ -> 'a -> 'a| \\ - \end{mldecls} - \begin{mldecls} - \indexdef{}{ML}{Sign.subsort}\verb|Sign.subsort: theory -> sort * sort -> bool| \\ - \indexdef{}{ML}{Sign.of\_sort}\verb|Sign.of_sort: theory -> typ * sort -> bool| \\ - \indexdef{}{ML}{Sign.add\_types}\verb|Sign.add_types: (string * int * mixfix) list -> theory -> theory| \\ - \indexdef{}{ML}{Sign.add\_tyabbrs\_i}\verb|Sign.add_tyabbrs_i: |\isasep\isanewline% -\verb| (string * string list * typ * mixfix) list -> theory -> theory| \\ - \indexdef{}{ML}{Sign.primitive\_class}\verb|Sign.primitive_class: string * class list -> theory -> theory| \\ - \indexdef{}{ML}{Sign.primitive\_classrel}\verb|Sign.primitive_classrel: class * class -> theory -> theory| \\ - \indexdef{}{ML}{Sign.primitive\_arity}\verb|Sign.primitive_arity: arity -> theory -> theory| \\ - \end{mldecls} - - \begin{description} - - \item \verb|class| represents type classes; this is an alias for - \verb|string|. - - \item \verb|sort| represents sorts; this is an alias for - \verb|class list|. - - \item \verb|arity| represents type arities; this is an alias for - triples of the form \isa{{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ \isactrlvec s{\isacharcomma}\ s{\isacharparenright}} for \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s{\isacharparenright}s} described above. - - \item \verb|typ| represents types; this is a datatype with - constructors \verb|TFree|, \verb|TVar|, \verb|Type|. - - \item \verb|map_atyps|~\isa{f\ {\isasymtau}} applies the mapping \isa{f} - to all atomic types (\verb|TFree|, \verb|TVar|) occurring in \isa{{\isasymtau}}. - - \item \verb|fold_atyps|~\isa{f\ {\isasymtau}} iterates the operation \isa{f} over all occurrences of atomic types (\verb|TFree|, \verb|TVar|) - in \isa{{\isasymtau}}; the type structure is traversed from left to right. - - \item \verb|Sign.subsort|~\isa{thy\ {\isacharparenleft}s\isactrlisub {\isadigit{1}}{\isacharcomma}\ s\isactrlisub {\isadigit{2}}{\isacharparenright}} - tests the subsort relation \isa{s\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ s\isactrlisub {\isadigit{2}}}. 
- - \item \verb|Sign.of_sort|~\isa{thy\ {\isacharparenleft}{\isasymtau}{\isacharcomma}\ s{\isacharparenright}} tests whether type - \isa{{\isasymtau}} is of sort \isa{s}. - - \item \verb|Sign.add_types|~\isa{{\isacharbrackleft}{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ k{\isacharcomma}\ mx{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharbrackright}} declares a new - type constructors \isa{{\isasymkappa}} with \isa{k} arguments and - optional mixfix syntax. - - \item \verb|Sign.add_tyabbrs_i|~\isa{{\isacharbrackleft}{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ \isactrlvec {\isasymalpha}{\isacharcomma}\ {\isasymtau}{\isacharcomma}\ mx{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharbrackright}} - defines a new type abbreviation \isa{{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharparenright}{\isasymkappa}\ {\isacharequal}\ {\isasymtau}} with - optional mixfix syntax. - - \item \verb|Sign.primitive_class|~\isa{{\isacharparenleft}c{\isacharcomma}\ {\isacharbrackleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ c\isactrlisub n{\isacharbrackright}{\isacharparenright}} declares a new class \isa{c}, together with class - relations \isa{c\ {\isasymsubseteq}\ c\isactrlisub i}, for \isa{i\ {\isacharequal}\ {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ n}. - - \item \verb|Sign.primitive_classrel|~\isa{{\isacharparenleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ c\isactrlisub {\isadigit{2}}{\isacharparenright}} declares the class relation \isa{c\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ c\isactrlisub {\isadigit{2}}}. - - \item \verb|Sign.primitive_arity|~\isa{{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ \isactrlvec s{\isacharcomma}\ s{\isacharparenright}} declares - the arity \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s{\isacharparenright}s}. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsection{Terms \label{sec:terms}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -The language of terms is that of simply-typed \isa{{\isasymlambda}}-calculus - with de-Bruijn indices for bound variables (cf.\ \cite{debruijn72} - or \cite{paulson-ml2}), with the types being determined by the - corresponding binders. In contrast, free variables and constants - are have an explicit name and type in each occurrence. - - \medskip A \emph{bound variable} is a natural number \isa{b}, - which accounts for the number of intermediate binders between the - variable occurrence in the body and its binding position. For - example, the de-Bruijn term \isa{{\isasymlambda}\isactrlbsub nat\isactrlesub {\isachardot}\ {\isasymlambda}\isactrlbsub nat\isactrlesub {\isachardot}\ {\isadigit{1}}\ {\isacharplus}\ {\isadigit{0}}} would - correspond to \isa{{\isasymlambda}x\isactrlbsub nat\isactrlesub {\isachardot}\ {\isasymlambda}y\isactrlbsub nat\isactrlesub {\isachardot}\ x\ {\isacharplus}\ y} in a named - representation. Note that a bound variable may be represented by - different de-Bruijn indices at different occurrences, depending on - the nesting of abstractions. - - A \emph{loose variable} is a bound variable that is outside the - scope of local binders. The types (and names) for loose variables - can be managed as a separate context, that is maintained as a stack - of hypothetical binders. The core logic operates on closed terms, - without any loose variables. 
- - A \emph{fixed variable} is a pair of a basic name and a type, e.g.\ - \isa{{\isacharparenleft}x{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is usually printed \isa{x\isactrlisub {\isasymtau}}. A - \emph{schematic variable} is a pair of an indexname and a type, - e.g.\ \isa{{\isacharparenleft}{\isacharparenleft}x{\isacharcomma}\ {\isadigit{0}}{\isacharparenright}{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is usually printed as \isa{{\isacharquery}x\isactrlisub {\isasymtau}}. - - \medskip A \emph{constant} is a pair of a basic name and a type, - e.g.\ \isa{{\isacharparenleft}c{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is usually printed as \isa{c\isactrlisub {\isasymtau}}. Constants are declared in the context as polymorphic - families \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}}, meaning that all substitution instances - \isa{c\isactrlisub {\isasymtau}} for \isa{{\isasymtau}\ {\isacharequal}\ {\isasymsigma}{\isasymvartheta}} are valid. - - The vector of \emph{type arguments} of constant \isa{c\isactrlisub {\isasymtau}} - wrt.\ the declaration \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} is defined as the codomain of - the matcher \isa{{\isasymvartheta}\ {\isacharequal}\ {\isacharbraceleft}{\isacharquery}{\isasymalpha}\isactrlisub {\isadigit{1}}\ {\isasymmapsto}\ {\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isacharquery}{\isasymalpha}\isactrlisub n\ {\isasymmapsto}\ {\isasymtau}\isactrlisub n{\isacharbraceright}} presented in canonical order \isa{{\isacharparenleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub n{\isacharparenright}}. Within a given theory context, - there is a one-to-one correspondence between any constant \isa{c\isactrlisub {\isasymtau}} and the application \isa{c{\isacharparenleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub n{\isacharparenright}} of its type arguments. For example, with \isa{plus\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}}, the instance \isa{plus\isactrlbsub nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ nat\isactrlesub } corresponds to \isa{plus{\isacharparenleft}nat{\isacharparenright}}. - - Constant declarations \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} may contain sort constraints - for type variables in \isa{{\isasymsigma}}. These are observed by - type-inference as expected, but \emph{ignored} by the core logic. - This means the primitive logic is able to reason with instances of - polymorphic constants that the user-level type-checker would reject - due to violation of type class restrictions. - - \medskip An \emph{atomic} term is either a variable or constant. A - \emph{term} is defined inductively over atomic terms, with - abstraction and application as follows: \isa{t\ {\isacharequal}\ b\ {\isacharbar}\ x\isactrlisub {\isasymtau}\ {\isacharbar}\ {\isacharquery}x\isactrlisub {\isasymtau}\ {\isacharbar}\ c\isactrlisub {\isasymtau}\ {\isacharbar}\ {\isasymlambda}\isactrlisub {\isasymtau}{\isachardot}\ t\ {\isacharbar}\ t\isactrlisub {\isadigit{1}}\ t\isactrlisub {\isadigit{2}}}. - Parsing and printing takes care of converting between an external - representation with named bound variables. Subsequently, we shall - use the latter notation instead of internal de-Bruijn - representation. 
- - The inductive relation \isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}} assigns a (unique) type to a - term according to the structure of atomic terms, abstractions, and - applicatins: - \[ - \infer{\isa{a\isactrlisub {\isasymtau}\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}}}{} - \qquad - \infer{\isa{{\isacharparenleft}{\isasymlambda}x\isactrlsub {\isasymtau}{\isachardot}\ t{\isacharparenright}\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}\ {\isasymRightarrow}\ {\isasymsigma}}}{\isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}}} - \qquad - \infer{\isa{t\ u\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}}}{\isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}\ {\isasymRightarrow}\ {\isasymsigma}} & \isa{u\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}}} - \] - A \emph{well-typed term} is a term that can be typed according to these rules. - - Typing information can be omitted: type-inference is able to - reconstruct the most general type of a raw term, while assigning - most general types to all of its variables and constants. - Type-inference depends on a context of type constraints for fixed - variables, and declarations for polymorphic constants. - - The identity of atomic terms consists both of the name and the type - component. This means that different variables \isa{x\isactrlbsub {\isasymtau}\isactrlisub {\isadigit{1}}\isactrlesub } and \isa{x\isactrlbsub {\isasymtau}\isactrlisub {\isadigit{2}}\isactrlesub } may become the same after type - instantiation. Some outer layers of the system make it hard to - produce variables of the same name, but different types. In - contrast, mixed instances of polymorphic constants occur frequently. - - \medskip The \emph{hidden polymorphism} of a term \isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} - is the set of type variables occurring in \isa{t}, but not in - \isa{{\isasymsigma}}. This means that the term implicitly depends on type - arguments that are not accounted in the result type, i.e.\ there are - different type instances \isa{t{\isasymvartheta}\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} and \isa{t{\isasymvartheta}{\isacharprime}\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} with the same type. This slightly - pathological situation notoriously demands additional care. - - \medskip A \emph{term abbreviation} is a syntactic definition \isa{c\isactrlisub {\isasymsigma}\ {\isasymequiv}\ t} of a closed term \isa{t} of type \isa{{\isasymsigma}}, - without any hidden polymorphism. A term abbreviation looks like a - constant in the syntax, but is expanded before entering the logical - core. Abbreviations are usually reverted when printing terms, using - \isa{t\ {\isasymrightarrow}\ c\isactrlisub {\isasymsigma}} as rules for higher-order rewriting. - - \medskip Canonical operations on \isa{{\isasymlambda}}-terms include \isa{{\isasymalpha}{\isasymbeta}{\isasymeta}}-conversion: \isa{{\isasymalpha}}-conversion refers to capture-free - renaming of bound variables; \isa{{\isasymbeta}}-conversion contracts an - abstraction applied to an argument term, substituting the argument - in the body: \isa{{\isacharparenleft}{\isasymlambda}x{\isachardot}\ b{\isacharparenright}a} becomes \isa{b{\isacharbrackleft}a{\isacharslash}x{\isacharbrackright}}; \isa{{\isasymeta}}-conversion contracts vacuous application-abstraction: \isa{{\isasymlambda}x{\isachardot}\ f\ x} becomes \isa{f}, provided that the bound variable - does not occur in \isa{f}. 
- - Terms are normally treated modulo \isa{{\isasymalpha}}-conversion, which is - implicit in the de-Bruijn representation. Names for bound variables - in abstractions are maintained separately as (meaningless) comments, - mostly for parsing and printing. Full \isa{{\isasymalpha}{\isasymbeta}{\isasymeta}}-conversion is - commonplace in various standard operations (\secref{sec:obj-rules}) - that are based on higher-order unification and matching.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML type}{term}\verb|type term| \\ - \indexdef{}{ML}{op aconv}\verb|op aconv: term * term -> bool| \\ - \indexdef{}{ML}{map\_types}\verb|map_types: (typ -> typ) -> term -> term| \\ - \indexdef{}{ML}{fold\_types}\verb|fold_types: (typ -> 'a -> 'a) -> term -> 'a -> 'a| \\ - \indexdef{}{ML}{map\_aterms}\verb|map_aterms: (term -> term) -> term -> term| \\ - \indexdef{}{ML}{fold\_aterms}\verb|fold_aterms: (term -> 'a -> 'a) -> term -> 'a -> 'a| \\ - \end{mldecls} - \begin{mldecls} - \indexdef{}{ML}{fastype\_of}\verb|fastype_of: term -> typ| \\ - \indexdef{}{ML}{lambda}\verb|lambda: term -> term -> term| \\ - \indexdef{}{ML}{betapply}\verb|betapply: term * term -> term| \\ - \indexdef{}{ML}{Sign.declare\_const}\verb|Sign.declare_const: Properties.T -> (binding * typ) * mixfix ->|\isasep\isanewline% -\verb| theory -> term * theory| \\ - \indexdef{}{ML}{Sign.add\_abbrev}\verb|Sign.add_abbrev: string -> Properties.T -> binding * term ->|\isasep\isanewline% -\verb| theory -> (term * term) * theory| \\ - \indexdef{}{ML}{Sign.const\_typargs}\verb|Sign.const_typargs: theory -> string * typ -> typ list| \\ - \indexdef{}{ML}{Sign.const\_instance}\verb|Sign.const_instance: theory -> string * typ list -> typ| \\ - \end{mldecls} - - \begin{description} - - \item \verb|term| represents de-Bruijn terms, with comments in - abstractions, and explicitly named free variables and constants; - this is a datatype with constructors \verb|Bound|, \verb|Free|, \verb|Var|, \verb|Const|, \verb|Abs|, \verb|op $|. - - \item \isa{t}~\verb|aconv|~\isa{u} checks \isa{{\isasymalpha}}-equivalence of two terms. This is the basic equality relation - on type \verb|term|; raw datatype equality should only be used - for operations related to parsing or printing! - - \item \verb|map_types|~\isa{f\ t} applies the mapping \isa{f} to all types occurring in \isa{t}. - - \item \verb|fold_types|~\isa{f\ t} iterates the operation \isa{f} over all occurrences of types in \isa{t}; the term - structure is traversed from left to right. - - \item \verb|map_aterms|~\isa{f\ t} applies the mapping \isa{f} - to all atomic terms (\verb|Bound|, \verb|Free|, \verb|Var|, \verb|Const|) occurring in \isa{t}. - - \item \verb|fold_aterms|~\isa{f\ t} iterates the operation \isa{f} over all occurrences of atomic terms (\verb|Bound|, \verb|Free|, - \verb|Var|, \verb|Const|) in \isa{t}; the term structure is - traversed from left to right. - - \item \verb|fastype_of|~\isa{t} determines the type of a - well-typed term. This operation is relatively slow, despite the - omission of any sanity checks. - - \item \verb|lambda|~\isa{a\ b} produces an abstraction \isa{{\isasymlambda}a{\isachardot}\ b}, where occurrences of the atomic term \isa{a} in the - body \isa{b} are replaced by bound variables. 
- - \item \verb|betapply|~\isa{{\isacharparenleft}t{\isacharcomma}\ u{\isacharparenright}} produces an application \isa{t\ u}, with topmost \isa{{\isasymbeta}}-conversion if \isa{t} is an - abstraction. - - \item \verb|Sign.declare_const|~\isa{properties\ {\isacharparenleft}{\isacharparenleft}c{\isacharcomma}\ {\isasymsigma}{\isacharparenright}{\isacharcomma}\ mx{\isacharparenright}} - declares a new constant \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} with optional mixfix - syntax. - - \item \verb|Sign.add_abbrev|~\isa{print{\isacharunderscore}mode\ properties\ {\isacharparenleft}c{\isacharcomma}\ t{\isacharparenright}} - introduces a new term abbreviation \isa{c\ {\isasymequiv}\ t}. - - \item \verb|Sign.const_typargs|~\isa{thy\ {\isacharparenleft}c{\isacharcomma}\ {\isasymtau}{\isacharparenright}} and \verb|Sign.const_instance|~\isa{thy\ {\isacharparenleft}c{\isacharcomma}\ {\isacharbrackleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub n{\isacharbrackright}{\isacharparenright}} - convert between two representations of polymorphic constants: full - type instance vs.\ compact type arguments form. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsection{Theorems \label{sec:thms}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -A \emph{proposition} is a well-typed term of type \isa{prop}, a - \emph{theorem} is a proven proposition (depending on a context of - hypotheses and the background theory). Primitive inferences include - plain Natural Deduction rules for the primary connectives \isa{{\isasymAnd}} and \isa{{\isasymLongrightarrow}} of the framework. There is also a builtin - notion of equality/equivalence \isa{{\isasymequiv}}.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isamarkupsubsection{Primitive connectives and rules \label{sec:prim-rules}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -The theory \isa{Pure} contains constant declarations for the - primitive connectives \isa{{\isasymAnd}}, \isa{{\isasymLongrightarrow}}, and \isa{{\isasymequiv}} of - the logical framework, see \figref{fig:pure-connectives}. The - derivability judgment \isa{A\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlisub n\ {\isasymturnstile}\ B} is - defined inductively by the primitive inferences given in - \figref{fig:prim-rules}, with the global restriction that the - hypotheses must \emph{not} contain any schematic variables. The - builtin equality is conceptually axiomatized as shown in - \figref{fig:pure-equality}, although the implementation works - directly with derived inferences. 
- - \begin{figure}[htb] - \begin{center} - \begin{tabular}{ll} - \isa{all\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}{\isasymalpha}\ {\isasymRightarrow}\ prop{\isacharparenright}\ {\isasymRightarrow}\ prop} & universal quantification (binder \isa{{\isasymAnd}}) \\ - \isa{{\isasymLongrightarrow}\ {\isacharcolon}{\isacharcolon}\ prop\ {\isasymRightarrow}\ prop\ {\isasymRightarrow}\ prop} & implication (right associative infix) \\ - \isa{{\isasymequiv}\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}\ {\isasymRightarrow}\ prop} & equality relation (infix) \\ - \end{tabular} - \caption{Primitive connectives of Pure}\label{fig:pure-connectives} - \end{center} - \end{figure} - - \begin{figure}[htb] - \begin{center} - \[ - \infer[\isa{{\isacharparenleft}axiom{\isacharparenright}}]{\isa{{\isasymturnstile}\ A}}{\isa{A\ {\isasymin}\ {\isasymTheta}}} - \qquad - \infer[\isa{{\isacharparenleft}assume{\isacharparenright}}]{\isa{A\ {\isasymturnstile}\ A}}{} - \] - \[ - \infer[\isa{{\isacharparenleft}{\isasymAnd}{\isacharunderscore}intro{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ b{\isacharbrackleft}x{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ b{\isacharbrackleft}x{\isacharbrackright}} & \isa{x\ {\isasymnotin}\ {\isasymGamma}}} - \qquad - \infer[\isa{{\isacharparenleft}{\isasymAnd}{\isacharunderscore}elim{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ b{\isacharbrackleft}a{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ b{\isacharbrackleft}x{\isacharbrackright}}} - \] - \[ - \infer[\isa{{\isacharparenleft}{\isasymLongrightarrow}{\isacharunderscore}intro{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isacharminus}\ A\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}} - \qquad - \infer[\isa{{\isacharparenleft}{\isasymLongrightarrow}{\isacharunderscore}elim{\isacharparenright}}]{\isa{{\isasymGamma}\isactrlsub {\isadigit{1}}\ {\isasymunion}\ {\isasymGamma}\isactrlsub {\isadigit{2}}\ {\isasymturnstile}\ B}}{\isa{{\isasymGamma}\isactrlsub {\isadigit{1}}\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B} & \isa{{\isasymGamma}\isactrlsub {\isadigit{2}}\ {\isasymturnstile}\ A}} - \] - \caption{Primitive inferences of Pure}\label{fig:prim-rules} - \end{center} - \end{figure} - - \begin{figure}[htb] - \begin{center} - \begin{tabular}{ll} - \isa{{\isasymturnstile}\ {\isacharparenleft}{\isasymlambda}x{\isachardot}\ b{\isacharbrackleft}x{\isacharbrackright}{\isacharparenright}\ a\ {\isasymequiv}\ b{\isacharbrackleft}a{\isacharbrackright}} & \isa{{\isasymbeta}}-conversion \\ - \isa{{\isasymturnstile}\ x\ {\isasymequiv}\ x} & reflexivity \\ - \isa{{\isasymturnstile}\ x\ {\isasymequiv}\ y\ {\isasymLongrightarrow}\ P\ x\ {\isasymLongrightarrow}\ P\ y} & substitution \\ - \isa{{\isasymturnstile}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ f\ x\ {\isasymequiv}\ g\ x{\isacharparenright}\ {\isasymLongrightarrow}\ f\ {\isasymequiv}\ g} & extensionality \\ - \isa{{\isasymturnstile}\ {\isacharparenleft}A\ {\isasymLongrightarrow}\ B{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharparenleft}B\ {\isasymLongrightarrow}\ A{\isacharparenright}\ {\isasymLongrightarrow}\ A\ {\isasymequiv}\ B} & logical equivalence \\ - \end{tabular} - \caption{Conceptual axiomatization of Pure equality}\label{fig:pure-equality} - \end{center} - \end{figure} - - The introduction and elimination rules for \isa{{\isasymAnd}} and 
\isa{{\isasymLongrightarrow}} are analogous to formation of dependently typed \isa{{\isasymlambda}}-terms representing the underlying proof objects. Proof terms - are irrelevant in the Pure logic, though; they cannot occur within - propositions. The system provides a runtime option to record - explicit proof terms for primitive inferences. Thus all three - levels of \isa{{\isasymlambda}}-calculus become explicit: \isa{{\isasymRightarrow}} for - terms, and \isa{{\isasymAnd}{\isacharslash}{\isasymLongrightarrow}} for proofs (cf.\ - \cite{Berghofer-Nipkow:2000:TPHOL}). - - Observe that locally fixed parameters (as in \isa{{\isasymAnd}{\isacharunderscore}intro}) need - not be recorded in the hypotheses, because the simple syntactic - types of Pure are always inhabitable. ``Assumptions'' \isa{x\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}} for type-membership are only present as long as some \isa{x\isactrlisub {\isasymtau}} occurs in the statement body.\footnote{This is the key - difference to ``\isa{{\isasymlambda}HOL}'' in the PTS framework - \cite{Barendregt-Geuvers:2001}, where hypotheses \isa{x\ {\isacharcolon}\ A} are - treated uniformly for propositions and types.} - - \medskip The axiomatization of a theory is implicitly closed by - forming all instances of type and term variables: \isa{{\isasymturnstile}\ A{\isasymvartheta}} holds for any substitution instance of an axiom - \isa{{\isasymturnstile}\ A}. By pushing substitutions through derivations - inductively, we also get admissible \isa{generalize} and \isa{instance} rules as shown in \figref{fig:subst-rules}. - - \begin{figure}[htb] - \begin{center} - \[ - \infer{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isacharquery}{\isasymalpha}{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isasymalpha}{\isacharbrackright}} & \isa{{\isasymalpha}\ {\isasymnotin}\ {\isasymGamma}}} - \quad - \infer[\quad\isa{{\isacharparenleft}generalize{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isacharquery}x{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}x{\isacharbrackright}} & \isa{x\ {\isasymnotin}\ {\isasymGamma}}} - \] - \[ - \infer{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isasymtau}{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isacharquery}{\isasymalpha}{\isacharbrackright}}} - \quad - \infer[\quad\isa{{\isacharparenleft}instantiate{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}t{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isacharquery}x{\isacharbrackright}}} - \] - \caption{Admissible substitution rules}\label{fig:subst-rules} - \end{center} - \end{figure} - - Note that \isa{instantiate} does not require an explicit - side-condition, because \isa{{\isasymGamma}} may never contain schematic - variables. - - In principle, variables could be substituted in hypotheses as well, - but this would disrupt the monotonicity of reasoning: deriving - \isa{{\isasymGamma}{\isasymvartheta}\ {\isasymturnstile}\ B{\isasymvartheta}} from \isa{{\isasymGamma}\ {\isasymturnstile}\ B} is - correct, but \isa{{\isasymGamma}{\isasymvartheta}\ {\isasymsupseteq}\ {\isasymGamma}} does not necessarily hold: - the result belongs to a different proof context. - - \medskip An \emph{oracle} is a function that produces axioms on the - fly. 
Logically, this is an instance of the \isa{axiom} rule - (\figref{fig:prim-rules}), but there is an operational difference. - The system always records oracle invocations within derivations of - theorems by a unique tag. - - Axiomatizations should be limited to the bare minimum, typically as - part of the initial logical basis of an object-logic formalization. - Later on, theories are usually developed in a strictly definitional - fashion, by stating only certain equalities over new constants. - - A \emph{simple definition} consists of a constant declaration \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} together with an axiom \isa{{\isasymturnstile}\ c\ {\isasymequiv}\ t}, where \isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} is a closed term without any hidden polymorphism. The RHS - may depend on further defined constants, but not \isa{c} itself. - Definitions of functions may be presented as \isa{c\ \isactrlvec x\ {\isasymequiv}\ t} instead of the puristic \isa{c\ {\isasymequiv}\ {\isasymlambda}\isactrlvec x{\isachardot}\ t}. - - An \emph{overloaded definition} consists of a collection of axioms - for the same constant, with zero or one equations \isa{c{\isacharparenleft}{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharparenright}{\isasymkappa}{\isacharparenright}\ {\isasymequiv}\ t} for each type constructor \isa{{\isasymkappa}} (for - distinct variables \isa{\isactrlvec {\isasymalpha}}). The RHS may mention - previously defined constants as above, or arbitrary constants \isa{d{\isacharparenleft}{\isasymalpha}\isactrlisub i{\isacharparenright}} for some \isa{{\isasymalpha}\isactrlisub i} projected from \isa{\isactrlvec {\isasymalpha}}. Thus overloaded definitions essentially work by - primitive recursion over the syntactic structure of a single type - argument.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML type}{ctyp}\verb|type ctyp| \\ - \indexdef{}{ML type}{cterm}\verb|type cterm| \\ - \indexdef{}{ML}{Thm.ctyp\_of}\verb|Thm.ctyp_of: theory -> typ -> ctyp| \\ - \indexdef{}{ML}{Thm.cterm\_of}\verb|Thm.cterm_of: theory -> term -> cterm| \\ - \end{mldecls} - \begin{mldecls} - \indexdef{}{ML type}{thm}\verb|type thm| \\ - \indexdef{}{ML}{proofs}\verb|proofs: int ref| \\ - \indexdef{}{ML}{Thm.assume}\verb|Thm.assume: cterm -> thm| \\ - \indexdef{}{ML}{Thm.forall\_intr}\verb|Thm.forall_intr: cterm -> thm -> thm| \\ - \indexdef{}{ML}{Thm.forall\_elim}\verb|Thm.forall_elim: cterm -> thm -> thm| \\ - \indexdef{}{ML}{Thm.implies\_intr}\verb|Thm.implies_intr: cterm -> thm -> thm| \\ - \indexdef{}{ML}{Thm.implies\_elim}\verb|Thm.implies_elim: thm -> thm -> thm| \\ - \indexdef{}{ML}{Thm.generalize}\verb|Thm.generalize: string list * string list -> int -> thm -> thm| \\ - \indexdef{}{ML}{Thm.instantiate}\verb|Thm.instantiate: (ctyp * ctyp) list * (cterm * cterm) list -> thm -> thm| \\ - \indexdef{}{ML}{Thm.axiom}\verb|Thm.axiom: theory -> string -> thm| \\ - \indexdef{}{ML}{Thm.add\_oracle}\verb|Thm.add_oracle: bstring * ('a -> cterm) -> theory|\isasep\isanewline% -\verb| -> (string * ('a -> thm)) * theory| \\ - \end{mldecls} - \begin{mldecls} - \indexdef{}{ML}{Theory.add\_axioms\_i}\verb|Theory.add_axioms_i: (binding * term) list -> theory -> theory| \\ - \indexdef{}{ML}{Theory.add\_deps}\verb|Theory.add_deps: string -> string * typ -> (string * typ) list -> theory -> theory| \\ - \indexdef{}{ML}{Theory.add\_defs\_i}\verb|Theory.add_defs_i: bool -> bool -> 
(binding * term) list -> theory -> theory| \\ - \end{mldecls} - - \begin{description} - - \item \verb|ctyp| and \verb|cterm| represent certified types - and terms, respectively. These are abstract datatypes that - guarantee that their values have passed the full well-formedness (and - well-typedness) checks, relative to the declarations of type - constructors, constants etc. in the theory. - - \item \verb|Thm.ctyp_of|~\isa{thy\ {\isasymtau}} and \verb|Thm.cterm_of|~\isa{thy\ t} explicitly check types and terms, - respectively. This also involves some basic normalizations, such as - expansion of type and term abbreviations from the theory context. - - Re-certification is relatively slow and should be avoided in tight - reasoning loops. There are separate operations to decompose - certified entities (including actual theorems). - - \item \verb|thm| represents proven propositions. This is an - abstract datatype that guarantees that its values have been - constructed by basic principles of the \verb|Thm| module. - Every \verb|thm| value contains a sliding back-reference to the - enclosing theory, cf.\ \secref{sec:context-theory}. - - \item \verb|proofs| determines the detail of proof recording within - \verb|thm| values: \verb|0| records only the names of oracles, - \verb|1| records oracle names and propositions, \verb|2| additionally - records full proof terms. Officially named theorems that contribute - to a result are always recorded. - - \item \verb|Thm.assume|, \verb|Thm.forall_intr|, \verb|Thm.forall_elim|, \verb|Thm.implies_intr|, and \verb|Thm.implies_elim| - correspond to the primitive inferences of \figref{fig:prim-rules}. - - \item \verb|Thm.generalize|~\isa{{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharcomma}\ \isactrlvec x{\isacharparenright}} - corresponds to the \isa{generalize} rules of - \figref{fig:subst-rules}. Here collections of type and term - variables are generalized simultaneously, specified by the given - basic names. - - \item \verb|Thm.instantiate|~\isa{{\isacharparenleft}\isactrlvec {\isasymalpha}\isactrlisub s{\isacharcomma}\ \isactrlvec x\isactrlisub {\isasymtau}{\isacharparenright}} corresponds to the \isa{instantiate} rules - of \figref{fig:subst-rules}. Type variables are substituted before - term variables. Note that the types in \isa{\isactrlvec x\isactrlisub {\isasymtau}} - refer to the instantiated versions. - - \item \verb|Thm.axiom|~\isa{thy\ name} retrieves a named - axiom, cf.\ \isa{axiom} in \figref{fig:prim-rules}. - - \item \verb|Thm.add_oracle|~\isa{{\isacharparenleft}name{\isacharcomma}\ oracle{\isacharparenright}} produces a named - oracle rule, essentially generating arbitrary axioms on the fly, - cf.\ \isa{axiom} in \figref{fig:prim-rules}. - - \item \verb|Theory.add_axioms_i|~\isa{{\isacharbrackleft}{\isacharparenleft}name{\isacharcomma}\ A{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharbrackright}} declares - arbitrary propositions as axioms. - - \item \verb|Theory.add_deps|~\isa{name\ c\isactrlisub {\isasymtau}\ \isactrlvec d\isactrlisub {\isasymsigma}} declares dependencies of a named specification - for constant \isa{c\isactrlisub {\isasymtau}}, relative to existing - specifications for constants \isa{\isactrlvec d\isactrlisub {\isasymsigma}}.
- - \item \verb|Theory.add_defs_i|~\isa{unchecked\ overloaded\ {\isacharbrackleft}{\isacharparenleft}name{\isacharcomma}\ c\ \isactrlvec x\ {\isasymequiv}\ t{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharbrackright}} states a definitional axiom for an existing - constant \isa{c}. Dependencies are recorded (cf.\ \verb|Theory.add_deps|), unless the \isa{unchecked} option is set. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsubsection{Auxiliary definitions% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -Theory \isa{Pure} provides a few auxiliary definitions, see - \figref{fig:pure-aux}. These special constants are normally not - exposed to the user, but appear in internal encodings. - - \begin{figure}[htb] - \begin{center} - \begin{tabular}{ll} - \isa{conjunction\ {\isacharcolon}{\isacharcolon}\ prop\ {\isasymRightarrow}\ prop\ {\isasymRightarrow}\ prop} & (infix \isa{{\isacharampersand}}) \\ - \isa{{\isasymturnstile}\ A\ {\isacharampersand}\ B\ {\isasymequiv}\ {\isacharparenleft}{\isasymAnd}C{\isachardot}\ {\isacharparenleft}A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ C{\isacharparenright}\ {\isasymLongrightarrow}\ C{\isacharparenright}} \\[1ex] - \isa{prop\ {\isacharcolon}{\isacharcolon}\ prop\ {\isasymRightarrow}\ prop} & (prefix \isa{{\isacharhash}}, suppressed) \\ - \isa{{\isacharhash}A\ {\isasymequiv}\ A} \\[1ex] - \isa{term\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ prop} & (prefix \isa{TERM}) \\ - \isa{term\ x\ {\isasymequiv}\ {\isacharparenleft}{\isasymAnd}A{\isachardot}\ A\ {\isasymLongrightarrow}\ A{\isacharparenright}} \\[1ex] - \isa{TYPE\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ itself} & (prefix \isa{TYPE}) \\ - \isa{{\isacharparenleft}unspecified{\isacharparenright}} \\ - \end{tabular} - \caption{Definitions of auxiliary connectives}\label{fig:pure-aux} - \end{center} - \end{figure} - - Derived conjunction rules include introduction \isa{A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ A\ {\isacharampersand}\ B}, and destructions \isa{A\ {\isacharampersand}\ B\ {\isasymLongrightarrow}\ A} and \isa{A\ {\isacharampersand}\ B\ {\isasymLongrightarrow}\ B}. - Conjunction allows to treat simultaneous assumptions and conclusions - uniformly. For example, multiple claims are intermediately - represented as explicit conjunction, but this is refined into - separate sub-goals before the user continues the proof; the final - result is projected into a list of theorems (cf.\ - \secref{sec:tactical-goals}). - - The \isa{prop} marker (\isa{{\isacharhash}}) makes arbitrarily complex - propositions appear as atomic, without changing the meaning: \isa{{\isasymGamma}\ {\isasymturnstile}\ A} and \isa{{\isasymGamma}\ {\isasymturnstile}\ {\isacharhash}A} are interchangeable. See - \secref{sec:tactical-goals} for specific operations. - - The \isa{term} marker turns any well-typed term into a derivable - proposition: \isa{{\isasymturnstile}\ TERM\ t} holds unconditionally. Although - this is logically vacuous, it allows to treat terms and proofs - uniformly, similar to a type-theoretic framework. - - The \isa{TYPE} constructor is the canonical representative of - the unspecified type \isa{{\isasymalpha}\ itself}; it essentially injects the - language of types into that of terms. There is specific notation - \isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}} for \isa{TYPE\isactrlbsub {\isasymtau}\ itself\isactrlesub }. 
- Although being devoid of any particular meaning, the \isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}} accounts for the type \isa{{\isasymtau}} within the term - language. In particular, \isa{TYPE{\isacharparenleft}{\isasymalpha}{\isacharparenright}} may be used as formal - argument in primitive definitions, in order to circumvent hidden - polymorphism (cf.\ \secref{sec:terms}). For example, \isa{c\ TYPE{\isacharparenleft}{\isasymalpha}{\isacharparenright}\ {\isasymequiv}\ A{\isacharbrackleft}{\isasymalpha}{\isacharbrackright}} defines \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ itself\ {\isasymRightarrow}\ prop} in terms of - a proposition \isa{A} that depends on an additional type - argument, which is essentially a predicate on types.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML}{Conjunction.intr}\verb|Conjunction.intr: thm -> thm -> thm| \\ - \indexdef{}{ML}{Conjunction.elim}\verb|Conjunction.elim: thm -> thm * thm| \\ - \indexdef{}{ML}{Drule.mk\_term}\verb|Drule.mk_term: cterm -> thm| \\ - \indexdef{}{ML}{Drule.dest\_term}\verb|Drule.dest_term: thm -> cterm| \\ - \indexdef{}{ML}{Logic.mk\_type}\verb|Logic.mk_type: typ -> term| \\ - \indexdef{}{ML}{Logic.dest\_type}\verb|Logic.dest_type: term -> typ| \\ - \end{mldecls} - - \begin{description} - - \item \verb|Conjunction.intr| derives \isa{A\ {\isacharampersand}\ B} from \isa{A} and \isa{B}. - - \item \verb|Conjunction.elim| derives \isa{A} and \isa{B} - from \isa{A\ {\isacharampersand}\ B}. - - \item \verb|Drule.mk_term| derives \isa{TERM\ t}. - - \item \verb|Drule.dest_term| recovers term \isa{t} from \isa{TERM\ t}. - - \item \verb|Logic.mk_type|~\isa{{\isasymtau}} produces the term \isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}}. - - \item \verb|Logic.dest_type|~\isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}} recovers the type - \isa{{\isasymtau}}. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsection{Object-level rules \label{sec:obj-rules}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -The primitive inferences covered so far mostly serve foundational - purposes. User-level reasoning usually works via object-level rules - that are represented as theorems of Pure. Composition of rules - involves \emph{backchaining}, \emph{higher-order unification} modulo - \isa{{\isasymalpha}{\isasymbeta}{\isasymeta}}-conversion of \isa{{\isasymlambda}}-terms, and so-called - \emph{lifting} of rules into a context of \isa{{\isasymAnd}} and \isa{{\isasymLongrightarrow}} connectives. Thus the full power of higher-order Natural - Deduction in Isabelle/Pure becomes readily available.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isamarkupsubsection{Hereditary Harrop Formulae% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -The idea of object-level rules is to model Natural Deduction - inferences in the style of Gentzen \cite{Gentzen:1935}, but we allow - arbitrary nesting similar to \cite{extensions91}. 
The most basic - rule format is that of a \emph{Horn Clause}: - \[ - \infer{\isa{A}}{\isa{A\isactrlsub {\isadigit{1}}} & \isa{{\isasymdots}} & \isa{A\isactrlsub n}} - \] - where \isa{A{\isacharcomma}\ A\isactrlsub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlsub n} are atomic propositions - of the framework, usually of the form \isa{Trueprop\ B}, where - \isa{B} is a (compound) object-level statement. This - object-level inference corresponds to an iterated implication in - Pure like this: - \[ - \isa{A\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ A\isactrlsub n\ {\isasymLongrightarrow}\ A} - \] - As an example consider conjunction introduction: \isa{A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ A\ {\isasymand}\ B}. Any parameters occurring in such rule statements are - conceptionally treated as arbitrary: - \[ - \isa{{\isasymAnd}x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ x\isactrlsub m{\isachardot}\ A\isactrlsub {\isadigit{1}}\ x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ x\isactrlsub m\ {\isasymLongrightarrow}\ {\isasymdots}\ A\isactrlsub n\ x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ x\isactrlsub m\ {\isasymLongrightarrow}\ A\ x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ x\isactrlsub m} - \] - - Nesting of rules means that the positions of \isa{A\isactrlsub i} may - again hold compound rules, not just atomic propositions. - Propositions of this format are called \emph{Hereditary Harrop - Formulae} in the literature \cite{Miller:1991}. Here we give an - inductive characterization as follows: - - \medskip - \begin{tabular}{ll} - \isa{\isactrlbold x} & set of variables \\ - \isa{\isactrlbold A} & set of atomic propositions \\ - \isa{\isactrlbold H\ \ {\isacharequal}\ \ {\isasymAnd}\isactrlbold x\isactrlsup {\isacharasterisk}{\isachardot}\ \isactrlbold H\isactrlsup {\isacharasterisk}\ {\isasymLongrightarrow}\ \isactrlbold A} & set of Hereditary Harrop Formulas \\ - \end{tabular} - \medskip - - \noindent Thus we essentially impose nesting levels on propositions - formed from \isa{{\isasymAnd}} and \isa{{\isasymLongrightarrow}}. At each level there is a - prefix of parameters and compound premises, concluding an atomic - proposition. Typical examples are \isa{{\isasymlongrightarrow}}-introduction \isa{{\isacharparenleft}A\ {\isasymLongrightarrow}\ B{\isacharparenright}\ {\isasymLongrightarrow}\ A\ {\isasymlongrightarrow}\ B} or mathematical induction \isa{P\ {\isadigit{0}}\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymAnd}n{\isachardot}\ P\ n\ {\isasymLongrightarrow}\ P\ {\isacharparenleft}Suc\ n{\isacharparenright}{\isacharparenright}\ {\isasymLongrightarrow}\ P\ n}. Even deeper nesting occurs in well-founded - induction \isa{{\isacharparenleft}{\isasymAnd}x{\isachardot}\ {\isacharparenleft}{\isasymAnd}y{\isachardot}\ y\ {\isasymprec}\ x\ {\isasymLongrightarrow}\ P\ y{\isacharparenright}\ {\isasymLongrightarrow}\ P\ x{\isacharparenright}\ {\isasymLongrightarrow}\ P\ x}, but this - already marks the limit of rule complexity seen in practice. 
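-
-   \medskip As a concrete illustration (a sketch only, not part of the
-   official interface), the following Isabelle/ML fragment decomposes a
-   proposition that is already in canonical Hereditary Harrop Format into
-   its outermost parameter prefix, its premises, and its atomic
-   conclusion, mirroring the inductive characterization given above.  The
-   function name \verb|dest_hhf| is hypothetical; \verb|Logic.strip_params|,
-   \verb|Logic.strip_assums_hyp|, and \verb|Logic.strip_assums_concl| are
-   taken from the Pure library.
-
- \begin{verbatim}
-   (* sketch: split !!x1 ... xm. H1 ==> ... ==> Hn ==> A into its parts *)
-   fun dest_hhf (prop: term) =
-     let
-       val params = Logic.strip_params prop;       (* outer parameters x1 ... xm *)
-       val prems = Logic.strip_assums_hyp prop;    (* (compound) premises H1 ... Hn *)
-       val concl = Logic.strip_assums_concl prop;  (* atomic conclusion A *)
-     in (params, prems, concl) end;
-
-   (* hypothetical usage: dest_hhf (Thm.prop_of some_thm) *)
- \end{verbatim}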
- - \medskip Regular user-level inferences in Isabelle/Pure always - maintain the following canonical form of results: - - \begin{itemize} - - \item Normalization by \isa{{\isacharparenleft}A\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ B\ x{\isacharparenright}{\isacharparenright}\ {\isasymequiv}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ A\ {\isasymLongrightarrow}\ B\ x{\isacharparenright}}, - which is a theorem of Pure, means that quantifiers are pushed in - front of implication at each level of nesting. The normal form is a - Hereditary Harrop Formula. - - \item The outermost prefix of parameters is represented via - schematic variables: instead of \isa{{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ A\ \isactrlvec x} we have \isa{\isactrlvec H\ {\isacharquery}\isactrlvec x\ {\isasymLongrightarrow}\ A\ {\isacharquery}\isactrlvec x}. - Note that this representation loses information about the order of - parameters, and vacuous quantifiers vanish automatically. - - \end{itemize}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML}{MetaSimplifier.norm\_hhf}\verb|MetaSimplifier.norm_hhf: thm -> thm| \\ - \end{mldecls} - - \begin{description} - - \item \verb|MetaSimplifier.norm_hhf|~\isa{thm} normalizes the given - theorem according to the canonical form specified above. This is - occasionally helpful to repair some low-level tools that do not - handle Hereditary Harrop Formulae properly. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsubsection{Rule composition% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -The rule calculus of Isabelle/Pure provides two main inferences: - \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} (i.e.\ back-chaining of rules) and - \hyperlink{inference.assumption}{\mbox{\isa{assumption}}} (i.e.\ closing a branch), both modulo - higher-order unification. There are also combined variants, notably - \hyperlink{inference.elim-resolution}{\mbox{\isa{elim{\isacharunderscore}resolution}}} and \hyperlink{inference.dest-resolution}{\mbox{\isa{dest{\isacharunderscore}resolution}}}. - - To understand the all-important \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} principle, - we first consider raw \indexdef{}{inference}{composition}\hypertarget{inference.composition}{\hyperlink{inference.composition}{\mbox{\isa{composition}}}} (modulo - higher-order unification with substitution \isa{{\isasymvartheta}}): - \[ - \infer[(\indexdef{}{inference}{composition}\hypertarget{inference.composition}{\hyperlink{inference.composition}{\mbox{\isa{composition}}}})]{\isa{\isactrlvec A{\isasymvartheta}\ {\isasymLongrightarrow}\ C{\isasymvartheta}}} - {\isa{\isactrlvec A\ {\isasymLongrightarrow}\ B} & \isa{B{\isacharprime}\ {\isasymLongrightarrow}\ C} & \isa{B{\isasymvartheta}\ {\isacharequal}\ B{\isacharprime}{\isasymvartheta}}} - \] - Here the conclusion of the first rule is unified with the premise of - the second; the resulting rule instance inherits the premises of the - first and conclusion of the second. Note that \isa{C} can again - consist of iterated implications. We can also permute the premises - of the second rule back-and-forth in order to compose with \isa{B{\isacharprime}} in any position (subsequently we shall always refer to - position 1 w.l.o.g.).
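-
-   \medskip At the ML level, raw composition is available through the
-   infix combinator \verb|COMP| (assumed here; it is not among the
-   declarations listed in this chapter), which unifies the conclusion of
-   its first argument with the first premise of its second argument,
-   without any lifting.  A minimal sketch, with hypothetical names:
-
- \begin{verbatim}
-   (* sketch: compose rule1 with premise 1 of rule2 (no lifting);
-      this fails unless the composition result is unique *)
-   fun compose_rules (rule1: thm) (rule2: thm) : thm = rule1 COMP rule2;
- \end{verbatim}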
- - In \hyperlink{inference.composition}{\mbox{\isa{composition}}} the internal structure of the common - part \isa{B} and \isa{B{\isacharprime}} is not taken into account. For - proper \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} we require \isa{B} to be atomic, - and explicitly observe the structure \isa{{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ B{\isacharprime}\ \isactrlvec x} of the premise of the second rule. The - idea is to adapt the first rule by ``lifting'' it into this context, - by means of iterated application of the following inferences: - \[ - \infer[(\indexdef{}{inference}{imp\_lift}\hypertarget{inference.imp-lift}{\hyperlink{inference.imp-lift}{\mbox{\isa{imp{\isacharunderscore}lift}}}})]{\isa{{\isacharparenleft}\isactrlvec H\ {\isasymLongrightarrow}\ \isactrlvec A{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharparenleft}\isactrlvec H\ {\isasymLongrightarrow}\ B{\isacharparenright}}}{\isa{\isactrlvec A\ {\isasymLongrightarrow}\ B}} - \] - \[ - \infer[(\indexdef{}{inference}{all\_lift}\hypertarget{inference.all-lift}{\hyperlink{inference.all-lift}{\mbox{\isa{all{\isacharunderscore}lift}}}})]{\isa{{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec A\ {\isacharparenleft}{\isacharquery}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ B\ {\isacharparenleft}{\isacharquery}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}}}{\isa{\isactrlvec A\ {\isacharquery}\isactrlvec a\ {\isasymLongrightarrow}\ B\ {\isacharquery}\isactrlvec a}} - \] - By combining raw composition with lifting, we get full \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} as follows: - \[ - \infer[(\indexdef{}{inference}{resolution}\hypertarget{inference.resolution}{\hyperlink{inference.resolution}{\mbox{\isa{resolution}}}})] - {\isa{{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ \isactrlvec A\ {\isacharparenleft}{\isacharquery}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}{\isasymvartheta}\ {\isasymLongrightarrow}\ C{\isasymvartheta}}} - {\begin{tabular}{l} - \isa{\isactrlvec A\ {\isacharquery}\isactrlvec a\ {\isasymLongrightarrow}\ B\ {\isacharquery}\isactrlvec a} \\ - \isa{{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ B{\isacharprime}\ \isactrlvec x{\isacharparenright}\ {\isasymLongrightarrow}\ C} \\ - \isa{{\isacharparenleft}{\isasymlambda}\isactrlvec x{\isachardot}\ B\ {\isacharparenleft}{\isacharquery}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}{\isasymvartheta}\ {\isacharequal}\ B{\isacharprime}{\isasymvartheta}} \\ - \end{tabular}} - \] - - Continued resolution of rules allows to back-chain a problem towards - more and sub-problems. 
Branches are closed either by resolving with - a rule of 0 premises, or by producing a ``short-circuit'' within a - solved situation (again modulo unification): - \[ - \infer[(\indexdef{}{inference}{assumption}\hypertarget{inference.assumption}{\hyperlink{inference.assumption}{\mbox{\isa{assumption}}}})]{\isa{C{\isasymvartheta}}} - {\isa{{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ A\ \isactrlvec x{\isacharparenright}\ {\isasymLongrightarrow}\ C} & \isa{A{\isasymvartheta}\ {\isacharequal}\ H\isactrlsub i{\isasymvartheta}}~~\text{(for some~\isa{i})}} - \] - - FIXME \indexdef{}{inference}{elim\_resolution}\hypertarget{inference.elim-resolution}{\hyperlink{inference.elim-resolution}{\mbox{\isa{elim{\isacharunderscore}resolution}}}}, \indexdef{}{inference}{dest\_resolution}\hypertarget{inference.dest-resolution}{\hyperlink{inference.dest-resolution}{\mbox{\isa{dest{\isacharunderscore}resolution}}}}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML}{op RS}\verb|op RS: thm * thm -> thm| \\ - \indexdef{}{ML}{op OF}\verb|op OF: thm * thm list -> thm| \\ - \end{mldecls} - - \begin{description} - - \item \isa{rule\isactrlsub {\isadigit{1}}\ RS\ rule\isactrlsub {\isadigit{2}}} resolves \isa{rule\isactrlsub {\isadigit{1}}} with \isa{rule\isactrlsub {\isadigit{2}}} according to the - \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} principle explained above. Note that the - corresponding attribute in the Isar language is called \hyperlink{attribute.THEN}{\mbox{\isa{THEN}}}. - - \item \isa{rule\ OF\ rules} resolves a list of rules with the - first rule, addressing its premises \isa{{\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ length\ rules} - (operating from last to first). This means the newly emerging - premises are all concatenated, without interfering. Also note that - compared to \isa{RS}, the rule argument order is swapped: \isa{rule\isactrlsub {\isadigit{1}}\ RS\ rule\isactrlsub {\isadigit{2}}\ {\isacharequal}\ rule\isactrlsub {\isadigit{2}}\ OF\ {\isacharbrackleft}rule\isactrlsub {\isadigit{1}}{\isacharbrackright}}. 
- - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isadelimtheory -% -\endisadelimtheory -% -\isatagtheory -\isacommand{end}\isamarkupfalse% -% -\endisatagtheory -{\isafoldtheory}% -% -\isadelimtheory -% -\endisadelimtheory -\isanewline -\end{isabellebody}% -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "root" -%%% End: diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarImplementation/Thy/document/ML.tex diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarImplementation/Thy/document/Prelim.tex --- a/doc-src/IsarImplementation/Thy/document/Prelim.tex Thu Mar 05 02:20:06 2009 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,896 +0,0 @@ -% -\begin{isabellebody}% -\def\isabellecontext{Prelim}% -% -\isadelimtheory -% -\endisadelimtheory -% -\isatagtheory -\isacommand{theory}\isamarkupfalse% -\ Prelim\isanewline -\isakeyword{imports}\ Base\isanewline -\isakeyword{begin}% -\endisatagtheory -{\isafoldtheory}% -% -\isadelimtheory -% -\endisadelimtheory -% -\isamarkupchapter{Preliminaries% -} -\isamarkuptrue% -% -\isamarkupsection{Contexts \label{sec:context}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -A logical context represents the background that is required for - formulating statements and composing proofs. It acts as a medium to - produce formal content, depending on earlier material (declarations, - results etc.). - - For example, derivations within the Isabelle/Pure logic can be - described as a judgment \isa{{\isasymGamma}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ {\isasymphi}}, which means that a - proposition \isa{{\isasymphi}} is derivable from hypotheses \isa{{\isasymGamma}} - within the theory \isa{{\isasymTheta}}. There are logical reasons for - keeping \isa{{\isasymTheta}} and \isa{{\isasymGamma}} separate: theories can be - liberal about supporting type constructors and schematic - polymorphism of constants and axioms, while the inner calculus of - \isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymphi}} is strictly limited to Simple Type Theory (with - fixed type variables in the assumptions). - - \medskip Contexts and derivations are linked by the following key - principles: - - \begin{itemize} - - \item Transfer: monotonicity of derivations admits results to be - transferred into a \emph{larger} context, i.e.\ \isa{{\isasymGamma}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ {\isasymphi}} implies \isa{{\isasymGamma}{\isacharprime}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\isactrlsub {\isacharprime}\ {\isasymphi}} for contexts \isa{{\isasymTheta}{\isacharprime}\ {\isasymsupseteq}\ {\isasymTheta}} and \isa{{\isasymGamma}{\isacharprime}\ {\isasymsupseteq}\ {\isasymGamma}}. - - \item Export: discharge of hypotheses admits results to be exported - into a \emph{smaller} context, i.e.\ \isa{{\isasymGamma}{\isacharprime}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ {\isasymphi}} - implies \isa{{\isasymGamma}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ {\isasymDelta}\ {\isasymLongrightarrow}\ {\isasymphi}} where \isa{{\isasymGamma}{\isacharprime}\ {\isasymsupseteq}\ {\isasymGamma}} and - \isa{{\isasymDelta}\ {\isacharequal}\ {\isasymGamma}{\isacharprime}\ {\isacharminus}\ {\isasymGamma}}. Note that \isa{{\isasymTheta}} remains unchanged here, - only the \isa{{\isasymGamma}} part is affected. 
- - \end{itemize} - - \medskip By modeling the main characteristics of the primitive - \isa{{\isasymTheta}} and \isa{{\isasymGamma}} above, and abstracting over any - particular logical content, we arrive at the fundamental notions of - \emph{theory context} and \emph{proof context} in Isabelle/Isar. - These implement a certain policy to manage arbitrary \emph{context - data}. There is a strongly-typed mechanism to declare new kinds of - data at compile time. - - The internal bootstrap process of Isabelle/Pure eventually reaches a - stage where certain data slots provide the logical content of \isa{{\isasymTheta}} and \isa{{\isasymGamma}} sketched above, but this does not stop there! - Various additional data slots support all kinds of mechanisms that - are not necessarily part of the core logic. - - For example, there would be data for canonical introduction and - elimination rules for arbitrary operators (depending on the - object-logic and application), which enables users to perform - standard proof steps implicitly (cf.\ the \isa{rule} method - \cite{isabelle-isar-ref}). - - \medskip Thus Isabelle/Isar is able to bring forth more and more - concepts successively. In particular, an object-logic like - Isabelle/HOL continues the Isabelle/Pure setup by adding specific - components for automated reasoning (classical reasoner, tableau - prover, structured induction etc.) and derived specification - mechanisms (inductive predicates, recursive functions etc.). All of - this is ultimately based on the generic data management by theory - and proof contexts introduced here.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isamarkupsubsection{Theory context \label{sec:context-theory}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -A \emph{theory} is a data container with explicit name and unique - identifier. Theories are related by a (nominal) sub-theory - relation, which corresponds to the dependency graph of the original - construction; each theory is derived from a certain sub-graph of - ancestor theories. - - The \isa{merge} operation produces the least upper bound of two - theories, which actually degenerates into absorption of one theory - into the other (due to the nominal sub-theory relation). - - The \isa{begin} operation starts a new theory by importing - several parent theories and entering a special \isa{draft} mode, - which is sustained until the final \isa{end} operation. A draft - theory acts like a linear type, where updates invalidate earlier - versions. An invalidated draft is called ``stale''. - - The \isa{checkpoint} operation produces an intermediate stepping - stone that will survive the next update: both the original and the - changed theory remain valid and are related by the sub-theory - relation. Checkpointing essentially recovers purely functional - theory values, at the expense of some extra internal bookkeeping. - - The \isa{copy} operation produces an auxiliary version that has - the same data content, but is unrelated to the original: updates of - the copy do not affect the original, neither does the sub-theory - relation hold. - - \medskip The example in \figref{fig:ex-theory} below shows a theory - graph derived from \isa{Pure}, with theory \isa{Length} - importing \isa{Nat} and \isa{List}. The body of \isa{Length} consists of a sequence of updates, working mostly on - drafts. Intermediate checkpoints may occur as well, due to the - history mechanism provided by the Isar top-level, cf.\ - \secref{sec:isar-toplevel}. 
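-
-   \medskip The checkpoint discipline sketched above can be made concrete
-   in Isabelle/ML as follows, using operations that are listed in the next
-   ML section; the update function \verb|f| and the function name are
-   hypothetical.
-
- \begin{verbatim}
-   (* sketch: perform an update after an explicit checkpoint, so that
-      both the old and the new theory value remain valid *)
-   fun update_after_checkpoint (f: theory -> theory) (thy: theory) =
-     let
-       val thy0 = Theory.checkpoint thy;  (* stepping stone *)
-       val thy1 = f thy0;                 (* thy0 survives this update *)
-     in (thy0, thy1) end;
-
-   (* Theory.subthy (thy0, thy1) should hold for the resulting pair *)
- \end{verbatim}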
- - \begin{figure}[htb] - \begin{center} - \begin{tabular}{rcccl} - & & \isa{Pure} \\ - & & \isa{{\isasymdown}} \\ - & & \isa{FOL} \\ - & $\swarrow$ & & $\searrow$ & \\ - \isa{Nat} & & & & \isa{List} \\ - & $\searrow$ & & $\swarrow$ \\ - & & \isa{Length} \\ - & & \multicolumn{3}{l}{~~\hyperlink{keyword.imports}{\mbox{\isa{\isakeyword{imports}}}}} \\ - & & \multicolumn{3}{l}{~~\hyperlink{keyword.begin}{\mbox{\isa{\isakeyword{begin}}}}} \\ - & & $\vdots$~~ \\ - & & \isa{{\isasymbullet}}~~ \\ - & & $\vdots$~~ \\ - & & \isa{{\isasymbullet}}~~ \\ - & & $\vdots$~~ \\ - & & \multicolumn{3}{l}{~~\hyperlink{command.end}{\mbox{\isa{\isacommand{end}}}}} \\ - \end{tabular} - \caption{A theory definition depending on ancestors}\label{fig:ex-theory} - \end{center} - \end{figure} - - \medskip There is a separate notion of \emph{theory reference} for - maintaining a live link to an evolving theory context: updates on - drafts are propagated automatically. Dynamic updating stops after - an explicit \isa{end} only. - - Derived entities may store a theory reference in order to indicate - the context they belong to. This implicitly assumes monotonic - reasoning, because the referenced context may become larger without - further notice.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML type}{theory}\verb|type theory| \\ - \indexdef{}{ML}{Theory.subthy}\verb|Theory.subthy: theory * theory -> bool| \\ - \indexdef{}{ML}{Theory.merge}\verb|Theory.merge: theory * theory -> theory| \\ - \indexdef{}{ML}{Theory.checkpoint}\verb|Theory.checkpoint: theory -> theory| \\ - \indexdef{}{ML}{Theory.copy}\verb|Theory.copy: theory -> theory| \\ - \end{mldecls} - \begin{mldecls} - \indexdef{}{ML type}{theory\_ref}\verb|type theory_ref| \\ - \indexdef{}{ML}{Theory.deref}\verb|Theory.deref: theory_ref -> theory| \\ - \indexdef{}{ML}{Theory.check\_thy}\verb|Theory.check_thy: theory -> theory_ref| \\ - \end{mldecls} - - \begin{description} - - \item \verb|theory| represents theory contexts. This is - essentially a linear type! Most operations destroy the original - version, which then becomes ``stale''. - - \item \verb|Theory.subthy|~\isa{{\isacharparenleft}thy\isactrlsub {\isadigit{1}}{\isacharcomma}\ thy\isactrlsub {\isadigit{2}}{\isacharparenright}} - compares theories according to the inherent graph structure of the - construction. This sub-theory relation is a nominal approximation - of inclusion (\isa{{\isasymsubseteq}}) of the corresponding content. - - \item \verb|Theory.merge|~\isa{{\isacharparenleft}thy\isactrlsub {\isadigit{1}}{\isacharcomma}\ thy\isactrlsub {\isadigit{2}}{\isacharparenright}} - absorbs one theory into the other. This fails for unrelated - theories! - - \item \verb|Theory.checkpoint|~\isa{thy} produces a safe - stepping stone in the linear development of \isa{thy}. The next - update will result in two related, valid theories. - - \item \verb|Theory.copy|~\isa{thy} produces a variant of \isa{thy} that holds a copy of the same data. The result is not - related to the original; the original is unchanged. - - \item \verb|theory_ref| represents a sliding reference to an - always valid theory; updates on the original are propagated - automatically. - - \item \verb|Theory.deref|~\isa{thy{\isacharunderscore}ref} turns a \verb|theory_ref| into an \verb|theory| value. As the referenced - theory evolves monotonically over time, later invocations of \verb|Theory.deref| may refer to a larger context. 
- - \item \verb|Theory.check_thy|~\isa{thy} produces a \verb|theory_ref| from a valid \verb|theory| value. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsubsection{Proof context \label{sec:context-proof}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -A proof context is a container for pure data with a back-reference - to the theory it belongs to. The \isa{init} operation creates a - proof context from a given theory. Modifications to draft theories - are propagated to the proof context as usual, but there is also an - explicit \isa{transfer} operation to force resynchronization - with more substantial updates to the underlying theory. The actual - context data does not require any special bookkeeping, thanks to the - lack of destructive features. - - Entities derived in a proof context need to record inherent logical - requirements explicitly, since there is no separate context - identification as for theories. For example, hypotheses used in - primitive derivations (cf.\ \secref{sec:thms}) are recorded - separately within the sequent \isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymphi}}, just to make double - sure. Results could still leak into an alien proof context due to - programming errors, but Isabelle/Isar includes some extra validity - checks in critical positions, notably at the end of a sub-proof. - - Proof contexts may be manipulated arbitrarily, although the common - discipline is to follow block structure as a mental model: a given - context is extended consecutively, and results are exported back - into the original context. Note that the Isar proof states model - block-structured reasoning explicitly, using a stack of proof - contexts internally.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML type}{Proof.context}\verb|type Proof.context| \\ - \indexdef{}{ML}{ProofContext.init}\verb|ProofContext.init: theory -> Proof.context| \\ - \indexdef{}{ML}{ProofContext.theory\_of}\verb|ProofContext.theory_of: Proof.context -> theory| \\ - \indexdef{}{ML}{ProofContext.transfer}\verb|ProofContext.transfer: theory -> Proof.context -> Proof.context| \\ - \end{mldecls} - - \begin{description} - - \item \verb|Proof.context| represents proof contexts. Elements - of this type are essentially pure values, with a sliding reference - to the background theory. - - \item \verb|ProofContext.init|~\isa{thy} produces a proof context - derived from \isa{thy}, initializing all data. - - \item \verb|ProofContext.theory_of|~\isa{ctxt} selects the - background theory from \isa{ctxt}, dereferencing its internal - \verb|theory_ref|. - - \item \verb|ProofContext.transfer|~\isa{thy\ ctxt} promotes the - background theory of \isa{ctxt} to the super theory \isa{thy}. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsubsection{Generic contexts \label{sec:generic-context}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -A generic context is the disjoint sum of either a theory or proof - context. Occasionally, this enables uniform treatment of generic - context data, typically extra-logical information. Operations on - generic contexts include the usual injections, partial selections, - and combinators for lifting operations on either component of the - disjoint sum. 
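-
-   \medskip For illustration, here is a small Isabelle/ML sketch that
-   treats both cases of a generic context explicitly; the function name is
-   hypothetical, the constructors \verb|Context.Theory| and
-   \verb|Context.Proof| are described below, and \verb|Context.theory_name|
-   is assumed from the Pure library.
-
- \begin{verbatim}
-   (* sketch: the name of the (background) theory of a generic context *)
-   fun generic_theory_name (Context.Theory thy) = Context.theory_name thy
-     | generic_theory_name (Context.Proof ctxt) =
-         Context.theory_name (ProofContext.theory_of ctxt);
- \end{verbatim}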
- - Moreover, there are total operations \isa{theory{\isacharunderscore}of} and \isa{proof{\isacharunderscore}of} to convert a generic context into either kind: a theory - can always be selected from the sum, while a proof context might - have to be constructed by an ad-hoc \isa{init} operation.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML type}{Context.generic}\verb|type Context.generic| \\ - \indexdef{}{ML}{Context.theory\_of}\verb|Context.theory_of: Context.generic -> theory| \\ - \indexdef{}{ML}{Context.proof\_of}\verb|Context.proof_of: Context.generic -> Proof.context| \\ - \end{mldecls} - - \begin{description} - - \item \verb|Context.generic| is the direct sum of \verb|theory| and \verb|Proof.context|, with the datatype - constructors \verb|Context.Theory| and \verb|Context.Proof|. - - \item \verb|Context.theory_of|~\isa{context} always produces a - theory from the generic \isa{context}, using \verb|ProofContext.theory_of| as required. - - \item \verb|Context.proof_of|~\isa{context} always produces a - proof context from the generic \isa{context}, using \verb|ProofContext.init| as required (note that this re-initializes the - context data with each invocation). - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsubsection{Context data \label{sec:context-data}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -The main purpose of theory and proof contexts is to manage arbitrary - data. New data types can be declared incrementally at compile time. - There are separate declaration mechanisms for any of the three kinds - of contexts: theory, proof, generic. - - \paragraph{Theory data} may refer to destructive entities, which are - maintained in direct correspondence to the linear evolution of - theory values, including explicit copies.\footnote{Most existing - instances of destructive theory data are merely historical relics - (e.g.\ the destructive theorem storage, and destructive hints for - the Simplifier and Classical rules).} A theory data declaration - needs to implement the following SML signature: - - \medskip - \begin{tabular}{ll} - \isa{{\isasymtype}\ T} & representing type \\ - \isa{{\isasymval}\ empty{\isacharcolon}\ T} & empty default value \\ - \isa{{\isasymval}\ copy{\isacharcolon}\ T\ {\isasymrightarrow}\ T} & refresh impure data \\ - \isa{{\isasymval}\ extend{\isacharcolon}\ T\ {\isasymrightarrow}\ T} & re-initialize on import \\ - \isa{{\isasymval}\ merge{\isacharcolon}\ T\ {\isasymtimes}\ T\ {\isasymrightarrow}\ T} & join on import \\ - \end{tabular} - \medskip - - \noindent The \isa{empty} value acts as initial default for - \emph{any} theory that does not declare actual data content; \isa{copy} maintains persistent integrity for impure data and is just - the identity for pure values; \isa{extend} acts like a - unitary version of \isa{merge}; both operations should also - include the functionality of \isa{copy} for impure data. - - \paragraph{Proof context data} is purely functional.
A declaration - needs to implement the following SML signature: - - \medskip - \begin{tabular}{ll} - \isa{{\isasymtype}\ T} & representing type \\ - \isa{{\isasymval}\ init{\isacharcolon}\ theory\ {\isasymrightarrow}\ T} & produce initial value \\ - \end{tabular} - \medskip - - \noindent The \isa{init} operation is supposed to produce a pure - value from the given background theory. - - \paragraph{Generic data} provides a hybrid interface for both theory - and proof data. The declaration is essentially the same as for - (pure) theory data, without \isa{copy}. The \isa{init} - operation for proof contexts merely selects the current data value - from the background theory. - - \bigskip A data declaration of type \isa{T} results in the - following interface: - - \medskip - \begin{tabular}{ll} - \isa{init{\isacharcolon}\ theory\ {\isasymrightarrow}\ T} \\ - \isa{get{\isacharcolon}\ context\ {\isasymrightarrow}\ T} \\ - \isa{put{\isacharcolon}\ T\ {\isasymrightarrow}\ context\ {\isasymrightarrow}\ context} \\ - \isa{map{\isacharcolon}\ {\isacharparenleft}T\ {\isasymrightarrow}\ T{\isacharparenright}\ {\isasymrightarrow}\ context\ {\isasymrightarrow}\ context} \\ - \end{tabular} - \medskip - - \noindent Here \isa{init} is only applicable to impure theory - data to install a fresh copy persistently (destructive update on - uninitialized has no permanent effect). The other operations provide - access for the particular kind of context (theory, proof, or generic - context). Note that this is a safe interface: there is no other way - to access the corresponding data slot of a context. By keeping - these operations private, a component may maintain abstract values - authentically, without other components interfering.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML functor}{TheoryDataFun}\verb|functor TheoryDataFun| \\ - \indexdef{}{ML functor}{ProofDataFun}\verb|functor ProofDataFun| \\ - \indexdef{}{ML functor}{GenericDataFun}\verb|functor GenericDataFun| \\ - \end{mldecls} - - \begin{description} - - \item \verb|TheoryDataFun|\isa{{\isacharparenleft}spec{\isacharparenright}} declares data for - type \verb|theory| according to the specification provided as - argument structure. The resulting structure provides data init and - access operations as described above. - - \item \verb|ProofDataFun|\isa{{\isacharparenleft}spec{\isacharparenright}} is analogous to - \verb|TheoryDataFun| for type \verb|Proof.context|. - - \item \verb|GenericDataFun|\isa{{\isacharparenleft}spec{\isacharparenright}} is analogous to - \verb|TheoryDataFun| for type \verb|Context.generic|. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsection{Names \label{sec:names}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -In principle, a name is just a string, but there are various - convention for encoding additional structure. For example, ``\isa{Foo{\isachardot}bar{\isachardot}baz}'' is considered as a qualified name consisting of - three basic name components. 
The individual constituents of a name - may have further substructure, e.g.\ the string - ``\verb,\,\verb,<alpha>,'' encodes as a single symbol.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isamarkupsubsection{Strings of symbols% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -A \emph{symbol} constitutes the smallest textual unit in Isabelle - --- raw characters are normally not encountered at all. Isabelle - strings consist of a sequence of symbols, represented as a packed - string or a list of strings. Each symbol is in itself a small - string, which has either one of the following forms: - - \begin{enumerate} - - \item a single ASCII character ``\isa{c}'', for example - ``\verb,a,'', - - \item a regular symbol ``\verb,\,\verb,<,\isa{ident}\verb,>,'', - for example ``\verb,\,\verb,<alpha>,'', - - \item a control symbol ``\verb,\,\verb,<^,\isa{ident}\verb,>,'', - for example ``\verb,\,\verb,<^bold>,'', - - \item a raw symbol ``\verb,\,\verb,<^raw:,\isa{text}\verb,>,'' - where \isa{text} consists of printable characters excluding - ``\verb,.,'' and ``\verb,>,'', for example - ``\verb,\,\verb,<^raw:$\sum_{i = 1}^n$>,'', - - \item a numbered raw control symbol ``\verb,\,\verb,<^raw,\isa{n}\verb,>,'' where \isa{n} consists of digits, for example - ``\verb,\,\verb,<^raw42>,''. - - \end{enumerate} - - \noindent The \isa{ident} syntax for symbol names is \isa{letter\ {\isacharparenleft}letter\ {\isacharbar}\ digit{\isacharparenright}\isactrlsup {\isacharasterisk}}, where \isa{letter\ {\isacharequal}\ A{\isachardot}{\isachardot}Za{\isachardot}{\isachardot}z} and \isa{digit\ {\isacharequal}\ {\isadigit{0}}{\isachardot}{\isachardot}{\isadigit{9}}}. There are infinitely many - regular symbols and control symbols, but a fixed collection of - standard symbols is treated specifically. For example, - ``\verb,\,\verb,<alpha>,'' is classified as a letter, which means it - may occur within regular Isabelle identifiers. - - Since the character set underlying Isabelle symbols is 7-bit ASCII - and 8-bit characters are passed through transparently, Isabelle may - also process Unicode/UCS data in UTF-8 encoding. Unicode provides - its own collection of mathematical symbols, but there is no built-in - link to the standard collection of Isabelle. - - \medskip Output of Isabelle symbols depends on the print mode - (\secref{print-mode}). For example, the standard {\LaTeX} setup of - the Isabelle document preparation system would present - ``\verb,\,\verb,<alpha>,'' as \isa{{\isasymalpha}}, and - ``\verb,\,\verb,<^bold>,\verb,\,\verb,<alpha>,'' as \isa{\isactrlbold {\isasymalpha}}.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML type}{Symbol.symbol}\verb|type Symbol.symbol| \\ - \indexdef{}{ML}{Symbol.explode}\verb|Symbol.explode: string -> Symbol.symbol list| \\ - \indexdef{}{ML}{Symbol.is\_letter}\verb|Symbol.is_letter: Symbol.symbol -> bool| \\ - \indexdef{}{ML}{Symbol.is\_digit}\verb|Symbol.is_digit: Symbol.symbol -> bool| \\ - \indexdef{}{ML}{Symbol.is\_quasi}\verb|Symbol.is_quasi: Symbol.symbol -> bool| \\ - \indexdef{}{ML}{Symbol.is\_blank}\verb|Symbol.is_blank: Symbol.symbol -> bool| \\ - \end{mldecls} - \begin{mldecls} - \indexdef{}{ML type}{Symbol.sym}\verb|type Symbol.sym| \\ - \indexdef{}{ML}{Symbol.decode}\verb|Symbol.decode: Symbol.symbol -> Symbol.sym| \\ - \end{mldecls} - - \begin{description} - - \item \verb|Symbol.symbol| represents individual Isabelle - symbols; this is an alias for \verb|string|.
- - \item \verb|Symbol.explode|~\isa{str} produces a symbol list - from the packed form. This function supersedes \verb|String.explode| for virtually all purposes of manipulating text in - Isabelle! - - \item \verb|Symbol.is_letter|, \verb|Symbol.is_digit|, \verb|Symbol.is_quasi|, \verb|Symbol.is_blank| classify standard - symbols according to fixed syntactic conventions of Isabelle, cf.\ - \cite{isabelle-isar-ref}. - - \item \verb|Symbol.sym| is a concrete datatype that represents - the different kinds of symbols explicitly, with constructors \verb|Symbol.Char|, \verb|Symbol.Sym|, \verb|Symbol.Ctrl|, \verb|Symbol.Raw|. - - \item \verb|Symbol.decode| converts the string representation of a - symbol into the datatype version. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsubsection{Basic names \label{sec:basic-names}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -A \emph{basic name} essentially consists of a single Isabelle - identifier. There are conventions to mark separate classes of basic - names, by attaching a suffix of underscores: one underscore means - \emph{internal name}, two underscores means \emph{Skolem name}, - three underscores means \emph{internal Skolem name}. - - For example, the basic name \isa{foo} has the internal version - \isa{foo{\isacharunderscore}}, with Skolem versions \isa{foo{\isacharunderscore}{\isacharunderscore}} and \isa{foo{\isacharunderscore}{\isacharunderscore}{\isacharunderscore}}, respectively. - - These special versions provide copies of the basic name space, apart - from anything that normally appears in the user text. For example, - system generated variables in Isar proof contexts are usually marked - as internal, which prevents mysterious name references like \isa{xaa} from appearing in the text. - - \medskip Manipulating binding scopes often requires on-the-fly - renamings. A \emph{name context} contains a collection of already - used names. The \isa{declare} operation adds names to the - context. - - The \isa{invents} operation derives a number of fresh names from - a given starting point. For example, the first three names derived - from \isa{a} are \isa{a}, \isa{b}, \isa{c}. - - The \isa{variants} operation produces fresh names by - incrementing tentative names as base-26 numbers (with digits \isa{a{\isachardot}{\isachardot}z}) until all clashes are resolved. For example, the name \isa{foo} results in variants \isa{fooa}, \isa{foob}, \isa{fooc}, \dots, \isa{fooaa}, \isa{fooab} etc.; each renaming - step picks the next unused variant from this sequence.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML}{Name.internal}\verb|Name.internal: string -> string| \\ - \indexdef{}{ML}{Name.skolem}\verb|Name.skolem: string -> string| \\ - \end{mldecls} - \begin{mldecls} - \indexdef{}{ML type}{Name.context}\verb|type Name.context| \\ - \indexdef{}{ML}{Name.context}\verb|Name.context: Name.context| \\ - \indexdef{}{ML}{Name.declare}\verb|Name.declare: string -> Name.context -> Name.context| \\ - \indexdef{}{ML}{Name.invents}\verb|Name.invents: Name.context -> string -> int -> string list| \\ - \indexdef{}{ML}{Name.variants}\verb|Name.variants: string list -> Name.context -> string list * Name.context| \\ - \end{mldecls} - - \begin{description} - - \item \verb|Name.internal|~\isa{name} produces an internal name - by adding one underscore.
- - \item \verb|Name.skolem|~\isa{name} produces a Skolem name by - adding two underscores. - - \item \verb|Name.context| represents the context of already used - names; the initial value is \verb|Name.context|. - - \item \verb|Name.declare|~\isa{name} enters a used name into the - context. - - \item \verb|Name.invents|~\isa{context\ name\ n} produces \isa{n} fresh names derived from \isa{name}. - - \item \verb|Name.variants|~\isa{names\ context} produces fresh - variants of \isa{names}; the result is entered into the context. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsubsection{Indexed names% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -An \emph{indexed name} (or \isa{indexname}) is a pair of a basic - name and a natural number. This representation allows efficient - renaming by incrementing the second component only. The canonical - way to rename two collections of indexnames apart from each other is - this: determine the maximum index \isa{maxidx} of the first - collection, then increment all indexes of the second collection by - \isa{maxidx\ {\isacharplus}\ {\isadigit{1}}}; the maximum index of an empty collection is - \isa{{\isacharminus}{\isadigit{1}}}. - - Occasionally, basic names and indexed names are injected into the - same pair type: the (improper) indexname \isa{{\isacharparenleft}x{\isacharcomma}\ {\isacharminus}{\isadigit{1}}{\isacharparenright}} is used - to encode basic names. - - \medskip Isabelle syntax observes the following rules for - representing an indexname \isa{{\isacharparenleft}x{\isacharcomma}\ i{\isacharparenright}} as a packed string: - - \begin{itemize} - - \item \isa{{\isacharquery}x} if \isa{x} does not end with a digit and \isa{i\ {\isacharequal}\ {\isadigit{0}}}, - - \item \isa{{\isacharquery}xi} if \isa{x} does not end with a digit, - - \item \isa{{\isacharquery}x{\isachardot}i} otherwise. - - \end{itemize} - - Indexnames may acquire large index numbers over time. Results are - normalized towards \isa{{\isadigit{0}}} at certain checkpoints, notably at - the end of a proof. This works by producing variants of the - corresponding basic name components. For example, the collection - \isa{{\isacharquery}x{\isadigit{1}}{\isacharcomma}\ {\isacharquery}x{\isadigit{7}}{\isacharcomma}\ {\isacharquery}x{\isadigit{4}}{\isadigit{2}}} becomes \isa{{\isacharquery}x{\isacharcomma}\ {\isacharquery}xa{\isacharcomma}\ {\isacharquery}xb}.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML type}{indexname}\verb|type indexname| \\ - \end{mldecls} - - \begin{description} - - \item \verb|indexname| represents indexed names. This is an - abbreviation for \verb|string * int|. The second component is - usually non-negative, except for situations where \isa{{\isacharparenleft}x{\isacharcomma}\ {\isacharminus}{\isadigit{1}}{\isacharparenright}} - is used to embed basic names into this type. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsubsection{Qualified names and name spaces% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -A \emph{qualified name} consists of a non-empty sequence of basic - name components. The packed representation uses a dot as separator, - as in ``\isa{A{\isachardot}b{\isachardot}c}''. 
The last component is called \emph{base} - name, the remaining prefix \emph{qualifier} (which may be empty). - The idea of qualified names is to encode nested structures by - recording the access paths as qualifiers. For example, an item - named ``\isa{A{\isachardot}b{\isachardot}c}'' may be understood as a local entity \isa{c}, within a local structure \isa{b}, within a global - structure \isa{A}. Typically, name space hierarchies consist of - 1--2 levels of qualification, but this need not be always so. - - The empty name is commonly used as an indication of unnamed - entities, whenever this makes any sense. The basic operations on - qualified names are smart enough to pass through such improper names - unchanged. - - \medskip A \isa{naming} policy tells how to turn a name - specification into a fully qualified internal name (by the \isa{full} operation), and how fully qualified names may be accessed - externally. For example, the default naming policy is to prefix an - implicit path: \isa{full\ x} produces \isa{path{\isachardot}x}, and the - standard accesses for \isa{path{\isachardot}x} include both \isa{x} and - \isa{path{\isachardot}x}. Normally, the naming is implicit in the theory or - proof context; there are separate versions of the corresponding. - - \medskip A \isa{name\ space} manages a collection of fully - internalized names, together with a mapping between external names - and internal names (in both directions). The corresponding \isa{intern} and \isa{extern} operations are mostly used for - parsing and printing only! The \isa{declare} operation augments - a name space according to the accesses determined by the naming - policy. - - \medskip As a general principle, there is a separate name space for - each kind of formal entity, e.g.\ logical constant, type - constructor, type class, theorem. It is usually clear from the - occurrence in concrete syntax (or from the scope) which kind of - entity a name refers to. For example, the very same name \isa{c} may be used uniformly for a constant, type constructor, and - type class. - - There are common schemes to name theorems systematically, according - to the name of the main logical entity involved, e.g.\ \isa{c{\isachardot}intro} for a canonical theorem related to constant \isa{c}. - This technique of mapping names from one space into another requires - some care in order to avoid conflicts. 
In particular, theorem names - derived from a type constructor or type class are better suffixed in - addition to the usual qualification, e.g.\ \isa{c{\isacharunderscore}type{\isachardot}intro} - and \isa{c{\isacharunderscore}class{\isachardot}intro} for theorems related to type \isa{c} - and class \isa{c}, respectively.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML}{NameSpace.base}\verb|NameSpace.base: string -> string| \\ - \indexdef{}{ML}{NameSpace.qualifier}\verb|NameSpace.qualifier: string -> string| \\ - \indexdef{}{ML}{NameSpace.append}\verb|NameSpace.append: string -> string -> string| \\ - \indexdef{}{ML}{NameSpace.implode}\verb|NameSpace.implode: string list -> string| \\ - \indexdef{}{ML}{NameSpace.explode}\verb|NameSpace.explode: string -> string list| \\ - \end{mldecls} - \begin{mldecls} - \indexdef{}{ML type}{NameSpace.naming}\verb|type NameSpace.naming| \\ - \indexdef{}{ML}{NameSpace.default\_naming}\verb|NameSpace.default_naming: NameSpace.naming| \\ - \indexdef{}{ML}{NameSpace.add\_path}\verb|NameSpace.add_path: string -> NameSpace.naming -> NameSpace.naming| \\ - \indexdef{}{ML}{NameSpace.full\_name}\verb|NameSpace.full_name: NameSpace.naming -> binding -> string| \\ - \end{mldecls} - \begin{mldecls} - \indexdef{}{ML type}{NameSpace.T}\verb|type NameSpace.T| \\ - \indexdef{}{ML}{NameSpace.empty}\verb|NameSpace.empty: NameSpace.T| \\ - \indexdef{}{ML}{NameSpace.merge}\verb|NameSpace.merge: NameSpace.T * NameSpace.T -> NameSpace.T| \\ - \indexdef{}{ML}{NameSpace.declare}\verb|NameSpace.declare: NameSpace.naming -> binding -> NameSpace.T -> string * NameSpace.T| \\ - \indexdef{}{ML}{NameSpace.intern}\verb|NameSpace.intern: NameSpace.T -> string -> string| \\ - \indexdef{}{ML}{NameSpace.extern}\verb|NameSpace.extern: NameSpace.T -> string -> string| \\ - \end{mldecls} - - \begin{description} - - \item \verb|NameSpace.base|~\isa{name} returns the base name of a - qualified name. - - \item \verb|NameSpace.qualifier|~\isa{name} returns the qualifier - of a qualified name. - - \item \verb|NameSpace.append|~\isa{name\isactrlisub {\isadigit{1}}\ name\isactrlisub {\isadigit{2}}} - appends two qualified names. - - \item \verb|NameSpace.implode|~\isa{name} and \verb|NameSpace.explode|~\isa{names} convert between the packed string - representation and the explicit list form of qualified names. - - \item \verb|NameSpace.naming| represents the abstract concept of - a naming policy. - - \item \verb|NameSpace.default_naming| is the default naming policy. - In a theory context, this is usually augmented by a path prefix - consisting of the theory name. - - \item \verb|NameSpace.add_path|~\isa{path\ naming} augments the - naming policy by extending its path component. - - \item \verb|NameSpace.full_name|\isa{naming\ binding} turns a name - binding (usually a basic name) into the fully qualified - internal name, according to the given naming policy. - - \item \verb|NameSpace.T| represents name spaces. - - \item \verb|NameSpace.empty| and \verb|NameSpace.merge|~\isa{{\isacharparenleft}space\isactrlisub {\isadigit{1}}{\isacharcomma}\ space\isactrlisub {\isadigit{2}}{\isacharparenright}} are the canonical operations for - maintaining name spaces according to theory data management - (\secref{sec:context-data}). 
- - \item \verb|NameSpace.declare|~\isa{naming\ bindings\ space} enters a - name binding as fully qualified internal name into the name space, - with external accesses determined by the naming policy. - - \item \verb|NameSpace.intern|~\isa{space\ name} internalizes a - (partially qualified) external name. - - This operation is mostly for parsing! Note that fully qualified - names stemming from declarations are produced via \verb|NameSpace.full_name| and \verb|NameSpace.declare| - (or their derivatives for \verb|theory| and - \verb|Proof.context|). - - \item \verb|NameSpace.extern|~\isa{space\ name} externalizes a - (fully qualified) internal name. - - This operation is mostly for printing! Note unqualified names are - produced via \verb|NameSpace.base|. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isadelimtheory -% -\endisadelimtheory -% -\isatagtheory -\isacommand{end}\isamarkupfalse% -% -\endisatagtheory -{\isafoldtheory}% -% -\isadelimtheory -% -\endisadelimtheory -\isanewline -\end{isabellebody}% -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "root" -%%% End: diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarImplementation/Thy/document/Proof.tex --- a/doc-src/IsarImplementation/Thy/document/Proof.tex Thu Mar 05 02:20:06 2009 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,394 +0,0 @@ -% -\begin{isabellebody}% -\def\isabellecontext{Proof}% -% -\isadelimtheory -% -\endisadelimtheory -% -\isatagtheory -\isacommand{theory}\isamarkupfalse% -\ Proof\isanewline -\isakeyword{imports}\ Base\isanewline -\isakeyword{begin}% -\endisatagtheory -{\isafoldtheory}% -% -\isadelimtheory -% -\endisadelimtheory -% -\isamarkupchapter{Structured proofs% -} -\isamarkuptrue% -% -\isamarkupsection{Variables \label{sec:variables}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -Any variable that is not explicitly bound by \isa{{\isasymlambda}}-abstraction - is considered as ``free''. Logically, free variables act like - outermost universal quantification at the sequent level: \isa{A\isactrlisub {\isadigit{1}}{\isacharparenleft}x{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlisub n{\isacharparenleft}x{\isacharparenright}\ {\isasymturnstile}\ B{\isacharparenleft}x{\isacharparenright}} means that the result - holds \emph{for all} values of \isa{x}. Free variables for - terms (not types) can be fully internalized into the logic: \isa{{\isasymturnstile}\ B{\isacharparenleft}x{\isacharparenright}} and \isa{{\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}} are interchangeable, provided - that \isa{x} does not occur elsewhere in the context. - Inspecting \isa{{\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}} more closely, we see that inside the - quantifier, \isa{x} is essentially ``arbitrary, but fixed'', - while from outside it appears as a place-holder for instantiation - (thanks to \isa{{\isasymAnd}} elimination). - - The Pure logic represents the idea of variables being either inside - or outside the current scope by providing separate syntactic - categories for \emph{fixed variables} (e.g.\ \isa{x}) vs.\ - \emph{schematic variables} (e.g.\ \isa{{\isacharquery}x}). 
Incidently, a - universal result \isa{{\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}} has the HHF normal form \isa{{\isasymturnstile}\ B{\isacharparenleft}{\isacharquery}x{\isacharparenright}}, which represents its generality nicely without requiring - an explicit quantifier. The same principle works for type - variables: \isa{{\isasymturnstile}\ B{\isacharparenleft}{\isacharquery}{\isasymalpha}{\isacharparenright}} represents the idea of ``\isa{{\isasymturnstile}\ {\isasymforall}{\isasymalpha}{\isachardot}\ B{\isacharparenleft}{\isasymalpha}{\isacharparenright}}'' without demanding a truly polymorphic framework. - - \medskip Additional care is required to treat type variables in a - way that facilitates type-inference. In principle, term variables - depend on type variables, which means that type variables would have - to be declared first. For example, a raw type-theoretic framework - would demand the context to be constructed in stages as follows: - \isa{{\isasymGamma}\ {\isacharequal}\ {\isasymalpha}{\isacharcolon}\ type{\isacharcomma}\ x{\isacharcolon}\ {\isasymalpha}{\isacharcomma}\ a{\isacharcolon}\ A{\isacharparenleft}x\isactrlisub {\isasymalpha}{\isacharparenright}}. - - We allow a slightly less formalistic mode of operation: term - variables \isa{x} are fixed without specifying a type yet - (essentially \emph{all} potential occurrences of some instance - \isa{x\isactrlisub {\isasymtau}} are fixed); the first occurrence of \isa{x} - within a specific term assigns its most general type, which is then - maintained consistently in the context. The above example becomes - \isa{{\isasymGamma}\ {\isacharequal}\ x{\isacharcolon}\ term{\isacharcomma}\ {\isasymalpha}{\isacharcolon}\ type{\isacharcomma}\ A{\isacharparenleft}x\isactrlisub {\isasymalpha}{\isacharparenright}}, where type \isa{{\isasymalpha}} is fixed \emph{after} term \isa{x}, and the constraint - \isa{x\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}} is an implicit consequence of the occurrence of - \isa{x\isactrlisub {\isasymalpha}} in the subsequent proposition. - - This twist of dependencies is also accommodated by the reverse - operation of exporting results from a context: a type variable - \isa{{\isasymalpha}} is considered fixed as long as it occurs in some fixed - term variable of the context. For example, exporting \isa{x{\isacharcolon}\ term{\isacharcomma}\ {\isasymalpha}{\isacharcolon}\ type\ {\isasymturnstile}\ x\isactrlisub {\isasymalpha}\ {\isacharequal}\ x\isactrlisub {\isasymalpha}} produces in the first step - \isa{x{\isacharcolon}\ term\ {\isasymturnstile}\ x\isactrlisub {\isasymalpha}\ {\isacharequal}\ x\isactrlisub {\isasymalpha}} for fixed \isa{{\isasymalpha}}, - and only in the second step \isa{{\isasymturnstile}\ {\isacharquery}x\isactrlisub {\isacharquery}\isactrlisub {\isasymalpha}\ {\isacharequal}\ {\isacharquery}x\isactrlisub {\isacharquery}\isactrlisub {\isasymalpha}} for schematic \isa{{\isacharquery}x} and \isa{{\isacharquery}{\isasymalpha}}. - - \medskip The Isabelle/Isar proof context manages the gory details of - term vs.\ type variables, with high-level principles for moving the - frontier between fixed and schematic variables. - - The \isa{add{\isacharunderscore}fixes} operation explictly declares fixed - variables; the \isa{declare{\isacharunderscore}term} operation absorbs a term into - a context by fixing new type variables and adding syntactic - constraints. 
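  A minimal Isabelle/ML sketch of these two operations, assuming a
  context \verb|ctxt0| and a term \verb|t| supplied by the
  surrounding code (the names are placeholders, not part of the
  library interface):

\begin{verbatim}
(* fix two term variables, then absorb a term into the context;
   declaring t implicitly fixes the type variables occurring in it *)
val ([x, y], ctxt1) = Variable.add_fixes ["x", "y"] ctxt0;
val ctxt2 = Variable.declare_term t ctxt1;
\end{verbatim}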
- - The \isa{export} operation is able to perform the main work of - generalizing term and type variables as sketched above, assuming - that fixing variables and terms have been declared properly. - - There \isa{import} operation makes a generalized fact a genuine - part of the context, by inventing fixed variables for the schematic - ones. The effect can be reversed by using \isa{export} later, - potentially with an extended context; the result is equivalent to - the original modulo renaming of schematic variables. - - The \isa{focus} operation provides a variant of \isa{import} - for nested propositions (with explicit quantification): \isa{{\isasymAnd}x\isactrlisub {\isadigit{1}}\ {\isasymdots}\ x\isactrlisub n{\isachardot}\ B{\isacharparenleft}x\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ x\isactrlisub n{\isacharparenright}} is - decomposed by inventing fixed variables \isa{x\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ x\isactrlisub n} for the body.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML}{Variable.add\_fixes}\verb|Variable.add_fixes: |\isasep\isanewline% -\verb| string list -> Proof.context -> string list * Proof.context| \\ - \indexdef{}{ML}{Variable.variant\_fixes}\verb|Variable.variant_fixes: |\isasep\isanewline% -\verb| string list -> Proof.context -> string list * Proof.context| \\ - \indexdef{}{ML}{Variable.declare\_term}\verb|Variable.declare_term: term -> Proof.context -> Proof.context| \\ - \indexdef{}{ML}{Variable.declare\_constraints}\verb|Variable.declare_constraints: term -> Proof.context -> Proof.context| \\ - \indexdef{}{ML}{Variable.export}\verb|Variable.export: Proof.context -> Proof.context -> thm list -> thm list| \\ - \indexdef{}{ML}{Variable.polymorphic}\verb|Variable.polymorphic: Proof.context -> term list -> term list| \\ - \indexdef{}{ML}{Variable.import\_thms}\verb|Variable.import_thms: bool -> thm list -> Proof.context ->|\isasep\isanewline% -\verb| ((ctyp list * cterm list) * thm list) * Proof.context| \\ - \indexdef{}{ML}{Variable.focus}\verb|Variable.focus: cterm -> Proof.context -> (cterm list * cterm) * Proof.context| \\ - \end{mldecls} - - \begin{description} - - \item \verb|Variable.add_fixes|~\isa{xs\ ctxt} fixes term - variables \isa{xs}, returning the resulting internal names. By - default, the internal representation coincides with the external - one, which also means that the given variables must not be fixed - already. There is a different policy within a local proof body: the - given names are just hints for newly invented Skolem variables. - - \item \verb|Variable.variant_fixes| is similar to \verb|Variable.add_fixes|, but always produces fresh variants of the given - names. - - \item \verb|Variable.declare_term|~\isa{t\ ctxt} declares term - \isa{t} to belong to the context. This automatically fixes new - type variables, but not term variables. Syntactic constraints for - type and term variables are declared uniformly, though. - - \item \verb|Variable.declare_constraints|~\isa{t\ ctxt} declares - syntactic constraints from term \isa{t}, without making it part - of the context yet. - - \item \verb|Variable.export|~\isa{inner\ outer\ thms} generalizes - fixed type and term variables in \isa{thms} according to the - difference of the \isa{inner} and \isa{outer} context, - following the principles sketched above. 
- - \item \verb|Variable.polymorphic|~\isa{ctxt\ ts} generalizes type - variables in \isa{ts} as far as possible, even those occurring - in fixed term variables. The default policy of type-inference is to - fix newly introduced type variables, which is essentially reversed - with \verb|Variable.polymorphic|: here the given terms are detached - from the context as far as possible. - - \item \verb|Variable.import_thms|~\isa{open\ thms\ ctxt} invents fixed - type and term variables for the schematic ones occurring in \isa{thms}. The \isa{open} flag indicates whether the fixed names - should be accessible to the user, otherwise newly introduced names - are marked as ``internal'' (\secref{sec:names}). - - \item \verb|Variable.focus|~\isa{B} decomposes the outermost \isa{{\isasymAnd}} prefix of proposition \isa{B}. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsection{Assumptions \label{sec:assumptions}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -An \emph{assumption} is a proposition that it is postulated in the - current context. Local conclusions may use assumptions as - additional facts, but this imposes implicit hypotheses that weaken - the overall statement. - - Assumptions are restricted to fixed non-schematic statements, i.e.\ - all generality needs to be expressed by explicit quantifiers. - Nevertheless, the result will be in HHF normal form with outermost - quantifiers stripped. For example, by assuming \isa{{\isasymAnd}x\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}{\isachardot}\ P\ x} we get \isa{{\isasymAnd}x\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}{\isachardot}\ P\ x\ {\isasymturnstile}\ P\ {\isacharquery}x} for schematic \isa{{\isacharquery}x} - of fixed type \isa{{\isasymalpha}}. Local derivations accumulate more and - more explicit references to hypotheses: \isa{A\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlisub n\ {\isasymturnstile}\ B} where \isa{A\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlisub n} needs to - be covered by the assumptions of the current context. - - \medskip The \isa{add{\isacharunderscore}assms} operation augments the context by - local assumptions, which are parameterized by an arbitrary \isa{export} rule (see below). - - The \isa{export} operation moves facts from a (larger) inner - context into a (smaller) outer context, by discharging the - difference of the assumptions as specified by the associated export - rules. Note that the discharged portion is determined by the - difference contexts, not the facts being exported! There is a - separate flag to indicate a goal context, where the result is meant - to refine an enclosing sub-goal of a structured proof state. 
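  A minimal sketch in Isabelle/ML, assuming some certified
  proposition \verb|A| and an outer context \verb|ctxt0| (both names
  are placeholders for values provided by the surrounding code):

\begin{verbatim}
(* postulate A locally, then discharge it again on export *)
val ([assm], ctxt1) = Assumption.add_assumes [A] ctxt0;
val th = Assumption.export false ctxt1 ctxt0 assm;
(* th has the hypothesis discharged, i.e. it is of the form
   "A ==> A'", where A' is the HHF normal form of A *)
\end{verbatim}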
- - \medskip The most basic export rule discharges assumptions directly - by means of the \isa{{\isasymLongrightarrow}} introduction rule: - \[ - \infer[(\isa{{\isasymLongrightarrow}{\isacharunderscore}intro})]{\isa{{\isasymGamma}\ {\isacharbackslash}\ A\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}} - \] - - The variant for goal refinements marks the newly introduced - premises, which causes the canonical Isar goal refinement scheme to - enforce unification with local premises within the goal: - \[ - \infer[(\isa{{\isacharhash}{\isasymLongrightarrow}{\isacharunderscore}intro})]{\isa{{\isasymGamma}\ {\isacharbackslash}\ A\ {\isasymturnstile}\ {\isacharhash}A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}} - \] - - \medskip Alternative versions of assumptions may perform arbitrary - transformations on export, as long as the corresponding portion of - hypotheses is removed from the given facts. For example, a local - definition works by fixing \isa{x} and assuming \isa{x\ {\isasymequiv}\ t}, - with the following export rule to reverse the effect: - \[ - \infer[(\isa{{\isasymequiv}{\isacharminus}expand})]{\isa{{\isasymGamma}\ {\isacharbackslash}\ x\ {\isasymequiv}\ t\ {\isasymturnstile}\ B\ t}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B\ x}} - \] - This works, because the assumption \isa{x\ {\isasymequiv}\ t} was introduced in - a context with \isa{x} being fresh, so \isa{x} does not - occur in \isa{{\isasymGamma}} here.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML type}{Assumption.export}\verb|type Assumption.export| \\ - \indexdef{}{ML}{Assumption.assume}\verb|Assumption.assume: cterm -> thm| \\ - \indexdef{}{ML}{Assumption.add\_assms}\verb|Assumption.add_assms: Assumption.export ->|\isasep\isanewline% -\verb| cterm list -> Proof.context -> thm list * Proof.context| \\ - \indexdef{}{ML}{Assumption.add\_assumes}\verb|Assumption.add_assumes: |\isasep\isanewline% -\verb| cterm list -> Proof.context -> thm list * Proof.context| \\ - \indexdef{}{ML}{Assumption.export}\verb|Assumption.export: bool -> Proof.context -> Proof.context -> thm -> thm| \\ - \end{mldecls} - - \begin{description} - - \item \verb|Assumption.export| represents arbitrary export - rules, which is any function of type \verb|bool -> cterm list -> thm -> thm|, - where the \verb|bool| indicates goal mode, and the \verb|cterm list| the collection of assumptions to be discharged - simultaneously. - - \item \verb|Assumption.assume|~\isa{A} turns proposition \isa{A} into a raw assumption \isa{A\ {\isasymturnstile}\ A{\isacharprime}}, where the conclusion - \isa{A{\isacharprime}} is in HHF normal form. - - \item \verb|Assumption.add_assms|~\isa{r\ As} augments the context - by assumptions \isa{As} with export rule \isa{r}. The - resulting facts are hypothetical theorems as produced by the raw - \verb|Assumption.assume|. - - \item \verb|Assumption.add_assumes|~\isa{As} is a special case of - \verb|Assumption.add_assms| where the export rule performs \isa{{\isasymLongrightarrow}{\isacharunderscore}intro} or \isa{{\isacharhash}{\isasymLongrightarrow}{\isacharunderscore}intro}, depending on goal mode. 
- - \item \verb|Assumption.export|~\isa{is{\isacharunderscore}goal\ inner\ outer\ thm} - exports result \isa{thm} from the the \isa{inner} context - back into the \isa{outer} one; \isa{is{\isacharunderscore}goal\ {\isacharequal}\ true} means - this is a goal context. The result is in HHF normal form. Note - that \verb|ProofContext.export| combines \verb|Variable.export| - and \verb|Assumption.export| in the canonical way. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsection{Results \label{sec:results}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -Local results are established by monotonic reasoning from facts - within a context. This allows common combinations of theorems, - e.g.\ via \isa{{\isasymAnd}{\isacharslash}{\isasymLongrightarrow}} elimination, resolution rules, or equational - reasoning, see \secref{sec:thms}. Unaccounted context manipulations - should be avoided, notably raw \isa{{\isasymAnd}{\isacharslash}{\isasymLongrightarrow}} introduction or ad-hoc - references to free variables or assumptions not present in the proof - context. - - \medskip The \isa{SUBPROOF} combinator allows to structure a - tactical proof recursively by decomposing a selected sub-goal: - \isa{{\isacharparenleft}{\isasymAnd}x{\isachardot}\ A{\isacharparenleft}x{\isacharparenright}\ {\isasymLongrightarrow}\ B{\isacharparenleft}x{\isacharparenright}{\isacharparenright}\ {\isasymLongrightarrow}\ {\isasymdots}} is turned into \isa{B{\isacharparenleft}x{\isacharparenright}\ {\isasymLongrightarrow}\ {\isasymdots}} - after fixing \isa{x} and assuming \isa{A{\isacharparenleft}x{\isacharparenright}}. This means - the tactic needs to solve the conclusion, but may use the premise as - a local fact, for locally fixed variables. - - The \isa{prove} operation provides an interface for structured - backwards reasoning under program control, with some explicit sanity - checks of the result. The goal context can be augmented by - additional fixed variables (cf.\ \secref{sec:variables}) and - assumptions (cf.\ \secref{sec:assumptions}), which will be available - as local facts during the proof and discharged into implications in - the result. Type and term variables are generalized as usual, - according to the context. - - The \isa{obtain} operation produces results by eliminating - existing facts by means of a given tactic. This acts like a dual - conclusion: the proof demonstrates that the context may be augmented - by certain fixed variables and assumptions. See also - \cite{isabelle-isar-ref} for the user-level \isa{{\isasymOBTAIN}} and - \isa{{\isasymGUESS}} elements. 
Final results, which may not refer to - the parameters in the conclusion, need to exported explicitly into - the original context.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML}{SUBPROOF}\verb|SUBPROOF: ({context: Proof.context, schematics: ctyp list * cterm list,|\isasep\isanewline% -\verb| params: cterm list, asms: cterm list, concl: cterm,|\isasep\isanewline% -\verb| prems: thm list} -> tactic) -> Proof.context -> int -> tactic| \\ - \end{mldecls} - \begin{mldecls} - \indexdef{}{ML}{Goal.prove}\verb|Goal.prove: Proof.context -> string list -> term list -> term ->|\isasep\isanewline% -\verb| ({prems: thm list, context: Proof.context} -> tactic) -> thm| \\ - \indexdef{}{ML}{Goal.prove\_multi}\verb|Goal.prove_multi: Proof.context -> string list -> term list -> term list ->|\isasep\isanewline% -\verb| ({prems: thm list, context: Proof.context} -> tactic) -> thm list| \\ - \end{mldecls} - \begin{mldecls} - \indexdef{}{ML}{Obtain.result}\verb|Obtain.result: (Proof.context -> tactic) ->|\isasep\isanewline% -\verb| thm list -> Proof.context -> (cterm list * thm list) * Proof.context| \\ - \end{mldecls} - - \begin{description} - - \item \verb|SUBPROOF|~\isa{tac\ ctxt\ i} decomposes the structure - of the specified sub-goal, producing an extended context and a - reduced goal, which needs to be solved by the given tactic. All - schematic parameters of the goal are imported into the context as - fixed ones, which may not be instantiated in the sub-proof. - - \item \verb|Goal.prove|~\isa{ctxt\ xs\ As\ C\ tac} states goal \isa{C} in the context augmented by fixed variables \isa{xs} and - assumptions \isa{As}, and applies tactic \isa{tac} to solve - it. The latter may depend on the local assumptions being presented - as facts. The result is in HHF normal form. - - \item \verb|Goal.prove_multi| is simular to \verb|Goal.prove|, but - states several conclusions simultaneously. The goal is encoded by - means of Pure conjunction; \verb|Goal.conjunction_tac| will turn this - into a collection of individual subgoals. - - \item \verb|Obtain.result|~\isa{tac\ thms\ ctxt} eliminates the - given facts using a tactic, which results in additional fixed - variables and assumptions in the context. Final results need to be - exported explicitly. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isadelimtheory -% -\endisadelimtheory -% -\isatagtheory -\isacommand{end}\isamarkupfalse% -% -\endisatagtheory -{\isafoldtheory}% -% -\isadelimtheory -% -\endisadelimtheory -\isanewline -\end{isabellebody}% -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "root" -%%% End: diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarImplementation/Thy/document/Tactic.tex --- a/doc-src/IsarImplementation/Thy/document/Tactic.tex Thu Mar 05 02:20:06 2009 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,497 +0,0 @@ -% -\begin{isabellebody}% -\def\isabellecontext{Tactic}% -% -\isadelimtheory -% -\endisadelimtheory -% -\isatagtheory -\isacommand{theory}\isamarkupfalse% -\ Tactic\isanewline -\isakeyword{imports}\ Base\isanewline -\isakeyword{begin}% -\endisatagtheory -{\isafoldtheory}% -% -\isadelimtheory -% -\endisadelimtheory -% -\isamarkupchapter{Tactical reasoning% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -Tactical reasoning works by refining the initial claim in a - backwards fashion, until a solved form is reached. 
A \isa{goal} - consists of several subgoals that need to be solved in order to - achieve the main statement; zero subgoals means that the proof may - be finished. A \isa{tactic} is a refinement operation that maps - a goal to a lazy sequence of potential successors. A \isa{tactical} is a combinator for composing tactics.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isamarkupsection{Goals \label{sec:tactical-goals}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -Isabelle/Pure represents a goal as a theorem stating that the - subgoals imply the main goal: \isa{A\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ A\isactrlsub n\ {\isasymLongrightarrow}\ C}. The outermost goal structure is that of a Horn Clause: i.e.\ - an iterated implication without any quantifiers\footnote{Recall that - outermost \isa{{\isasymAnd}x{\isachardot}\ {\isasymphi}{\isacharbrackleft}x{\isacharbrackright}} is always represented via schematic - variables in the body: \isa{{\isasymphi}{\isacharbrackleft}{\isacharquery}x{\isacharbrackright}}. These variables may get - instantiated during the course of reasoning.}. For \isa{n\ {\isacharequal}\ {\isadigit{0}}} - a goal is called ``solved''. - - The structure of each subgoal \isa{A\isactrlsub i} is that of a - general Hereditary Harrop Formula \isa{{\isasymAnd}x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ {\isasymAnd}x\isactrlsub k{\isachardot}\ H\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ H\isactrlsub m\ {\isasymLongrightarrow}\ B}. Here \isa{x\isactrlsub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ x\isactrlsub k} are goal parameters, i.e.\ - arbitrary-but-fixed entities of certain types, and \isa{H\isactrlsub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ H\isactrlsub m} are goal hypotheses, i.e.\ facts that may - be assumed locally. Together, this forms the goal context of the - conclusion \isa{B} to be established. The goal hypotheses may be - again arbitrary Hereditary Harrop Formulas, although the level of - nesting rarely exceeds 1--2 in practice. - - The main conclusion \isa{C} is internally marked as a protected - proposition, which is represented explicitly by the notation \isa{{\isacharhash}C}. This ensures that the decomposition into subgoals and main - conclusion is well-defined for arbitrarily structured claims. 
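  The protection marker can be observed directly in ML; a small
  sketch, assuming some certified proposition \verb|C| is given by
  the surrounding code:

\begin{verbatim}
(* initialize a tactical goal and remove the protection again *)
val st = Goal.init C;        (* goal state of the form "C ==> #C" *)
val th = Goal.conclude st;   (* "C ==> C", no longer protected *)
\end{verbatim}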
- - \medskip Basic goal management is performed via the following - Isabelle/Pure rules: - - \[ - \infer[\isa{{\isacharparenleft}init{\isacharparenright}}]{\isa{C\ {\isasymLongrightarrow}\ {\isacharhash}C}}{} \qquad - \infer[\isa{{\isacharparenleft}finish{\isacharparenright}}]{\isa{C}}{\isa{{\isacharhash}C}} - \] - - \medskip The following low-level variants admit general reasoning - with protected propositions: - - \[ - \infer[\isa{{\isacharparenleft}protect{\isacharparenright}}]{\isa{{\isacharhash}C}}{\isa{C}} \qquad - \infer[\isa{{\isacharparenleft}conclude{\isacharparenright}}]{\isa{A\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ A\isactrlsub n\ {\isasymLongrightarrow}\ C}}{\isa{A\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ A\isactrlsub n\ {\isasymLongrightarrow}\ {\isacharhash}C}} - \]% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML}{Goal.init}\verb|Goal.init: cterm -> thm| \\ - \indexdef{}{ML}{Goal.finish}\verb|Goal.finish: thm -> thm| \\ - \indexdef{}{ML}{Goal.protect}\verb|Goal.protect: thm -> thm| \\ - \indexdef{}{ML}{Goal.conclude}\verb|Goal.conclude: thm -> thm| \\ - \end{mldecls} - - \begin{description} - - \item \verb|Goal.init|~\isa{C} initializes a tactical goal from - the well-formed proposition \isa{C}. - - \item \verb|Goal.finish|~\isa{thm} checks whether theorem - \isa{thm} is a solved goal (no subgoals), and concludes the - result by removing the goal protection. - - \item \verb|Goal.protect|~\isa{thm} protects the full statement - of theorem \isa{thm}. - - \item \verb|Goal.conclude|~\isa{thm} removes the goal - protection, even if there are pending subgoals. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsection{Tactics% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -A \isa{tactic} is a function \isa{goal\ {\isasymrightarrow}\ goal\isactrlsup {\isacharasterisk}\isactrlsup {\isacharasterisk}} that - maps a given goal state (represented as a theorem, cf.\ - \secref{sec:tactical-goals}) to a lazy sequence of potential - successor states. The underlying sequence implementation is lazy - both in head and tail, and is purely functional in \emph{not} - supporting memoing.\footnote{The lack of memoing and the strict - nature of SML requires some care when working with low-level - sequence operations, to avoid duplicate or premature evaluation of - results.} - - An \emph{empty result sequence} means that the tactic has failed: in - a compound tactic expressions other tactics might be tried instead, - or the whole refinement step might fail outright, producing a - toplevel error message. When implementing tactics from scratch, one - should take care to observe the basic protocol of mapping regular - error conditions to an empty result; only serious faults should - emerge as exceptions. - - By enumerating \emph{multiple results}, a tactic can easily express - the potential outcome of an internal search process. There are also - combinators for building proof tools that involve search - systematically, see also \secref{sec:tacticals}. - - \medskip As explained in \secref{sec:tactical-goals}, a goal state - essentially consists of a list of subgoals that imply the main goal - (conclusion). 
Tactics may operate on all subgoals or on a - particularly specified subgoal, but must not change the main - conclusion (apart from instantiating schematic goal variables). - - Tactics with explicit \emph{subgoal addressing} are of the form - \isa{int\ {\isasymrightarrow}\ tactic} and may be applied to a particular subgoal - (counting from 1). If the subgoal number is out of range, the - tactic should fail with an empty result sequence, but must not raise - an exception! - - Operating on a particular subgoal means to replace it by an interval - of zero or more subgoals in the same place; other subgoals must not - be affected, apart from instantiating schematic variables ranging - over the whole goal state. - - A common pattern of composing tactics with subgoal addressing is to - try the first one, and then the second one only if the subgoal has - not been solved yet. Special care is required here to avoid bumping - into unrelated subgoals that happen to come after the original - subgoal. Assuming that there is only a single initial subgoal is a - very common error when implementing tactics! - - Tactics with internal subgoal addressing should expose the subgoal - index as \isa{int} argument in full generality; a hardwired - subgoal 1 inappropriate. - - \medskip The main well-formedness conditions for proper tactics are - summarized as follows. - - \begin{itemize} - - \item General tactic failure is indicated by an empty result, only - serious faults may produce an exception. - - \item The main conclusion must not be changed, apart from - instantiating schematic variables. - - \item A tactic operates either uniformly on all subgoals, or - specifically on a selected subgoal (without bumping into unrelated - subgoals). - - \item Range errors in subgoal addressing produce an empty result. - - \end{itemize} - - Some of these conditions are checked by higher-level goal - infrastructure (\secref{sec:results}); others are not checked - explicitly, and violating them merely results in ill-behaved tactics - experienced by the user (e.g.\ tactics that insist in being - applicable only to singleton goals, or disallow composition with - basic tacticals).% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML type}{tactic}\verb|type tactic = thm -> thm Seq.seq| \\ - \indexdef{}{ML}{no\_tac}\verb|no_tac: tactic| \\ - \indexdef{}{ML}{all\_tac}\verb|all_tac: tactic| \\ - \indexdef{}{ML}{print\_tac}\verb|print_tac: string -> tactic| \\[1ex] - \indexdef{}{ML}{PRIMITIVE}\verb|PRIMITIVE: (thm -> thm) -> tactic| \\[1ex] - \indexdef{}{ML}{SUBGOAL}\verb|SUBGOAL: (term * int -> tactic) -> int -> tactic| \\ - \indexdef{}{ML}{CSUBGOAL}\verb|CSUBGOAL: (cterm * int -> tactic) -> int -> tactic| \\ - \end{mldecls} - - \begin{description} - - \item \verb|tactic| represents tactics. The well-formedness - conditions described above need to be observed. See also \hyperlink{file.~~/src/Pure/General/seq.ML}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}Pure{\isacharslash}General{\isacharslash}seq{\isachardot}ML}}}} for the underlying implementation of - lazy sequences. - - \item \verb|int -> tactic| represents tactics with explicit - subgoal addressing, with well-formedness conditions as described - above. - - \item \verb|no_tac| is a tactic that always fails, returning the - empty sequence. 
- - \item \verb|all_tac| is a tactic that always succeeds, returning a - singleton sequence with unchanged goal state. - - \item \verb|print_tac|~\isa{message} is like \verb|all_tac|, but - prints a message together with the goal state on the tracing - channel. - - \item \verb|PRIMITIVE|~\isa{rule} turns a primitive inference rule - into a tactic with unique result. Exception \verb|THM| is considered - a regular tactic failure and produces an empty result; other - exceptions are passed through. - - \item \verb|SUBGOAL|~\isa{{\isacharparenleft}fn\ {\isacharparenleft}subgoal{\isacharcomma}\ i{\isacharparenright}\ {\isacharequal}{\isachargreater}\ tactic{\isacharparenright}} is the - most basic form to produce a tactic with subgoal addressing. The - given abstraction over the subgoal term and subgoal number allows to - peek at the relevant information of the full goal state. The - subgoal range is checked as required above. - - \item \verb|CSUBGOAL| is similar to \verb|SUBGOAL|, but passes the - subgoal as \verb|cterm| instead of raw \verb|term|. This - avoids expensive re-certification in situations where the subgoal is - used directly for primitive inferences. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsubsection{Resolution and assumption tactics \label{sec:resolve-assume-tac}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -\emph{Resolution} is the most basic mechanism for refining a - subgoal using a theorem as object-level rule. - \emph{Elim-resolution} is particularly suited for elimination rules: - it resolves with a rule, proves its first premise by assumption, and - finally deletes that assumption from any new subgoals. - \emph{Destruct-resolution} is like elim-resolution, but the given - destruction rules are first turned into canonical elimination - format. \emph{Forward-resolution} is like destruct-resolution, but - without deleting the selected assumption. The \isa{r{\isacharslash}e{\isacharslash}d{\isacharslash}f} - naming convention is maintained for several different kinds of - resolution rules and tactics. - - Assumption tactics close a subgoal by unifying some of its premises - against its conclusion. - - \medskip All the tactics in this section operate on a subgoal - designated by a positive integer. Other subgoals might be affected - indirectly, due to instantiation of schematic variables. - - There are various sources of non-determinism, the tactic result - sequence enumerates all possibilities of the following choices (if - applicable): - - \begin{enumerate} - - \item selecting one of the rules given as argument to the tactic; - - \item selecting a subgoal premise to eliminate, unifying it against - the first premise of the rule; - - \item unifying the conclusion of the subgoal to the conclusion of - the rule. 
- - \end{enumerate} - - Recall that higher-order unification may produce multiple results - that are enumerated here.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML}{resolve\_tac}\verb|resolve_tac: thm list -> int -> tactic| \\ - \indexdef{}{ML}{eresolve\_tac}\verb|eresolve_tac: thm list -> int -> tactic| \\ - \indexdef{}{ML}{dresolve\_tac}\verb|dresolve_tac: thm list -> int -> tactic| \\ - \indexdef{}{ML}{forward\_tac}\verb|forward_tac: thm list -> int -> tactic| \\[1ex] - \indexdef{}{ML}{assume\_tac}\verb|assume_tac: int -> tactic| \\ - \indexdef{}{ML}{eq\_assume\_tac}\verb|eq_assume_tac: int -> tactic| \\[1ex] - \indexdef{}{ML}{match\_tac}\verb|match_tac: thm list -> int -> tactic| \\ - \indexdef{}{ML}{ematch\_tac}\verb|ematch_tac: thm list -> int -> tactic| \\ - \indexdef{}{ML}{dmatch\_tac}\verb|dmatch_tac: thm list -> int -> tactic| \\ - \end{mldecls} - - \begin{description} - - \item \verb|resolve_tac|~\isa{thms\ i} refines the goal state - using the given theorems, which should normally be introduction - rules. The tactic resolves a rule's conclusion with subgoal \isa{i}, replacing it by the corresponding versions of the rule's - premises. - - \item \verb|eresolve_tac|~\isa{thms\ i} performs elim-resolution - with the given theorems, which should normally be elimination rules. - - \item \verb|dresolve_tac|~\isa{thms\ i} performs - destruct-resolution with the given theorems, which should normally - be destruction rules. This replaces an assumption by the result of - applying one of the rules. - - \item \verb|forward_tac| is like \verb|dresolve_tac| except that the - selected assumption is not deleted. It applies a rule to an - assumption, adding the result as a new assumption. - - \item \verb|assume_tac|~\isa{i} attempts to solve subgoal \isa{i} - by assumption (modulo higher-order unification). - - \item \verb|eq_assume_tac| is similar to \verb|assume_tac|, but checks - only for immediate \isa{{\isasymalpha}}-convertibility instead of using - unification. It succeeds (with a unique next state) if one of the - assumptions is equal to the subgoal's conclusion. Since it does not - instantiate variables, it cannot make other subgoals unprovable. - - \item \verb|match_tac|, \verb|ematch_tac|, and \verb|dmatch_tac| are - similar to \verb|resolve_tac|, \verb|eresolve_tac|, and \verb|dresolve_tac|, respectively, but do not instantiate schematic - variables in the goal state. - - Flexible subgoals are not updated at will, but are left alone. - Strictly speaking, matching means to treat the unknowns in the goal - state as constants; these tactics merely discard unifiers that would - update the goal state. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsubsection{Explicit instantiation within a subgoal context% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -The main resolution tactics (\secref{sec:resolve-assume-tac}) - use higher-order unification, which works well in many practical - situations despite its daunting theoretical properties. - Nonetheless, there are important problem classes where unguided - higher-order unification is not so useful. This typically involves - rules like universal elimination, existential introduction, or - equational substitution. 
Here the unification problem involves - fully flexible \isa{{\isacharquery}P\ {\isacharquery}x} schemes, which are hard to manage - without further hints. - - By providing a (small) rigid term for \isa{{\isacharquery}x} explicitly, the - remaining unification problem is to assign a (large) term to \isa{{\isacharquery}P}, according to the shape of the given subgoal. This is - sufficiently well-behaved in most practical situations. - - \medskip Isabelle provides separate versions of the standard \isa{r{\isacharslash}e{\isacharslash}d{\isacharslash}f} resolution tactics that allow to provide explicit - instantiations of unknowns of the given rule, wrt.\ terms that refer - to the implicit context of the selected subgoal. - - An instantiation consists of a list of pairs of the form \isa{{\isacharparenleft}{\isacharquery}x{\isacharcomma}\ t{\isacharparenright}}, where \isa{{\isacharquery}x} is a schematic variable occurring in - the given rule, and \isa{t} is a term from the current proof - context, augmented by the local goal parameters of the selected - subgoal; cf.\ the \isa{focus} operation described in - \secref{sec:variables}. - - Entering the syntactic context of a subgoal is a brittle operation, - because its exact form is somewhat accidental, and the choice of - bound variable names depends on the presence of other local and - global names. Explicit renaming of subgoal parameters prior to - explicit instantiation might help to achieve a bit more robustness. - - Type instantiations may be given as well, via pairs like \isa{{\isacharparenleft}{\isacharquery}{\isacharprime}a{\isacharcomma}\ {\isasymtau}{\isacharparenright}}. Type instantiations are distinguished from term - instantiations by the syntactic form of the schematic variable. - Types are instantiated before terms are. Since term instantiation - already performs type-inference as expected, explicit type - instantiations are seldom necessary.% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimmlref -% -\endisadelimmlref -% -\isatagmlref -% -\begin{isamarkuptext}% -\begin{mldecls} - \indexdef{}{ML}{res\_inst\_tac}\verb|res_inst_tac: Proof.context -> (indexname * string) list -> thm -> int -> tactic| \\ - \indexdef{}{ML}{eres\_inst\_tac}\verb|eres_inst_tac: Proof.context -> (indexname * string) list -> thm -> int -> tactic| \\ - \indexdef{}{ML}{dres\_inst\_tac}\verb|dres_inst_tac: Proof.context -> (indexname * string) list -> thm -> int -> tactic| \\ - \indexdef{}{ML}{forw\_inst\_tac}\verb|forw_inst_tac: Proof.context -> (indexname * string) list -> thm -> int -> tactic| \\[1ex] - \indexdef{}{ML}{rename\_tac}\verb|rename_tac: string list -> int -> tactic| \\ - \end{mldecls} - - \begin{description} - - \item \verb|res_inst_tac|~\isa{ctxt\ insts\ thm\ i} instantiates the - rule \isa{thm} with the instantiations \isa{insts}, as described - above, and then performs resolution on subgoal \isa{i}. - - \item \verb|eres_inst_tac| is like \verb|res_inst_tac|, but performs - elim-resolution. - - \item \verb|dres_inst_tac| is like \verb|res_inst_tac|, but performs - destruct-resolution. - - \item \verb|forw_inst_tac| is like \verb|dres_inst_tac| except that - the selected assumption is not deleted. - - \item \verb|rename_tac|~\isa{names\ i} renames the innermost - parameters of subgoal \isa{i} according to the provided \isa{names} (which need to be distinct indentifiers). 
- - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% -\endisatagmlref -{\isafoldmlref}% -% -\isadelimmlref -% -\endisadelimmlref -% -\isamarkupsection{Tacticals \label{sec:tacticals}% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -A \emph{tactical} is a functional combinator for building up complex - tactics from simpler ones. Typical tactical perform sequential - composition, disjunction (choice), iteration, or goal addressing. - Various search strategies may be expressed via tacticals. - - \medskip FIXME% -\end{isamarkuptext}% -\isamarkuptrue% -% -\isadelimtheory -% -\endisadelimtheory -% -\isatagtheory -\isacommand{end}\isamarkupfalse% -% -\endisatagtheory -{\isafoldtheory}% -% -\isadelimtheory -% -\endisadelimtheory -\isanewline -\end{isabellebody}% -%%% Local Variables: -%%% mode: latex -%%% TeX-master: "root" -%%% End: diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarImplementation/Thy/document/session.tex diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarImplementation/implementation.tex diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarImplementation/style.sty diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/IsaMakefile diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/HOL_Specific.thy diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/Introduction.thy diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/Outer_Syntax.thy diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/Proof.thy diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/Quick_Reference.thy diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/ROOT.ML diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/Spec.thy diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/Symbols.thy diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/document/Generic.tex --- a/doc-src/IsarRef/Thy/document/Generic.tex Thu Mar 05 02:20:06 2009 +0100 +++ b/doc-src/IsarRef/Thy/document/Generic.tex Thu Mar 05 02:24:36 2009 +0100 @@ -503,7 +503,7 @@ \item \hyperlink{command.simproc-setup}{\mbox{\isa{\isacommand{simproc{\isacharunderscore}setup}}}} defines a named simplification procedure that is invoked by the Simplifier whenever any of the given term patterns match the current redex. The implementation, - which is provided as ML source text, needs to be of type \verb|"morphism -> simpset -> cterm -> thm option"|, where the \verb|cterm| represents the current redex \isa{r} and the result is + which is provided as ML source text, needs to be of type \verb|morphism -> simpset -> cterm -> thm option|, where the \verb|cterm| represents the current redex \isa{r} and the result is supposed to be some proven rewrite rule \isa{{\isachardoublequote}r\ {\isasymequiv}\ r{\isacharprime}{\isachardoublequote}} (or a generalized version), or \verb|NONE| to indicate failure. 
The \verb|simpset| argument holds the full context of the current diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/document/Inner_Syntax.tex diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/document/Introduction.tex diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/document/Outer_Syntax.tex diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/document/Proof.tex diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/document/Quick_Reference.tex diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/document/Spec.tex diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/Thy/document/Symbols.tex diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/isar-ref.tex diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/IsarRef/style.sty diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/Ref/ref.tex diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/TutorialI/Types/Numbers.thy diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/TutorialI/Types/document/Numbers.tex diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/TutorialI/Types/numerics.tex diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/antiquote_setup.ML diff -r 61811c9224a6 -r dcf30c9861c3 doc-src/manual.bib diff -r 61811c9224a6 -r dcf30c9861c3 doc/Contents diff -r 61811c9224a6 -r dcf30c9861c3 etc/settings diff -r 61811c9224a6 -r dcf30c9861c3 lib/Tools/codegen diff -r 61811c9224a6 -r dcf30c9861c3 src/FOL/IFOL.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/FOL/IsaMakefile diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Algebra/Exponent.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Decision_Procs/MIR.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Decision_Procs/cooper_tac.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Decision_Procs/ferrack_tac.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Decision_Procs/mir_tac.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Deriv.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Divides.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Fact.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Finite_Set.thy --- a/src/HOL/Finite_Set.thy Thu Mar 05 02:20:06 2009 +0100 +++ b/src/HOL/Finite_Set.thy Thu Mar 05 02:24:36 2009 +0100 @@ -878,9 +878,54 @@ fold_image times g 1 A * fold_image times h 1 A" by (erule finite_induct) (simp_all add: mult_ac) +lemma fold_image_related: + assumes Re: "R e e" + and Rop: "\x1 y1 x2 y2. R x1 x2 \ R y1 y2 \ R (x1 * y1) (x2 * y2)" + and fS: "finite S" and Rfg: "\x\S. R (h x) (g x)" + shows "R (fold_image (op *) h e S) (fold_image (op *) g e S)" + using fS by (rule finite_subset_induct) (insert assms, auto) + +lemma fold_image_eq_general: + assumes fS: "finite S" + and h: "\y\S'. \!x. x\ S \ h(x) = y" + and f12: "\x\S. h x \ S' \ f2(h x) = f1 x" + shows "fold_image (op *) f1 e S = fold_image (op *) f2 e S'" +proof- + from h f12 have hS: "h ` S = S'" by auto + {fix x y assume H: "x \ S" "y \ S" "h x = h y" + from f12 h H have "x = y" by auto } + hence hinj: "inj_on h S" unfolding inj_on_def Ex1_def by blast + from f12 have th: "\x. x \ S \ (f2 \ h) x = f1 x" by auto + from hS have "fold_image (op *) f2 e S' = fold_image (op *) f2 e (h ` S)" by simp + also have "\ = fold_image (op *) (f2 o h) e S" + using fold_image_reindex[OF fS hinj, of f2 e] . + also have "\ = fold_image (op *) f1 e S " using th fold_image_cong[OF fS, of "f2 o h" f1 e] + by blast + finally show ?thesis .. +qed + +lemma fold_image_eq_general_inverses: + assumes fS: "finite S" + and kh: "\y. y \ T \ k y \ S \ h (k y) = y" + and hk: "\x. 
x \ S \ h x \ T \ k (h x) = x \ g (h x) = f x" + shows "fold_image (op *) f e S = fold_image (op *) g e T" + (* metis solves it, but not yet available here *) + apply (rule fold_image_eq_general[OF fS, of T h g f e]) + apply (rule ballI) + apply (frule kh) + apply (rule ex1I[]) + apply blast + apply clarsimp + apply (drule hk) apply simp + apply (rule sym) + apply (erule conjunct1[OF conjunct2[OF hk]]) + apply (rule ballI) + apply (drule hk) + apply blast + done + end - subsection {* Generalized summation over a set *} interpretation comm_monoid_add!: comm_monoid_mult "0::'a::comm_monoid_add" "op +" @@ -1092,6 +1137,31 @@ using setsum_delta[OF fS, of a b, symmetric] by (auto intro: setsum_cong) +lemma setsum_restrict_set: + assumes fA: "finite A" + shows "setsum f (A \ B) = setsum (\x. if x \ B then f x else 0) A" +proof- + from fA have fab: "finite (A \ B)" by auto + have aba: "A \ B \ A" by blast + let ?g = "\x. if x \ A\B then f x else 0" + from setsum_mono_zero_left[OF fA aba, of ?g] + show ?thesis by simp +qed + +lemma setsum_cases: + assumes fA: "finite A" + shows "setsum (\x. if x \ B then f x else g x) A = + setsum f (A \ B) + setsum g (A \ - B)" +proof- + have a: "A = A \ B \ A \ -B" "(A \ B) \ (A \ -B) = {}" + by blast+ + from fA + have f: "finite (A \ B)" "finite (A \ -B)" by auto + let ?g = "\x. if x \ B then f x else g x" + from setsum_Un_disjoint[OF f a(2), of ?g] a(1) + show ?thesis by simp +qed + (*But we can't get rid of finite I. If infinite, although the rhs is 0, the lhs need not be, since UNION I A could still be finite.*) @@ -1158,6 +1228,62 @@ setsum f A + setsum f B - setsum f (A Int B)" by (subst setsum_Un_Int [symmetric], auto simp add: algebra_simps) +lemma (in comm_monoid_mult) fold_image_1: "finite S \ (\x\S. f x = 1) \ fold_image op * f 1 S = 1" + apply (induct set: finite) + apply simp by (auto simp add: fold_image_insert) + +lemma (in comm_monoid_mult) fold_image_Un_one: + assumes fS: "finite S" and fT: "finite T" + and I0: "\x \ S\T. f x = 1" + shows "fold_image (op *) f 1 (S \ T) = fold_image (op *) f 1 S * fold_image (op *) f 1 T" +proof- + have "fold_image op * f 1 (S \ T) = 1" + apply (rule fold_image_1) + using fS fT I0 by auto + with fold_image_Un_Int[OF fS fT] show ?thesis by simp +qed + +lemma setsum_eq_general_reverses: + assumes fS: "finite S" and fT: "finite T" + and kh: "\y. y \ T \ k y \ S \ h (k y) = y" + and hk: "\x. x \ S \ h x \ T \ k (h x) = x \ g (h x) = f x" + shows "setsum f S = setsum g T" + apply (simp add: setsum_def fS fT) + apply (rule comm_monoid_add.fold_image_eq_general_inverses[OF fS]) + apply (erule kh) + apply (erule hk) + done + + + +lemma setsum_Un_zero: + assumes fS: "finite S" and fT: "finite T" + and I0: "\x \ S\T. f x = 0" + shows "setsum f (S \ T) = setsum f S + setsum f T" + using fS fT + apply (simp add: setsum_def) + apply (rule comm_monoid_add.fold_image_Un_one) + using I0 by auto + + +lemma setsum_UNION_zero: + assumes fS: "finite S" and fSS: "\T \ S. finite T" + and f0: "\T1 T2 x. T1\S \ T2\S \ T1 \ T2 \ x \ T1 \ x \ T2 \ f x = 0" + shows "setsum f (\S) = setsum (\T. setsum f T) S" + using fSS f0 +proof(induct rule: finite_induct[OF fS]) + case 1 thus ?case by simp +next + case (2 T F) + then have fTF: "finite T" "\T\F. 
finite T" "finite F" and TF: "T \ F" + and H: "setsum f (\ F) = setsum (setsum f) F" by (auto simp add: finite_insert) + from fTF have fUF: "finite (\F)" by (auto intro: finite_Union) + from "2.prems" TF fTF + show ?case + by (auto simp add: H[symmetric] intro: setsum_Un_zero[OF fTF(1) fUF, of f]) +qed + + lemma setsum_diff1_nat: "(setsum f (A - {a}) :: nat) = (if a:A then setsum f A - f a else setsum f A)" apply (case_tac "finite A") @@ -1539,6 +1665,15 @@ by (erule eq[symmetric]) qed +lemma setprod_Un_one: + assumes fS: "finite S" and fT: "finite T" + and I0: "\x \ S\T. f x = 1" + shows "setprod f (S \ T) = setprod f S * setprod f T" + using fS fT + apply (simp add: setprod_def) + apply (rule fold_image_Un_one) + using I0 by auto + lemma setprod_1: "setprod (%i. 1) A = 1" apply (case_tac "finite A") diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/GCD.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Groebner_Basis.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/HOL.thy --- a/src/HOL/HOL.thy Thu Mar 05 02:20:06 2009 +0100 +++ b/src/HOL/HOL.thy Thu Mar 05 02:24:36 2009 +0100 @@ -1709,6 +1709,11 @@ subsection {* Nitpick theorem store *} ML {* +structure Nitpick_Const_Def_Thms = NamedThmsFun +( + val name = "nitpick_const_def" + val description = "alternative definitions of constants as needed by Nitpick" +) structure Nitpick_Const_Simp_Thms = NamedThmsFun ( val name = "nitpick_const_simp" @@ -1725,7 +1730,8 @@ val description = "introduction rules for (co)inductive predicates as needed by Nitpick" ) *} -setup {* Nitpick_Const_Simp_Thms.setup +setup {* Nitpick_Const_Def_Thms.setup + #> Nitpick_Const_Simp_Thms.setup #> Nitpick_Const_Psimp_Thms.setup #> Nitpick_Ind_Intro_Thms.setup *} diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Int.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/IntDiv.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/IsaMakefile --- a/src/HOL/IsaMakefile Thu Mar 05 02:20:06 2009 +0100 +++ b/src/HOL/IsaMakefile Thu Mar 05 02:24:36 2009 +0100 @@ -314,7 +314,7 @@ Library/Euclidean_Space.thy Library/Glbs.thy Library/normarith.ML \ Library/Executable_Set.thy Library/Infinite_Set.thy \ Library/FuncSet.thy Library/Permutations.thy Library/Determinants.thy\ - Library/Bit.thy \ + Library/Bit.thy Library/Topology_Euclidean_Space.thy \ Library/Finite_Cartesian_Product.thy \ Library/FrechetDeriv.thy \ Library/Fundamental_Theorem_Algebra.thy \ diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Library/Code_Index.thy --- a/src/HOL/Library/Code_Index.thy Thu Mar 05 02:20:06 2009 +0100 +++ b/src/HOL/Library/Code_Index.thy Thu Mar 05 02:24:36 2009 +0100 @@ -87,12 +87,14 @@ then show "P k" by simp qed simp_all -lemmas [code del] = index.recs index.cases - declare index_case [case_names nat, cases type: index] declare index.induct [case_names nat, induct type: index] -lemma [code]: +lemma index_decr [termination_simp]: + "k \ Code_Index.of_nat 0 \ Code_Index.nat_of k - Suc 0 < Code_Index.nat_of k" + by (cases k) simp + +lemma [simp, code]: "index_size = nat_of" proof (rule ext) fix k @@ -102,7 +104,7 @@ finally show "index_size k = nat_of k" . 
qed -lemma [code]: +lemma [simp, code]: "size = nat_of" proof (rule ext) fix k @@ -110,6 +112,8 @@ by (induct k) (simp_all del: zero_index_def Suc_index_def, simp_all) qed +lemmas [code del] = index.recs index.cases + lemma [code]: "eq_class.eq k l \ eq_class.eq (nat_of k) (nat_of l)" by (cases k, cases l) (simp add: eq) diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Library/Determinants.thy --- a/src/HOL/Library/Determinants.thy Thu Mar 05 02:20:06 2009 +0100 +++ b/src/HOL/Library/Determinants.thy Thu Mar 05 02:24:36 2009 +0100 @@ -1,5 +1,4 @@ (* Title: Determinants - ID: $Id: Author: Amine Chaieb, University of Cambridge *) @@ -176,7 +175,7 @@ from ld[OF i(1) piU i(2)] i(1) have ex:"\i \ ?U. A$i$p i = 0" by blast from setprod_zero[OF fU ex] have "?pp p = 0" by simp} then have p0: "\p \ ?PU -{id}. ?pp p = 0" by blast - from setsum_superset[OF fPU id0 p0] show ?thesis + from setsum_mono_zero_cong_left[OF fPU id0 p0] show ?thesis unfolding det_def by (simp add: sign_id) qed @@ -199,7 +198,7 @@ from ld[OF i(1) piU i(2)] i(1) have ex:"\i \ ?U. A$i$p i = 0" by blast from setprod_zero[OF fU ex] have "?pp p = 0" by simp} then have p0: "\p \ ?PU -{id}. ?pp p = 0" by blast - from setsum_superset[OF fPU id0 p0] show ?thesis + from setsum_mono_zero_cong_left[OF fPU id0 p0] show ?thesis unfolding det_def by (simp add: sign_id) qed @@ -750,8 +749,8 @@ have "det (A**B) = setsum (\f. det (\ i. A $ i $ f i *s B $ f i)) ?F" unfolding matrix_mul_setsum_alt det_linear_rows_setsum[OF fU] .. also have "\ = setsum (\f. det (\ i. A$i$f i *s B$f i)) ?PU" - unfolding setsum_superset[OF fF PUF zth, symmetric] - unfolding det_rows_mul .. + using setsum_mono_zero_cong_left[OF fF PUF zth, symmetric] + unfolding det_rows_mul by auto finally show ?thesis unfolding th2 . qed diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Library/Euclidean_Space.thy --- a/src/HOL/Library/Euclidean_Space.thy Thu Mar 05 02:20:06 2009 +0100 +++ b/src/HOL/Library/Euclidean_Space.thy Thu Mar 05 02:24:36 2009 +0100 @@ -1,5 +1,4 @@ (* Title: Library/Euclidean_Space - ID: $Id: Author: Amine Chaieb, University of Cambridge *) @@ -626,7 +625,7 @@ ultimately show ?thesis by metis qed -lemma dot_pos_lt: "(0 < x \ x) \ (x::'a::{ordered_ring_strict,ring_no_zero_divisors} ^ 'n) \ 0" using dot_eq_0[of x] dot_pos_le[of x] +lemma dot_pos_lt[simp]: "(0 < x \ x) \ (x::'a::{ordered_ring_strict,ring_no_zero_divisors} ^ 'n) \ 0" using dot_eq_0[of x] dot_pos_le[of x] by (auto simp add: le_less) subsection{* The collapse of the general concepts to dimension one. *} @@ -759,10 +758,10 @@ text{* Hence derive more interesting properties of the norm. 
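  For instance, the facts that follow include homogeneity of the norm and the
  characterisation of vanishing norms via the dot product (writing $\cdot_s$
  for scalar multiplication and $\bullet$ for the dot product):
  \[ \lVert a \cdot_s x \rVert = \lvert a \rvert \, \lVert x \rVert
     \qquad\text{and}\qquad
     \lVert x \rVert = 0 \;\longleftrightarrow\; x \bullet x = 0 . \]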
*} -lemma norm_0: "norm (0::real ^ 'n) = 0" +lemma norm_0[simp]: "norm (0::real ^ 'n) = 0" by (rule norm_zero) -lemma norm_mul: "norm(a *s x) = abs(a) * norm x" +lemma norm_mul[simp]: "norm(a *s x) = abs(a) * norm x" by (simp add: vector_norm_def vector_component setL2_right_distrib abs_mult cong: strong_setL2_cong) lemma norm_eq_0_dot: "(norm x = 0) \ (x \ x = (0::real))" @@ -772,11 +771,11 @@ lemma norm_pow_2: "norm x ^ 2 = x \ x" by (simp add: real_vector_norm_def) lemma norm_eq_0_imp: "norm x = 0 ==> x = (0::real ^'n)" by (metis norm_eq_zero) -lemma vector_mul_eq_0: "(a *s x = 0) \ a = (0::'a::idom) \ x = 0" +lemma vector_mul_eq_0[simp]: "(a *s x = 0) \ a = (0::'a::idom) \ x = 0" by vector -lemma vector_mul_lcancel: "a *s x = a *s y \ a = (0::real) \ x = y" +lemma vector_mul_lcancel[simp]: "a *s x = a *s y \ a = (0::real) \ x = y" by (metis eq_iff_diff_eq_0 vector_mul_eq_0 vector_ssub_ldistrib) -lemma vector_mul_rcancel: "a *s x = b *s x \ (a::real) = b \ x = 0" +lemma vector_mul_rcancel[simp]: "a *s x = b *s x \ (a::real) = b \ x = 0" by (metis eq_iff_diff_eq_0 vector_mul_eq_0 vector_sub_rdistrib) lemma vector_mul_lcancel_imp: "a \ (0::real) ==> a *s x = a *s y ==> (x = y)" by (metis vector_mul_lcancel) @@ -814,28 +813,6 @@ lemma norm_triangle_lt: "norm(x::real ^'n) + norm(y) < e ==> norm(x + y) < e" by (metis basic_trans_rules(21) norm_triangle_ineq) -lemma setsum_delta: - assumes fS: "finite S" - shows "setsum (\k. if k=a then b k else 0) S = (if a \ S then b a else 0)" -proof- - let ?f = "(\k. if k=a then b k else 0)" - {assume a: "a \ S" - hence "\ k\ S. ?f k = 0" by simp - hence ?thesis using a by simp} - moreover - {assume a: "a \ S" - let ?A = "S - {a}" - let ?B = "{a}" - have eq: "S = ?A \ ?B" using a by blast - have dj: "?A \ ?B = {}" by simp - from fS have fAB: "finite ?A" "finite ?B" by auto - have "setsum ?f S = setsum ?f ?A + setsum ?f ?B" - using setsum_Un_disjoint[OF fAB dj, of ?f, unfolded eq[symmetric]] - by simp - then have ?thesis using a by simp} - ultimately show ?thesis by blast -qed - lemma component_le_norm: "i \ {1 .. dimindex(UNIV :: 'n set)} ==> \x$i\ <= norm (x::real ^ 'n)" apply (simp add: vector_norm_def) apply (rule member_le_setL2, simp_all) @@ -852,7 +829,7 @@ lemma norm_le_l1: "norm (x:: real ^'n) <= setsum(\i. \x$i\) {1..dimindex(UNIV::'n set)}" by (simp add: vector_norm_def setL2_le_setsum) -lemma real_abs_norm: "\ norm x\ = norm (x :: real ^'n)" +lemma real_abs_norm[simp]: "\ norm x\ = norm (x :: real ^'n)" by (rule abs_norm_cancel) lemma real_abs_sub_norm: "\norm(x::real ^'n) - norm y\ <= norm(x - y)" by (rule norm_triangle_ineq3) @@ -929,6 +906,7 @@ apply simp_all done + (* FIXME: Move all these theorems into the ML code using lemma antiquotation *) lemma norm_add_rule_thm: "b1 >= norm(x1 :: real ^'n) \ b2 >= norm(x2) ==> b1 + b2 >= norm(x1 + x2)" apply (rule norm_triangle_le) by simp @@ -977,17 +955,17 @@ text{* Hence more metric properties. 
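  Among them: symmetry, the triangle inequality, and compatibility with scalar
  multiplication (again writing $\cdot_s$ for scalar multiplication):
  \[ d(x,y) = d(y,x), \qquad d(x,z) \le d(x,y) + d(y,z), \qquad
     d(c \cdot_s x,\, c \cdot_s y) = \lvert c \rvert \, d(x,y) . \]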
*} -lemma dist_refl: "dist x x = 0" by norm +lemma dist_refl[simp]: "dist x x = 0" by norm lemma dist_sym: "dist x y = dist y x"by norm -lemma dist_pos_le: "0 <= dist x y" by norm +lemma dist_pos_le[simp]: "0 <= dist x y" by norm lemma dist_triangle: "dist x z <= dist x y + dist y z" by norm lemma dist_triangle_alt: "dist y z <= dist x y + dist x z" by norm -lemma dist_eq_0: "dist x y = 0 \ x = y" by norm +lemma dist_eq_0[simp]: "dist x y = 0 \ x = y" by norm lemma dist_pos_lt: "x \ y ==> 0 < dist x y" by norm lemma dist_nz: "x \ y \ 0 < dist x y" by norm @@ -1003,12 +981,12 @@ lemma dist_triangle_add: "dist (x + y) (x' + y') <= dist x x' + dist y y'" by norm -lemma dist_mul: "dist (c *s x) (c *s y) = \c\ * dist x y" +lemma dist_mul[simp]: "dist (c *s x) (c *s y) = \c\ * dist x y" unfolding dist_def vector_ssub_ldistrib[symmetric] norm_mul .. lemma dist_triangle_add_half: " dist x x' < e / 2 \ dist y y' < e / 2 ==> dist(x + y) (x' + y') < e" by norm -lemma dist_le_0: "dist x y <= 0 \ x = y" by norm +lemma dist_le_0[simp]: "dist x y <= 0 \ x = y" by norm lemma setsum_eq: "setsum f S = (\ i. setsum (\x. (f x)$i ) S)" apply vector @@ -1035,47 +1013,6 @@ shows "(setsum f S)$i = setsum (\x. (f x)$i) S" using i by (simp add: setsum_eq Cart_lambda_beta) - (* This needs finiteness assumption due to the definition of fold!!! *) - -lemma setsum_superset: - assumes fb: "finite B" and ab: "A \ B" - and f0: "\x \ B - A. f x = 0" - shows "setsum f B = setsum f A" -proof- - from ab fb have fa: "finite A" by (metis finite_subset) - from fb have fba: "finite (B - A)" by (metis finite_Diff) - have d: "A \ (B - A) = {}" by blast - from ab have b: "B = A \ (B - A)" by blast - from setsum_Un_disjoint[OF fa fba d, of f] b - setsum_0'[OF f0] - show "setsum f B = setsum f A" by simp -qed - -lemma setsum_restrict_set: - assumes fA: "finite A" - shows "setsum f (A \ B) = setsum (\x. if x \ B then f x else 0) A" -proof- - from fA have fab: "finite (A \ B)" by auto - have aba: "A \ B \ A" by blast - let ?g = "\x. if x \ A\B then f x else 0" - from setsum_superset[OF fA aba, of ?g] - show ?thesis by simp -qed - -lemma setsum_cases: - assumes fA: "finite A" - shows "setsum (\x. if x \ B then f x else g x) A = - setsum f (A \ B) + setsum g (A \ - B)" -proof- - have a: "A = A \ B \ A \ -B" "(A \ B) \ (A \ -B) = {}" - by blast+ - from fA - have f: "finite (A \ B)" "finite (A \ -B)" by auto - let ?g = "\x. if x \ B then f x else g x" - from setsum_Un_disjoint[OF f a(2), of ?g] a(1) - show ?thesis by simp -qed - lemma setsum_norm: fixes f :: "'a \ 'b::real_normed_vector" assumes fS: "finite S" @@ -1173,41 +1110,6 @@ from setsum_Un_disjoint[of "?A" "?B" f] eq d show ?thesis by auto qed -lemma setsum_reindex_nonzero: - assumes fS: "finite S" - and nz: "\ x y. x \ S \ y \ S \ x \ y \ f x = f y \ h (f x) = 0" - shows "setsum h (f ` S) = setsum (h o f) S" -using nz -proof(induct rule: finite_induct[OF fS]) - case 1 thus ?case by simp -next - case (2 x F) - {assume fxF: "f x \ f ` F" hence "\y \ F . 
f y = f x" by auto - then obtain y where y: "y \ F" "f x = f y" by auto - from "2.hyps" y have xy: "x \ y" by auto - - from "2.prems"[of x y] "2.hyps" xy y have h0: "h (f x) = 0" by simp - have "setsum h (f ` insert x F) = setsum h (f ` F)" using fxF by auto - also have "\ = setsum (h o f) (insert x F)" - using "2.hyps" "2.prems" h0 by auto - finally have ?case .} - moreover - {assume fxF: "f x \ f ` F" - have "setsum h (f ` insert x F) = h (f x) + setsum h (f ` F)" - using fxF "2.hyps" by simp - also have "\ = setsum (h o f) (insert x F)" - using "2.hyps" "2.prems" fxF - apply auto apply metis done - finally have ?case .} - ultimately show ?case by blast -qed - -lemma setsum_Un_nonzero: - assumes fS: "finite S" and fF: "finite F" - and f: "\ x\ S \ F . f x = (0::'a::ab_group_add)" - shows "setsum f (S \ F) = setsum f S + setsum f F" - using setsum_Un[OF fS fF, of f] setsum_0'[OF f] by simp - lemma setsum_natinterval_left: assumes mn: "(m::nat) <= n" shows "setsum f {m..n} = f m + setsum f {m + 1..n}" @@ -1249,109 +1151,9 @@ shows "setsum (\y. setsum g {x. x\ S \ f x = y}) T = setsum g S" apply (subst setsum_image_gen[OF fS, of g f]) -apply (rule setsum_superset[OF fT fST]) +apply (rule setsum_mono_zero_right[OF fT fST]) by (auto intro: setsum_0') -(* FIXME: Change the name to fold_image\ *) -lemma (in comm_monoid_mult) fold_1': "finite S \ (\x\S. f x = 1) \ fold_image op * f 1 S = 1" - apply (induct set: finite) - apply simp by (auto simp add: fold_image_insert) - -lemma (in comm_monoid_mult) fold_union_nonzero: - assumes fS: "finite S" and fT: "finite T" - and I0: "\x \ S\T. f x = 1" - shows "fold_image (op *) f 1 (S \ T) = fold_image (op *) f 1 S * fold_image (op *) f 1 T" -proof- - have "fold_image op * f 1 (S \ T) = 1" - apply (rule fold_1') - using fS fT I0 by auto - with fold_image_Un_Int[OF fS fT] show ?thesis by simp -qed - -lemma setsum_union_nonzero: - assumes fS: "finite S" and fT: "finite T" - and I0: "\x \ S\T. f x = 0" - shows "setsum f (S \ T) = setsum f S + setsum f T" - using fS fT - apply (simp add: setsum_def) - apply (rule comm_monoid_add.fold_union_nonzero) - using I0 by auto - -lemma setprod_union_nonzero: - assumes fS: "finite S" and fT: "finite T" - and I0: "\x \ S\T. f x = 1" - shows "setprod f (S \ T) = setprod f S * setprod f T" - using fS fT - apply (simp add: setprod_def) - apply (rule fold_union_nonzero) - using I0 by auto - -lemma setsum_unions_nonzero: - assumes fS: "finite S" and fSS: "\T \ S. finite T" - and f0: "\T1 T2 x. T1\S \ T2\S \ T1 \ T2 \ x \ T1 \ x \ T2 \ f x = 0" - shows "setsum f (\S) = setsum (\T. setsum f T) S" - using fSS f0 -proof(induct rule: finite_induct[OF fS]) - case 1 thus ?case by simp -next - case (2 T F) - then have fTF: "finite T" "\T\F. finite T" "finite F" and TF: "T \ F" - and H: "setsum f (\ F) = setsum (setsum f) F" by (auto simp add: finite_insert) - from fTF have fUF: "finite (\F)" by (auto intro: finite_Union) - from "2.prems" TF fTF - show ?case - by (auto simp add: H[symmetric] intro: setsum_union_nonzero[OF fTF(1) fUF, of f]) -qed - - (* FIXME : Copied from Pocklington --- should be moved to Finite_Set!!!!!!!! *) - - -lemma (in comm_monoid_mult) fold_related: - assumes Re: "R e e" - and Rop: "\x1 y1 x2 y2. R x1 x2 \ R y1 y2 \ R (x1 * y1) (x2 * y2)" - and fS: "finite S" and Rfg: "\x\S. R (h x) (g x)" - shows "R (fold_image (op *) h e S) (fold_image (op *) g e S)" - using fS by (rule finite_subset_induct) (insert assms, auto) - - (* FIXME: I think we can get rid of the finite assumption!! 
*) -lemma (in comm_monoid_mult) - fold_eq_general: - assumes fS: "finite S" - and h: "\y\S'. \!x. x\ S \ h(x) = y" - and f12: "\x\S. h x \ S' \ f2(h x) = f1 x" - shows "fold_image (op *) f1 e S = fold_image (op *) f2 e S'" -proof- - from h f12 have hS: "h ` S = S'" by auto - {fix x y assume H: "x \ S" "y \ S" "h x = h y" - from f12 h H have "x = y" by auto } - hence hinj: "inj_on h S" unfolding inj_on_def Ex1_def by blast - from f12 have th: "\x. x \ S \ (f2 \ h) x = f1 x" by auto - from hS have "fold_image (op *) f2 e S' = fold_image (op *) f2 e (h ` S)" by simp - also have "\ = fold_image (op *) (f2 o h) e S" - using fold_image_reindex[OF fS hinj, of f2 e] . - also have "\ = fold_image (op *) f1 e S " using th fold_image_cong[OF fS, of "f2 o h" f1 e] - by blast - finally show ?thesis .. -qed - -lemma (in comm_monoid_mult) fold_eq_general_inverses: - assumes fS: "finite S" - and kh: "\y. y \ T \ k y \ S \ h (k y) = y" - and hk: "\x. x \ S \ h x \ T \ k (h x) = x \ g (h x) = f x" - shows "fold_image (op *) f e S = fold_image (op *) g e T" - using fold_eq_general[OF fS, of T h g f e] kh hk by metis - -lemma setsum_eq_general_reverses: - assumes fS: "finite S" and fT: "finite T" - and kh: "\y. y \ T \ k y \ S \ h (k y) = y" - and hk: "\x. x \ S \ h x \ T \ k (h x) = x \ g (h x) = f x" - shows "setsum f S = setsum g T" - apply (simp add: setsum_def fS fT) - apply (rule comm_monoid_add.fold_eq_general_inverses[OF fS]) - apply (erule kh) - apply (erule hk) - done - lemma vsum_norm_allsubsets_bound: fixes f:: "'a \ real ^'n" assumes fP: "finite P" and fPs: "\Q. Q \ P \ norm (setsum f Q) \ e" @@ -1383,7 +1185,7 @@ by (auto simp add: setsum_negf setsum_component vector_component intro: abs_le_D1) have "setsum (\x. \f x $ i\) P = setsum (\x. \f x $ i\) ?Pp + setsum (\x. \f x $ i\) ?Pn" apply (subst thp) - apply (rule setsum_Un_nonzero) + apply (rule setsum_Un_zero) using fP thp0 by auto also have "\ \ 2*e" using Pne Ppe by arith finally show "setsum (\x. \f x $ i\) P \ 2*e" . @@ -1392,7 +1194,7 @@ qed lemma dot_lsum: "finite S \ setsum f S \ (y::'a::{comm_ring}^'n) = setsum (\x. f x \ y) S " - by (induct rule: finite_induct, auto simp add: dot_lzero dot_ladd) + by (induct rule: finite_induct, auto simp add: dot_lzero dot_ladd dot_radd) lemma dot_rsum: "finite S \ (y::'a::{comm_ring}^'n) \ setsum f S = setsum (\x. 
y \ f x) S " by (induct rule: finite_induct, auto simp add: dot_rzero dot_radd) @@ -4137,7 +3939,8 @@ apply (subst Cy) using C(1) fth apply (simp only: setsum_clauses) - apply (auto simp add: dot_ladd dot_lmult dot_eq_0 dot_sym[of y a] dot_lsum[OF fth]) + thm dot_ladd + apply (auto simp add: dot_ladd dot_radd dot_lmult dot_rmult dot_eq_0 dot_sym[of y a] dot_lsum[OF fth]) apply (rule setsum_0') apply clarsimp apply (rule C(4)[unfolded pairwise_def orthogonal_def, rule_format]) @@ -5294,14 +5097,11 @@ have ?lhs unfolding collinear_def c apply (rule exI[where x=x]) apply auto - apply (rule exI[where x=0], simp) apply (rule exI[where x="- 1"], simp only: vector_smult_lneg vector_smult_lid) apply (rule exI[where x= "-c"], simp only: vector_smult_lneg) apply (rule exI[where x=1], simp) - apply (rule exI[where x=0], simp) apply (rule exI[where x="1 - c"], simp add: vector_smult_lneg vector_sub_rdistrib) apply (rule exI[where x="c - 1"], simp add: vector_smult_lneg vector_sub_rdistrib) - apply (rule exI[where x=0], simp) done} ultimately have ?thesis by blast} ultimately show ?thesis by blast diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Library/Finite_Cartesian_Product.thy --- a/src/HOL/Library/Finite_Cartesian_Product.thy Thu Mar 05 02:20:06 2009 +0100 +++ b/src/HOL/Library/Finite_Cartesian_Product.thy Thu Mar 05 02:24:36 2009 +0100 @@ -1,5 +1,4 @@ (* Title: HOL/Library/Finite_Cartesian_Product - ID: $Id: Finite_Cartesian_Product.thy,v 1.5 2009/01/29 22:59:46 chaieb Exp $ Author: Amine Chaieb, University of Cambridge *) diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Library/Float.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Library/Fundamental_Theorem_Algebra.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Library/Glbs.thy --- a/src/HOL/Library/Glbs.thy Thu Mar 05 02:20:06 2009 +0100 +++ b/src/HOL/Library/Glbs.thy Thu Mar 05 02:24:36 2009 +0100 @@ -1,5 +1,4 @@ (* Title: Glbs - ID: $Id: Author: Amine Chaieb, University of Cambridge *) diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Library/Library.thy --- a/src/HOL/Library/Library.thy Thu Mar 05 02:20:06 2009 +0100 +++ b/src/HOL/Library/Library.thy Thu Mar 05 02:24:36 2009 +0100 @@ -50,6 +50,7 @@ Reflection RBT State_Monad + Topology_Euclidean_Space Univ_Poly While_Combinator Word diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Library/Numeral_Type.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Library/Permutations.thy --- a/src/HOL/Library/Permutations.thy Thu Mar 05 02:20:06 2009 +0100 +++ b/src/HOL/Library/Permutations.thy Thu Mar 05 02:24:36 2009 +0100 @@ -1,5 +1,4 @@ (* Title: Library/Permutations - ID: $Id: Author: Amine Chaieb, University of Cambridge *) diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Library/Pocklington.thy --- a/src/HOL/Library/Pocklington.thy Thu Mar 05 02:20:06 2009 +0100 +++ b/src/HOL/Library/Pocklington.thy Thu Mar 05 02:24:36 2009 +0100 @@ -554,12 +554,6 @@ (* Fermat's Little theorem / Fermat-Euler theorem. *) -lemma (in comm_monoid_mult) fold_image_related: - assumes Re: "R e e" - and Rop: "\x1 y1 x2 y2. R x1 x2 \ R y1 y2 \ R (x1 * y1) (x2 * y2)" - and fS: "finite S" and Rfg: "\x\S. R (h x) (g x)" - shows "R (fold_image (op *) h e S) (fold_image (op *) g e S)" - using fS by (rule finite_subset_induct) (insert assms, auto) lemma nproduct_mod: assumes fS: "finite S" and n0: "n \ 0" @@ -585,26 +579,6 @@ using fS unfolding setprod_def by (rule finite_subset_induct) (insert Sn, auto simp add: coprime_mul) -lemma (in comm_monoid_mult) - fold_image_eq_general: - assumes fS: "finite S" - and h: "\y\S'. \!x. 
x\ S \ h(x) = y" - and f12: "\x\S. h x \ S' \ f2(h x) = f1 x" - shows "fold_image (op *) f1 e S = fold_image (op *) f2 e S'" -proof- - from h f12 have hS: "h ` S = S'" by auto - {fix x y assume H: "x \ S" "y \ S" "h x = h y" - from f12 h H have "x = y" by auto } - hence hinj: "inj_on h S" unfolding inj_on_def Ex1_def by blast - from f12 have th: "\x. x \ S \ (f2 \ h) x = f1 x" by auto - from hS have "fold_image (op *) f2 e S' = fold_image (op *) f2 e (h ` S)" by simp - also have "\ = fold_image (op *) (f2 o h) e S" - using fold_image_reindex[OF fS hinj, of f2 e] . - also have "\ = fold_image (op *) f1 e S " using th fold_image_cong[OF fS, of "f2 o h" f1 e] - by blast - finally show ?thesis .. -qed - lemma fermat_little: assumes an: "coprime a n" shows "[a ^ (\ n) = 1] (mod n)" proof- @@ -1287,5 +1261,4 @@ show ?thesis by blast qed - end diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Library/Primes.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Library/Topology_Euclidean_Space.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Library/Topology_Euclidean_Space.thy Thu Mar 05 02:24:36 2009 +0100 @@ -0,0 +1,5691 @@ +(* Title: Topology + Author: Amine Chaieb, University of Cambridge + Author: Robert Himmelmann, TU Muenchen +*) + +header {* Elementary topology in Euclidean space. *} + +theory Topology_Euclidean_Space + imports SEQ Euclidean_Space +begin + + +declare fstcart_pastecart[simp] sndcart_pastecart[simp] + +subsection{* General notion of a topology *} + +definition "istopology L \ {} \ L \ (\S \L. \T \L. S \ T \ L) \ (\K. K \L \ \ K \ L)" +typedef (open) 'a topology = "{L::('a set) set. istopology L}" + morphisms "openin" "topology" + unfolding istopology_def by blast + +lemma istopology_open_in[intro]: "istopology(openin U)" + using openin[of U] by blast + +lemma topology_inverse': "istopology U \ openin (topology U) = U" + using topology_inverse[unfolded mem_def Collect_def] . + +lemma topology_inverse_iff: "istopology U \ openin (topology U) = U" + using topology_inverse[of U] istopology_open_in[of "topology U"] by auto + +lemma topology_eq: "T1 = T2 \ (\S. openin T1 S \ openin T2 S)" +proof- + {assume "T1=T2" hence "\S. openin T1 S \ openin T2 S" by simp} + moreover + {assume H: "\S. openin T1 S \ openin T2 S" + hence "openin T1 = openin T2" by (metis mem_def set_ext) + hence "topology (openin T1) = topology (openin T2)" by simp + hence "T1 = T2" unfolding openin_inverse .} + ultimately show ?thesis by blast +qed + +text{* Infer the "universe" from union of all sets in the topology. *} + +definition "topspace T = \{S. openin T S}" + +subsection{* Main properties of open sets *} + +lemma openin_clauses: + fixes U :: "'a topology" + shows "openin U {}" + "\S T. openin U S \ openin U T \ openin U (S\T)" + "\K. (\S \ K. openin U S) \ openin U (\K)" + using openin[of U] unfolding istopology_def Collect_def mem_def + by (metis mem_def subset_eq)+ + +lemma openin_subset[intro]: "openin U S \ S \ topspace U" + unfolding topspace_def by blast +lemma openin_empty[simp]: "openin U {}" by (simp add: openin_clauses) + +lemma openin_Int[intro]: "openin U S \ openin U T \ openin U (S \ T)" + by (simp add: openin_clauses) + +lemma openin_Union[intro]: "(\S \K. openin U S) \ openin U (\ K)" by (simp add: openin_clauses) + +lemma openin_Un[intro]: "openin U S \ openin U T \ openin U (S \ T)" + using openin_Union[of "{S,T}" U] by auto + +lemma openin_topspace[intro, simp]: "openin U (topspace U)" by (simp add: openin_Union topspace_def) + +lemma openin_subopen: "openin U S \ (\x \ S. \T. 
openin U T \ x \ T \ T \ S)" (is "?lhs \ ?rhs") +proof- + {assume ?lhs then have ?rhs by auto } + moreover + {assume H: ?rhs + then obtain t where t: "\x\S. openin U (t x) \ x \ t x \ t x \ S" + unfolding Ball_def ex_simps(6)[symmetric] choice_iff by blast + from t have th0: "\x\ t`S. openin U x" by auto + have "\ t`S = S" using t by auto + with openin_Union[OF th0] have "openin U S" by simp } + ultimately show ?thesis by blast +qed + +subsection{* Closed sets *} + +definition "closedin U S \ S \ topspace U \ openin U (topspace U - S)" + +lemma closedin_subset: "closedin U S \ S \ topspace U" by (metis closedin_def) +lemma closedin_empty[simp]: "closedin U {}" by (simp add: closedin_def) +lemma closedin_topspace[intro,simp]: + "closedin U (topspace U)" by (simp add: closedin_def) +lemma closedin_Un[intro]: "closedin U S \ closedin U T \ closedin U (S \ T)" + by (auto simp add: Diff_Un closedin_def) + +lemma Diff_Inter[intro]: "A - \S = \ {A - s|s. s\S}" by auto +lemma closedin_Inter[intro]: assumes Ke: "K \ {}" and Kc: "\S \K. closedin U S" + shows "closedin U (\ K)" using Ke Kc unfolding closedin_def Diff_Inter by auto + +lemma closedin_Int[intro]: "closedin U S \ closedin U T \ closedin U (S \ T)" + using closedin_Inter[of "{S,T}" U] by auto + +lemma Diff_Diff_Int: "A - (A - B) = A \ B" by blast +lemma openin_closedin_eq: "openin U S \ S \ topspace U \ closedin U (topspace U - S)" + apply (auto simp add: closedin_def) + apply (metis openin_subset subset_eq) + apply (auto simp add: Diff_Diff_Int) + apply (subgoal_tac "topspace U \ S = S") + by auto + +lemma openin_closedin: "S \ topspace U \ (openin U S \ closedin U (topspace U - S))" + by (simp add: openin_closedin_eq) + +lemma openin_diff[intro]: assumes oS: "openin U S" and cT: "closedin U T" shows "openin U (S - T)" +proof- + have "S - T = S \ (topspace U - T)" using openin_subset[of U S] oS cT + by (auto simp add: topspace_def openin_subset) + then show ?thesis using oS cT by (auto simp add: closedin_def) +qed + +lemma closedin_diff[intro]: assumes oS: "closedin U S" and cT: "openin U T" shows "closedin U (S - T)" +proof- + have "S - T = S \ (topspace U - T)" using closedin_subset[of U S] oS cT + by (auto simp add: topspace_def ) + then show ?thesis using oS cT by (auto simp add: openin_closedin_eq) +qed + +subsection{* Subspace topology. *} + +definition "subtopology U V = topology {S \ V |S. openin U S}" + +lemma istopology_subtopology: "istopology {S \ V |S. openin U S}" (is "istopology ?L") +proof- + have "{} \ ?L" by blast + {fix A B assume A: "A \ ?L" and B: "B \ ?L" + from A B obtain Sa and Sb where Sa: "openin U Sa" "A = Sa \ V" and Sb: "openin U Sb" "B = Sb \ V" by blast + have "A\B = (Sa \ Sb) \ V" "openin U (Sa \ Sb)" using Sa Sb by blast+ + then have "A \ B \ ?L" by blast} + moreover + {fix K assume K: "K \ ?L" + have th0: "?L = (\S. S \ V) ` openin U " + apply (rule set_ext) + apply (simp add: Ball_def image_iff) + by (metis mem_def) + from K[unfolded th0 subset_image_iff] + obtain Sk where Sk: "Sk \ openin U" "K = (\S. S \ V) ` Sk" by blast + have "\K = (\Sk) \ V" using Sk by auto + moreover have "openin U (\ Sk)" using Sk by (auto simp add: subset_eq mem_def) + ultimately have "\K \ ?L" by blast} + ultimately show ?thesis unfolding istopology_def by blast +qed + +lemma openin_subtopology: + "openin (subtopology U V) S \ (\ T. 
(openin U T) \ (S = T \ V))" + unfolding subtopology_def topology_inverse'[OF istopology_subtopology] + by (auto simp add: Collect_def) + +lemma topspace_subtopology: "topspace(subtopology U V) = topspace U \ V" + by (auto simp add: topspace_def openin_subtopology) + +lemma closedin_subtopology: + "closedin (subtopology U V) S \ (\T. closedin U T \ S = T \ V)" + unfolding closedin_def topspace_subtopology + apply (simp add: openin_subtopology) + apply (rule iffI) + apply clarify + apply (rule_tac x="topspace U - T" in exI) + by auto + +lemma openin_subtopology_refl: "openin (subtopology U V) V \ V \ topspace U" + unfolding openin_subtopology + apply (rule iffI, clarify) + apply (frule openin_subset[of U]) apply blast + apply (rule exI[where x="topspace U"]) + by auto + +lemma subtopology_superset: assumes UV: "topspace U \ V" + shows "subtopology U V = U" +proof- + {fix S + {fix T assume T: "openin U T" "S = T \ V" + from T openin_subset[OF T(1)] UV have eq: "S = T" by blast + have "openin U S" unfolding eq using T by blast} + moreover + {assume S: "openin U S" + hence "\T. openin U T \ S = T \ V" + using openin_subset[OF S] UV by auto} + ultimately have "(\T. openin U T \ S = T \ V) \ openin U S" by blast} + then show ?thesis unfolding topology_eq openin_subtopology by blast +qed + + +lemma subtopology_topspace[simp]: "subtopology U (topspace U) = U" + by (simp add: subtopology_superset) + +lemma subtopology_UNIV[simp]: "subtopology U UNIV = U" + by (simp add: subtopology_superset) + +subsection{* The universal Euclidean versions are what we use most of the time *} +definition "open S \ (\x \ S. \e >0. \x'. dist x' x < e \ x' \ S)" +definition "closed S \ open(UNIV - S)" +definition "euclidean = topology open" + +lemma open_empty[intro,simp]: "open {}" by (simp add: open_def) +lemma open_UNIV[intro,simp]: "open UNIV" + by (simp add: open_def, rule exI[where x="1"], auto) + +lemma open_inter[intro]: assumes S: "open S" and T: "open T" + shows "open (S \ T)" +proof- + note thS = S[unfolded open_def, rule_format] + note thT = T[unfolded open_def, rule_format] + {fix x assume x: "x \ S\T" + hence xS: "x \ S" and xT: "x \ T" by simp_all + from thS[OF xS] obtain eS where eS: "eS > 0" "\x'. dist x' x < eS \ x' \ S" by blast + from thT[OF xT] obtain eT where eT: "eT > 0" "\x'. dist x' x < eT \ x' \ T" by blast + from real_lbound_gt_zero[OF eS(1) eT(1)] obtain e where e: "e > 0" "e < eS" "e < eT" by blast + { fix x' assume d: "dist x' x < e" + hence dS: "dist x' x < eS" and dT: "dist x' x < eT" using e by arith+ + from eS(2)[rule_format, OF dS] eT(2)[rule_format, OF dT] have "x' \ S\T" by blast} + hence "\e >0. \x'. dist x' x < e \ x' \ (S\T)" using e by blast} + then show ?thesis unfolding open_def by blast +qed + +lemma open_Union[intro]: "(\S\K. 
open S) \ open (\ K)" + by (simp add: open_def) metis + +lemma open_openin: "open S \ openin euclidean S" + unfolding euclidean_def + apply (rule cong[where x=S and y=S]) + apply (rule topology_inverse[symmetric]) + apply (auto simp add: istopology_def) + by (auto simp add: mem_def subset_eq) + +lemma topspace_euclidean: "topspace euclidean = UNIV" + apply (simp add: topspace_def) + apply (rule set_ext) + by (auto simp add: open_openin[symmetric]) + +lemma topspace_euclidean_subtopology[simp]: "topspace (subtopology euclidean S) = S" + by (simp add: topspace_euclidean topspace_subtopology) + +lemma closed_closedin: "closed S \ closedin euclidean S" + by (simp add: closed_def closedin_def topspace_euclidean open_openin) + +lemma open_Un[intro]: "open S \ open T \ open (S\T)" + by (auto simp add: open_openin) + +lemma open_subopen: "open S \ (\x\S. \T. open T \ x \ T \ T \ S)" + by (simp add: open_openin openin_subopen[symmetric]) + +lemma closed_empty[intro, simp]: "closed {}" by (simp add: closed_closedin) + +lemma closed_UNIV[simp,intro]: "closed UNIV" + by (simp add: closed_closedin topspace_euclidean[symmetric]) + +lemma closed_Un[intro]: "closed S \ closed T \ closed (S\T)" + by (auto simp add: closed_closedin) + +lemma closed_Int[intro]: "closed S \ closed T \ closed (S\T)" + by (auto simp add: closed_closedin) + +lemma closed_Inter[intro]: assumes H: "\S \K. closed S" shows "closed (\K)" + using H + unfolding closed_closedin + apply (cases "K = {}") + apply (simp add: closed_closedin[symmetric]) + apply (rule closedin_Inter, auto) + done + +lemma open_closed: "open S \ closed (UNIV - S)" + by (simp add: open_openin closed_closedin topspace_euclidean openin_closedin_eq) + +lemma closed_open: "closed S \ open(UNIV - S)" + by (simp add: open_openin closed_closedin topspace_euclidean closedin_def) + +lemma open_diff[intro]: "open S \ closed T \ open (S - T)" + by (auto simp add: open_openin closed_closedin) + +lemma closed_diff[intro]: "closed S \ open T \ closed(S-T)" + by (auto simp add: open_openin closed_closedin) + +lemma open_Inter[intro]: assumes fS: "finite S" and h: "\T\S. open T" shows "open (\S)" + using h by (induct rule: finite_induct[OF fS], auto) + +lemma closed_Union[intro]: assumes fS: "finite S" and h: "\T\S. closed T" shows "closed (\S)" + using h by (induct rule: finite_induct[OF fS], auto) + +subsection{* Open and closed balls. *} + +definition "ball x e = {y. dist x y < e}" +definition "cball x e = {y. 
dist x y \ e}" + +lemma mem_ball[simp]: "y \ ball x e \ dist x y < e" by (simp add: ball_def) +lemma mem_cball[simp]: "y \ cball x e \ dist x y \ e" by (simp add: cball_def) +lemma mem_ball_0[simp]: "x \ ball 0 e \ norm x < e" by (simp add: dist_def) +lemma mem_cball_0[simp]: "x \ cball 0 e \ norm x \ e" by (simp add: dist_def) +lemma centre_in_cball[simp]: "x \ cball x e \ 0\ e" by simp +lemma ball_subset_cball[simp,intro]: "ball x e \ cball x e" by (simp add: subset_eq) +lemma subset_ball[intro]: "d <= e ==> ball x d \ ball x e" by (simp add: subset_eq) +lemma subset_cball[intro]: "d <= e ==> cball x d \ cball x e" by (simp add: subset_eq) +lemma ball_max_Un: "ball a (max r s) = ball a r \ ball a s" + by (simp add: expand_set_eq) arith + +lemma ball_min_Int: "ball a (min r s) = ball a r \ ball a s" + by (simp add: expand_set_eq) + +subsection{* Topological properties of open balls *} + +lemma diff_less_iff: "(a::real) - b > 0 \ a > b" + "(a::real) - b < 0 \ a < b" + "a - b < c \ a < c +b" "a - b > c \ a > c +b" by arith+ +lemma diff_le_iff: "(a::real) - b \ 0 \ a \ b" "(a::real) - b \ 0 \ a \ b" + "a - b \ c \ a \ c +b" "a - b \ c \ a \ c +b" by arith+ + +lemma open_ball[intro, simp]: "open (ball x e)" + unfolding open_def ball_def Collect_def Ball_def mem_def + unfolding dist_sym + apply clarify + apply (rule_tac x="e - dist xa x" in exI) + using dist_triangle_alt[where z=x] + apply (clarsimp simp add: diff_less_iff) + apply atomize + apply (erule_tac x="x'" in allE) + apply (erule_tac x="xa" in allE) + by arith + +lemma centre_in_ball[simp]: "x \ ball x e \ e > 0" by (metis mem_ball dist_refl) +lemma open_contains_ball: "open S \ (\x\S. \e>0. ball x e \ S)" + unfolding open_def subset_eq mem_ball Ball_def dist_sym .. + +lemma open_contains_ball_eq: "open S \ \x. x\S \ (\e>0. ball x e \ S)" + by (metis open_contains_ball subset_eq centre_in_ball) + +lemma ball_eq_empty[simp]: "ball x e = {} \ e \ 0" + unfolding mem_ball expand_set_eq + apply (simp add: not_less) + by (metis dist_pos_le order_trans dist_refl) + +lemma ball_empty[intro]: "e \ 0 ==> ball x e = {}" by simp + +subsection{* Basic "localization" results are handy for connectedness. *} + +lemma openin_open: "openin (subtopology euclidean U) S \ (\T. open T \ (S = U \ T))" + by (auto simp add: openin_subtopology open_openin[symmetric]) + +lemma openin_open_Int[intro]: "open S \ openin (subtopology euclidean U) (U \ S)" + by (auto simp add: openin_open) + +lemma open_openin_trans[trans]: + "open S \ open T \ T \ S \ openin (subtopology euclidean S) T" + by (metis Int_absorb1 openin_open_Int) + +lemma open_subset: "S \ T \ open S \ openin (subtopology euclidean T) S" + by (auto simp add: openin_open) + +lemma closedin_closed: "closedin (subtopology euclidean U) S \ (\T. closed T \ S = U \ T)" + by (simp add: closedin_subtopology closed_closedin Int_ac) + +lemma closedin_closed_Int: "closed S ==> closedin (subtopology euclidean U) (U \ S)" + by (metis closedin_closed) + +lemma closed_closedin_trans: "closed S \ closed T \ T \ S \ closedin (subtopology euclidean S) T" + apply (subgoal_tac "S \ T = T" ) + apply auto + apply (frule closedin_closed_Int[of T S]) + by simp + +lemma closed_subset: "S \ T \ closed S \ closedin (subtopology euclidean T) S" + by (auto simp add: closedin_closed) + +lemma openin_euclidean_subtopology_iff: "openin (subtopology euclidean U) S + \ S \ U \ (\x\S. \e>0. \x'\U. 
dist x' x < e \ x'\ S)" (is "?lhs \ ?rhs") +proof- + {assume ?lhs hence ?rhs unfolding openin_subtopology open_openin[symmetric] + by (simp add: open_def) blast} + moreover + {assume SU: "S \ U" and H: "\x. x \ S \ \e>0. \x'\U. dist x' x < e \ x' \ S" + from H obtain d where d: "\x . x\ S \ d x > 0 \ (\x' \ U. dist x' x < d x \ x' \ S)" + by metis + let ?T = "\{B. \x\S. B = ball x (d x)}" + have oT: "open ?T" by auto + { fix x assume "x\S" + hence "x \ \{B. \x\S. B = ball x (d x)}" + apply simp apply(rule_tac x="ball x(d x)" in exI) apply auto + unfolding dist_refl using d[of x] by auto + hence "x\ ?T \ U" using SU and `x\S` by auto } + moreover + { fix y assume "y\?T" + then obtain B where "y\B" "B\{B. \x\S. B = ball x (d x)}" by auto + then obtain x where "x\S" and x:"y \ ball x (d x)" by auto + assume "y\U" + hence "y\S" using d[OF `x\S`] and x by(auto simp add: dist_sym) } + ultimately have "S = ?T \ U" by blast + with oT have ?lhs unfolding openin_subtopology open_openin[symmetric] by blast} + ultimately show ?thesis by blast +qed + +text{* These "transitivity" results are handy too. *} + +lemma openin_trans[trans]: "openin (subtopology euclidean T) S \ openin (subtopology euclidean U) T + \ openin (subtopology euclidean U) S" + unfolding open_openin openin_open by blast + +lemma openin_open_trans: "openin (subtopology euclidean T) S \ open T \ open S" + by (auto simp add: openin_open intro: openin_trans) + +lemma closedin_trans[trans]: + "closedin (subtopology euclidean T) S \ + closedin (subtopology euclidean U) T + ==> closedin (subtopology euclidean U) S" + by (auto simp add: closedin_closed closed_closedin closed_Inter Int_assoc) + +lemma closedin_closed_trans: "closedin (subtopology euclidean T) S \ closed T \ closed S" + by (auto simp add: closedin_closed intro: closedin_trans) + +subsection{* Connectedness *} + +definition "connected S \ + ~(\e1 e2. open e1 \ open e2 \ S \ (e1 \ e2) \ (e1 \ e2 \ S = {}) + \ ~(e1 \ S = {}) \ ~(e2 \ S = {}))" + +lemma connected_local: + "connected S \ ~(\e1 e2. + openin (subtopology euclidean S) e1 \ + openin (subtopology euclidean S) e2 \ + S \ e1 \ e2 \ + e1 \ e2 = {} \ + ~(e1 = {}) \ + ~(e2 = {}))" +unfolding connected_def openin_open by blast + +lemma exists_diff: "(\S. P(UNIV - S)) \ (\S. P S)" (is "?lhs \ ?rhs") +proof- + + {assume "?lhs" hence ?rhs by blast } + moreover + {fix S assume H: "P S" + have "S = UNIV - (UNIV - S)" by auto + with H have "P (UNIV - (UNIV - S))" by metis } + ultimately show ?thesis by metis +qed + +lemma connected_clopen: "connected S \ + (\T. openin (subtopology euclidean S) T \ + closedin (subtopology euclidean S) T \ T = {} \ T = S)" (is "?lhs \ ?rhs") +proof- + have " \ connected S \ (\e1 e2. open e1 \ open (UNIV - e2) \ S \ e1 \ (UNIV - e2) \ e1 \ (UNIV - e2) \ S = {} \ e1 \ S \ {} \ (UNIV - e2) \ S \ {})" + unfolding connected_def openin_open closedin_closed + apply (subst exists_diff) by blast + hence th0: "connected S \ \ (\e2 e1. closed e2 \ open e1 \ S \ e1 \ (UNIV - e2) \ e1 \ (UNIV - e2) \ S = {} \ e1 \ S \ {} \ (UNIV - e2) \ S \ {})" + (is " _ \ \ (\e2 e1. ?P e2 e1)") apply (simp add: closed_def) by metis + + have th1: "?rhs \ \ (\t' t. closed t'\t = S\t' \ t\{} \ t\S \ (\t'. open t' \ t = S \ t'))" + (is "_ \ \ (\t' t. ?Q t' t)") + unfolding connected_def openin_open closedin_closed by auto + {fix e2 + {fix e1 have "?P e2 e1 \ (\t. closed e2 \ t = S\e2 \ open e1 \ t = S\e1 \ t\{} \ t\S)" + by auto} + then have "(\e1. ?P e2 e1) \ (\t. ?Q e2 t)" by metis} + then have "\e2. (\e1. ?P e2 e1) \ (\t. 
?Q e2 t)" by blast + then show ?thesis unfolding th0 th1 by simp +qed + +lemma connected_empty[simp, intro]: "connected {}" + by (simp add: connected_def) + +subsection{* Hausdorff and other separation properties *} + +lemma hausdorff: + assumes xy: "x \ y" + shows "\U V. open U \ open V \ x\ U \ y \ V \ (U \ V = {})" (is "\U V. ?P U V") +proof- + let ?U = "ball x (dist x y / 2)" + let ?V = "ball y (dist x y / 2)" + have th0: "\d x y z. (d x z :: real) <= d x y + d y z \ d y z = d z y + ==> ~(d x y * 2 < d x z \ d z y * 2 < d x z)" by arith + have "?P ?U ?V" using dist_pos_lt[OF xy] th0[of dist,OF dist_triangle dist_sym] + by (auto simp add: dist_refl expand_set_eq Arith_Tools.less_divide_eq_number_of1) + then show ?thesis by blast +qed + +lemma separation_t2: "x \ y \ (\U V. open U \ open V \ x \ U \ y \ V \ U \ V = {})" + using hausdorff[of x y] by blast + +lemma separation_t1: "x \ y \ (\U V. open U \ open V \ x \U \ y\ U \ x\V \ y\V)" + using separation_t2[of x y] by blast + +lemma separation_t0: "x \ y \ (\U. open U \ ~(x\U \ y\U))" by(metis separation_t1) + +subsection{* Limit points *} + +definition islimpt:: "real ^'n \ (real^'n) set \ bool" (infixr "islimpt" 60) where + islimpt_def: "x islimpt S \ (\T. x\T \ open T \ (\y\S. y\T \ y\x))" + + (* FIXME: Sure this form is OK????*) +lemma islimptE: assumes "x islimpt S" and "x \ T" and "open T" + obtains "(\y\S. y\T \ y\x)" + using assms unfolding islimpt_def by auto + +lemma islimpt_subset: "x islimpt S \ S \ T ==> x islimpt T" by (auto simp add: islimpt_def) +lemma islimpt_approachable: "x islimpt S \ (\e>0. \x'\S. x' \ x \ dist x' x < e)" + unfolding islimpt_def + apply auto + apply(erule_tac x="ball x e" in allE) + apply (auto simp add: dist_refl) + apply(rule_tac x=y in bexI) apply (auto simp add: dist_sym) + by (metis open_def dist_sym open_ball centre_in_ball mem_ball) + +lemma islimpt_approachable_le: "x islimpt S \ (\e>0. \x'\ S. x' \ x \ dist x' x <= e)" + unfolding islimpt_approachable + using approachable_lt_le[where f="\x'. dist x' x" and P="\x'. \ (x'\S \ x'\x)"] + by metis + +lemma islimpt_UNIV[simp, intro]: "(x:: real ^'n) islimpt UNIV" +proof- + { + fix e::real assume ep: "e>0" + from vector_choose_size[of "e/2"] ep have "\(c:: real ^'n). norm c = e/2" by auto + then obtain c ::"real^'n" where c: "norm c = e/2" by blast + let ?x = "x + c" + have "?x \ x" using c ep by (auto simp add: norm_eq_0_imp) + moreover have "dist ?x x < e" using c ep apply simp by norm + ultimately have "\x'. x' \ x\ dist x' x < e" by blast} + then show ?thesis unfolding islimpt_approachable by blast +qed + +lemma closed_limpt: "closed S \ (\x. x islimpt S \ x \ S)" + unfolding closed_def + apply (subst open_subopen) + apply (simp add: islimpt_def subset_eq) + by (metis DiffE DiffI UNIV_I insertCI insert_absorb mem_def) + +lemma islimpt_EMPTY[simp]: "\ x islimpt {}" + unfolding islimpt_approachable apply auto by ferrack + +lemma closed_positive_orthant: "closed {x::real^'n. \i\{1.. dimindex(UNIV:: 'n set)}. 0 \x$i}" +proof- + let ?U = "{1 .. dimindex(UNIV :: 'n set)}" + let ?O = "{x::real^'n. \i\?U. x$i\0}" + {fix x:: "real^'n" and i::nat assume H: "\e>0. \x'\?O. x' \ x \ dist x' x < e" and i: "i \ ?U" + and xi: "x$i < 0" + from xi have th0: "-x$i > 0" by arith + from H[rule_format, OF th0] obtain x' where x': "x' \?O" "x' \ x" "dist x' x < -x $ i" by blast + have th:" \b a (x::real). abs x <= b \ b <= a ==> ~(a + x < 0)" by arith + have th': "\x (y::real). 
x < 0 \ 0 <= y ==> abs x <= abs (y - x)" by arith + have th1: "\x$i\ \ \(x' - x)$i\" using i x'(1) xi + apply (simp only: vector_component) + by (rule th') auto + have th2: "\dist x x'\ \ \(x' - x)$i\" using component_le_norm[OF i, of "x'-x"] + apply (simp add: dist_def) by norm + from th[OF th1 th2] x'(3) have False by (simp add: dist_sym dist_pos_le) } + then show ?thesis unfolding closed_limpt islimpt_approachable + unfolding not_le[symmetric] by blast +qed + +lemma finite_set_avoid: assumes fS: "finite S" shows "\d>0. \x\S. x \ a \ d <= dist a x" +proof(induct rule: finite_induct[OF fS]) + case 1 thus ?case apply auto by ferrack +next + case (2 x F) + from 2 obtain d where d: "d >0" "\x\F. x\a \ d \ dist a x" by blast + {assume "x = a" hence ?case using d by auto } + moreover + {assume xa: "x\a" + let ?d = "min d (dist a x)" + have dp: "?d > 0" using xa d(1) using dist_nz by auto + from d have d': "\x\F. x\a \ ?d \ dist a x" by auto + with dp xa have ?case by(auto intro!: exI[where x="?d"]) } + ultimately show ?case by blast +qed + +lemma islimpt_finite: assumes fS: "finite S" shows "\ a islimpt S" + unfolding islimpt_approachable + using finite_set_avoid[OF fS, of a] by (metis dist_sym not_le) + +lemma islimpt_Un: "x islimpt (S \ T) \ x islimpt S \ x islimpt T" + apply (rule iffI) + defer + apply (metis Un_upper1 Un_upper2 islimpt_subset) + unfolding islimpt_approachable + apply auto + apply (erule_tac x="min e ea" in allE) + apply auto + done + +lemma discrete_imp_closed: + assumes e: "0 < e" and d: "\x \ S. \y \ S. norm(y - x) < e \ y = x" + shows "closed S" +proof- + {fix x assume C: "\e>0. \x'\S. x' \ x \ dist x' x < e" + from e have e2: "e/2 > 0" by arith + from C[rule_format, OF e2] obtain y where y: "y \ S" "y\x" "dist y x < e/2" by blast + let ?m = "min (e/2) (dist x y) " + from e2 y(2) have mp: "?m > 0" by (simp add: dist_nz[THEN sym]) + from C[rule_format, OF mp] obtain z where z: "z \ S" "z\x" "dist z x < ?m" by blast + have th: "norm (z - y) < e" using z y by norm + from d[rule_format, OF y(1) z(1) th] y z + have False by (auto simp add: dist_sym)} + then show ?thesis by (metis islimpt_approachable closed_limpt) +qed + +subsection{* Interior of a Set *} +definition "interior S = {x. \T. open T \ x \ T \ T \ S}" + +lemma interior_eq: "interior S = S \ open S" + apply (simp add: expand_set_eq interior_def) + apply (subst (2) open_subopen) by blast + +lemma interior_open: "open S ==> (interior S = S)" by (metis interior_eq) + +lemma interior_empty[simp]: "interior {} = {}" by (simp add: interior_def) + +lemma open_interior[simp, intro]: "open(interior S)" + apply (simp add: interior_def) + apply (subst open_subopen) by blast + +lemma interior_interior[simp]: "interior(interior S) = interior S" by (metis interior_eq open_interior) +lemma interior_subset: "interior S \ S" by (auto simp add: interior_def) +lemma subset_interior: "S \ T ==> (interior S) \ (interior T)" by (auto simp add: interior_def) +lemma interior_maximal: "T \ S \ open T ==> T \ (interior S)" by (auto simp add: interior_def) +lemma interior_unique: "T \ S \ open T \ (\T'. T' \ S \ open T' \ T' \ T) \ interior S = T" + by (metis equalityI interior_maximal interior_subset open_interior) +lemma mem_interior: "x \ interior S \ (\e. 
0 < e \ ball x e \ S)" + apply (simp add: interior_def) + by (metis open_contains_ball centre_in_ball open_ball subset_trans) + +lemma open_subset_interior: "open S ==> S \ interior T \ S \ T" + by (metis interior_maximal interior_subset subset_trans) + +lemma interior_inter[simp]: "interior(S \ T) = interior S \ interior T" + apply (rule equalityI, simp) + apply (metis Int_lower1 Int_lower2 subset_interior) + by (metis Int_mono interior_subset open_inter open_interior open_subset_interior) + +lemma interior_limit_point[intro]: assumes x: "x \ interior S" shows "x islimpt S" +proof- + from x obtain e where e: "e>0" "\x'. dist x x' < e \ x' \ S" + unfolding mem_interior subset_eq Ball_def mem_ball by blast + {fix d::real assume d: "d>0" + let ?m = "min d e / 2" + have mde2: "?m \ 0" using e(1) d(1) by arith + from vector_choose_dist[OF mde2, of x] + obtain y where y: "dist x y = ?m" by blast + have th: "dist x y < e" "dist x y < d" unfolding y using e(1) d(1) by arith+ + have "\x'\S. x'\ x \ dist x' x < d" + apply (rule bexI[where x=y]) + using e th y by (auto simp add: dist_sym)} + then show ?thesis unfolding islimpt_approachable by blast +qed + +lemma interior_closed_Un_empty_interior: + assumes cS: "closed S" and iT: "interior T = {}" + shows "interior(S \ T) = interior S" +proof- + have "interior S \ interior (S\T)" + by (rule subset_interior, blast) + moreover + {fix x e assume e: "e > 0" "\x' \ ball x e. x'\(S\T)" + {fix y assume y: "y \ ball x e" + {fix d::real assume d: "d > 0" + let ?k = "min d (e - dist x y)" + have kp: "?k > 0" using d e(1) y[unfolded mem_ball] by norm + have "?k/2 \ 0" using kp by simp + then obtain w where w: "dist y w = ?k/ 2" by (metis vector_choose_dist) + from iT[unfolded expand_set_eq mem_interior] + have "\ ball w (?k/4) \ T" using kp by (auto simp add: Arith_Tools.less_divide_eq_number_of1) + then obtain z where z: "dist w z < ?k/4" "z \ T" by (auto simp add: subset_eq) + have "z \ T \ z\ y \ dist z y < d \ dist x z < e" using z apply simp + using w e(1) d apply (auto simp only: dist_sym) + apply (auto simp add: min_def cong del: if_weak_cong) + apply (cases "d \ e - dist x y", auto simp add: ring_simps cong del: if_weak_cong) + apply norm + apply (cases "d \ e - dist x y", auto simp add: ring_simps not_le not_less cong del: if_weak_cong) + apply norm + apply norm + apply (cases "d \ e - dist x y", auto simp add: ring_simps not_le not_less cong del: if_weak_cong) + apply norm + apply norm + done + then have "\z. z \ T \ z\ y \ dist z y < d \ dist x z < e" by blast + then have "\x' \S. x'\y \ dist x' y < d" using e by auto} + then have "y\S" by (metis islimpt_approachable cS closed_limpt) } + then have "x \ interior S" unfolding mem_interior using e(1) by blast} + hence "interior (S\T) \ interior S" unfolding mem_interior Ball_def subset_eq by blast + ultimately show ?thesis by blast +qed + + +subsection{* Closure of a Set *} + +definition "closure S = S \ {x | x. x islimpt S}" + +lemma closure_interior: "closure S = UNIV - interior (UNIV - S)" +proof- + { fix x + have "x\UNIV - interior (UNIV - S) \ x \ closure S" (is "?lhs = ?rhs") + proof + let ?exT = "\ y. (\T. 
open T \ y \ T \ T \ UNIV - S)" + assume "?lhs" + hence *:"\ ?exT x" + unfolding interior_def + by simp + { assume "\ ?rhs" + hence False using * + unfolding closure_def islimpt_def + by blast + } + thus "?rhs" + by blast + next + assume "?rhs" thus "?lhs" + unfolding closure_def interior_def islimpt_def + by blast + qed + } + thus ?thesis + by blast +qed + +lemma interior_closure: "interior S = UNIV - (closure (UNIV - S))" +proof- + { fix x + have "x \ interior S \ x \ UNIV - (closure (UNIV - S))" + unfolding interior_def closure_def islimpt_def + by blast + } + thus ?thesis + by blast +qed + +lemma closed_closure[simp, intro]: "closed (closure S)" +proof- + have "closed (UNIV - interior (UNIV -S))" by blast + thus ?thesis using closure_interior[of S] by simp +qed + +lemma closure_hull: "closure S = closed hull S" +proof- + have "S \ closure S" + unfolding closure_def + by blast + moreover + have "closed (closure S)" + using closed_closure[of S] + by assumption + moreover + { fix t + assume *:"S \ t" "closed t" + { fix x + assume "x islimpt S" + hence "x islimpt t" using *(1) + using islimpt_subset[of x, of S, of t] + by blast + } + with * have "closure S \ t" + unfolding closure_def + using closed_limpt[of t] + by blast + } + ultimately show ?thesis + using hull_unique[of S, of "closure S", of closed] + unfolding mem_def + by simp +qed + +lemma closure_eq: "closure S = S \ closed S" + unfolding closure_hull + using hull_eq[of closed, unfolded mem_def, OF closed_Inter, of S] + by (metis mem_def subset_eq) + +lemma closure_closed[simp]: "closed S \ closure S = S" + using closure_eq[of S] + by simp + +lemma closure_closure[simp]: "closure (closure S) = closure S" + unfolding closure_hull + using hull_hull[of closed S] + by assumption + +lemma closure_subset: "S \ closure S" + unfolding closure_hull + using hull_subset[of S closed] + by assumption + +lemma subset_closure: "S \ T \ closure S \ closure T" + unfolding closure_hull + using hull_mono[of S T closed] + by assumption + +lemma closure_minimal: "S \ T \ closed T \ closure S \ T" + using hull_minimal[of S T closed] + unfolding closure_hull mem_def + by simp + +lemma closure_unique: "S \ T \ closed T \ (\ T'. S \ T' \ closed T' \ T \ T') \ closure S = T" + using hull_unique[of S T closed] + unfolding closure_hull mem_def + by simp + +lemma closure_empty[simp]: "closure {} = {}" + using closed_empty closure_closed[of "{}"] + by simp + +lemma closure_univ[simp]: "closure UNIV = UNIV" + using closure_closed[of UNIV] + by simp + +lemma closure_eq_empty: "closure S = {} \ S = {}" + using closure_empty closure_subset[of S] + by blast + +lemma closure_subset_eq: "closure S \ S \ closed S" + using closure_eq[of S] closure_subset[of S] + by simp + +lemma open_inter_closure_eq_empty: + "open S \ (S \ closure T) = {} \ S \ T = {}" + using open_subset_interior[of S "UNIV - T"] + using interior_subset[of "UNIV - T"] + unfolding closure_interior + by auto + +lemma open_inter_closure_subset: "open S \ (S \ (closure T)) \ closure(S \ T)" +proof + fix x + assume as: "open S" "x \ S \ closure T" + { assume *:"x islimpt T" + { fix e::real + assume "e > 0" + from as `open S` obtain e' where "e' > 0" and e':"\x'. dist x' x < e' \ x' \ S" + unfolding open_def + by auto + let ?e = "min e e'" + from `e>0` `e'>0` have "?e > 0" + by simp + then obtain y where y:"y\T" "y \ x \ dist y x < ?e" + using islimpt_approachable[of x T] using * + by blast + hence "\x'\S \ T. 
x' \ x \ dist x' x < e" using e' + using y + by(rule_tac x=y in bexI, simp+) + } + hence "x islimpt S \ T" + using islimpt_approachable[of x "S \ T"] + by blast + } + then show "x \ closure (S \ T)" using as + unfolding closure_def + by blast +qed + +lemma closure_complement: "closure(UNIV - S) = UNIV - interior(S)" +proof- + have "S = UNIV - (UNIV - S)" + by auto + thus ?thesis + unfolding closure_interior + by auto +qed + +lemma interior_complement: "interior(UNIV - S) = UNIV - closure(S)" + unfolding closure_interior + by blast + +subsection{* Frontier (aka boundary) *} + +definition "frontier S = closure S - interior S" + +lemma frontier_closed: "closed(frontier S)" + by (simp add: frontier_def closed_diff closed_closure) + +lemma frontier_closures: "frontier S = (closure S) \ (closure(UNIV - S))" + by (auto simp add: frontier_def interior_closure) + +lemma frontier_straddle: "a \ frontier S \ (\e>0. (\x\S. dist a x < e) \ (\x. x \ S \ dist a x < e))" (is "?lhs \ ?rhs") +proof + assume "?lhs" + { fix e::real + assume "e > 0" + let ?rhse = "(\x\S. dist a x < e) \ (\x. x \ S \ dist a x < e)" + { assume "a\S" + have "\x\S. dist a x < e" using dist_refl[of a] `e>0` `a\S` by(rule_tac x=a in bexI) auto + moreover have "\x. x \ S \ dist a x < e" using `?lhs` `a\S` + unfolding frontier_closures closure_def islimpt_def using dist_refl[of a] `e>0` + by (auto, erule_tac x="ball a e" in allE, auto) + ultimately have ?rhse by auto + } + moreover + { assume "a\S" + hence ?rhse using `?lhs` + unfolding frontier_closures closure_def islimpt_def + using open_ball[of a e] dist_refl[of a] `e > 0` + by (auto, erule_tac x = "ball a e" in allE, auto) + } + ultimately have ?rhse by auto + } + thus ?rhs by auto +next + assume ?rhs + moreover + { fix T assume "a\S" and + as:"\e>0. (\x\S. dist a x < e) \ (\x. x \ S \ dist a x < e)" "a \ S" "a \ T" "open T" + from `open T` `a \ T` have "\e>0. ball a e \ T" unfolding open_contains_ball[of T] by auto + then obtain e where "e>0" "ball a e \ T" by auto + then obtain y where y:"y\S" "dist a y < e" using as(1) by auto + have "\y\S. y \ T \ y \ a" + using `dist a y < e` `ball a e \ T` unfolding ball_def using `y\S` `a\S` by auto + } + hence "a \ closure S" unfolding closure_def islimpt_def using `?rhs` by auto + moreover + { fix T assume "a \ T" "open T" "a\S" + then obtain e where "e>0" and balle: "ball a e \ T" unfolding open_contains_ball using `?rhs` by auto + obtain x where "x \ S" "dist a x < e" using `?rhs` using `e>0` by auto + hence "\y\UNIV - S. 
y \ T \ y \ a" using balle `a\S` unfolding ball_def by (rule_tac x=x in bexI)auto + } + hence "a islimpt (UNIV - S) \ a\S" unfolding islimpt_def by auto + ultimately show ?lhs unfolding frontier_closures using closure_def[of "UNIV - S"] by auto +qed + +lemma frontier_subset_closed: "closed S \ frontier S \ S" + by (metis frontier_def closure_closed Diff_subset) + +lemma frontier_empty: "frontier {} = {}" + by (simp add: frontier_def closure_empty) + +lemma frontier_subset_eq: "frontier S \ S \ closed S" +proof- + { assume "frontier S \ S" + hence "closure S \ S" using interior_subset unfolding frontier_def by auto + hence "closed S" using closure_subset_eq by auto + } + thus ?thesis using frontier_subset_closed[of S] by auto +qed + +lemma frontier_complement: "frontier(UNIV - S) = frontier S" + by (auto simp add: frontier_def closure_complement interior_complement) + +lemma frontier_disjoint_eq: "frontier S \ S = {} \ open S" + using frontier_complement frontier_subset_eq[of "UNIV - S"] + unfolding open_closed by auto + +subsection{* A variant of nets (Slightly non-standard but good for our purposes). *} + +typedef (open) 'a net = + "{g :: 'a \ 'a \ bool. \x y. (\z. g z x \ g z y) \ (\z. g z y \ g z x)}" + morphisms "netord" "mknet" by blast +lemma net: "(\z. netord n z x \ netord n z y) \ (\z. netord n z y \ netord n z x)" + using netord[of n] by auto + +lemma oldnet: "netord n x x \ netord n y y \ + \z. netord n z z \ (\w. netord n w z \ netord n w x \ netord n w y)" + by (metis net) + +lemma net_dilemma: + "\a. (\x. netord net x a) \ (\x. netord net x a \ P x) \ + \b. (\x. netord net x b) \ (\x. netord net x b \ Q x) + \ \c. (\x. netord net x c) \ (\x. netord net x c \ P x \ Q x)" + by (metis net) + +subsection{* Common nets and The "within" modifier for nets. *} + +definition "at a = mknet(\x y. 0 < dist x a \ dist x a <= dist y a)" +definition "at_infinity = mknet(\x y. norm x \ norm y)" +definition "sequentially = mknet(\(m::nat) n. m >= n)" + +definition within :: "'a net \ 'a set \ 'a net" (infixr "within" 70) where + within_def: "net within S = mknet (\x y. netord net x y \ x \ S)" + +definition indirection :: "real ^'n \ real ^'n \ (real ^'n) net" (infixr "indirection" 70) where + indirection_def: "a indirection v = (at a) within {b. \c\0. b - a = c*s v}" + +text{* Prove That They are all nets. *} + +lemma mknet_inverse': "netord (mknet r) = r \ (\x y. (\z. r z x \ r z y) \ (\z. r z y \ r z x))" + using mknet_inverse[of r] apply (auto simp add: netord_inverse) by (metis net) + +method_setup net = {* + let + val ss1 = HOL_basic_ss addsimps [@{thm expand_fun_eq} RS sym] + val ss2 = HOL_basic_ss addsimps [@{thm mknet_inverse'}] + fun tac ths = ObjectLogic.full_atomize_tac THEN' Simplifier.simp_tac (ss1 addsimps ths) THEN' Simplifier.asm_full_simp_tac ss2 + in Method.thms_args (Method.SIMPLE_METHOD' o tac) end + +*} "Reduces goals about net" + +lemma at: "\x y. netord (at a) x y \ 0 < dist x a \ dist x a <= dist y a" + apply (net at_def) + by (metis dist_sym real_le_linear real_le_trans) + +lemma at_infinity: + "\x y. netord at_infinity x y \ norm x >= norm y" + apply (net at_infinity_def) + apply (metis real_le_linear real_le_trans) + done + +lemma sequentially: "\m n. netord sequentially m n \ m >= n" + apply (net sequentially_def) + apply (metis linorder_linear min_max.le_supI2 min_max.sup_absorb1) + done + +lemma within: "netord (n within S) x y \ netord n x y \ x \ S" +proof- + have "\x y. (\z. netord n z x \ z \ S \ netord n z y) \ (\z. 
netord n z y \ z \ S \ netord n z x)" + by (metis net) + thus ?thesis + unfolding within_def + using mknet_inverse[of "\x y. netord n x y \ x \ S"] + by simp +qed + +lemma in_direction: "netord (a indirection v) x y \ 0 < dist x a \ dist x a \ dist y a \ (\c \ 0. x - a = c *s v)" + by (simp add: within at indirection_def) + +lemma within_UNIV: "at x within UNIV = at x" + by (simp add: within_def at_def netord_inverse) + +subsection{* Identify Trivial limits, where we can't approach arbitrarily closely. *} + + +definition "trivial_limit (net:: 'a net) \ + (\(a::'a) b. a = b) \ (\(a::'a) b. a \ b \ (\x. ~(netord (net) x a) \ ~(netord(net) x b)))" + + +lemma trivial_limit_within: "trivial_limit (at (a::real^'n) within S) \ ~(a islimpt S)" +proof- + {assume "\(a::real^'n) b. a = b" hence "\ a islimpt S" + apply (simp add: islimpt_approachable_le) + by (rule exI[where x=1], auto)} + moreover + {fix b c assume bc: "b \ c" "\x. \ netord (at a within S) x b \ \ netord (at a within S) x c" + have "dist a b > 0 \ dist a c > 0" using bc by (auto simp add: within at dist_nz[THEN sym]) + then have "\ a islimpt S" + using bc + unfolding within at dist_nz islimpt_approachable_le + by(auto simp add: dist_triangle dist_sym dist_eq_0[THEN sym]) } + moreover + {assume "\ a islimpt S" + then obtain e where e: "e > 0" "\x' \ S. x' \ a \ dist x' a > e" + unfolding islimpt_approachable_le by (auto simp add: not_le) + from e vector_choose_dist[of e a] obtain b where b: "dist a b = e" by auto + from b e(1) have "a \ b" by (simp add: dist_nz) + moreover have "\x. \ ((0 < dist x a \ dist x a \ dist a a) \ x \ S) \ + \ ((0 < dist x a \ dist x a \ dist b a) \ x \ S)" + using e(2) b by (auto simp add: dist_refl dist_sym) + ultimately have "trivial_limit (at a within S)" unfolding trivial_limit_def within at + by blast} + ultimately show ?thesis unfolding trivial_limit_def by blast +qed + +lemma trivial_limit_at: "~(trivial_limit (at a))" + apply (subst within_UNIV[symmetric]) + by (simp add: trivial_limit_within islimpt_UNIV) + +lemma trivial_limit_at_infinity: "~(trivial_limit (at_infinity :: ('a::{norm,zero_neq_one}) net))" + apply (simp add: trivial_limit_def at_infinity) + by (metis order_refl zero_neq_one) + +lemma trivial_limit_sequentially: "~(trivial_limit sequentially)" + by (auto simp add: trivial_limit_def sequentially) + +subsection{* Some property holds "sufficiently close" to the limit point. *} + +definition "eventually P net \ trivial_limit net \ (\y. (\x. netord net x y) \ (\x. netord net x y \ P x))" + +lemma eventually_happens: "eventually P net ==> trivial_limit net \ (\x. P x)" + by (metis eventually_def) + +lemma eventually_within_le: "eventually P (at a within S) \ + (\d>0. \x\S. 0 < dist x a \ dist x a <= d \ P x)" (is "?lhs = ?rhs") +proof + assume "?lhs" + moreover + { assume "\ a islimpt S" + then obtain e where "e>0" and e:"\x'\S. \ (x' \ a \ dist x' a \ e)" unfolding islimpt_approachable_le by auto + hence "?rhs" apply auto apply (rule_tac x=e in exI) by auto } + moreover + { assume "\y. (\x. netord (at a within S) x y) \ (\x. netord (at a within S) x y \ P x)" + then obtain x y where xy:"netord (at a within S) x y \ (\x. netord (at a within S) x y \ P x)" by auto + hence "?rhs" unfolding within at by auto + } + ultimately show "?rhs" unfolding eventually_def trivial_limit_within by auto +next + assume "?rhs" + then obtain d where "d>0" and d:"\x\S. 
0 < dist x a \ dist x a \ d \ P x" by auto + thus "?lhs" + unfolding eventually_def trivial_limit_within islimpt_approachable_le within at unfolding dist_nz[THEN sym] by (clarsimp, rule_tac x=d in exI, auto) +qed + +lemma eventually_within: " eventually P (at a within S) \ + (\d>0. \x\S. 0 < dist x a \ dist x a < d \ P x)" +proof- + { fix d + assume "d>0" "\x\S. 0 < dist x a \ dist x a < d \ P x" + hence "\x\S. 0 < dist x a \ dist x a \ (d/2) \ P x" using order_less_imp_le by auto + } + thus ?thesis unfolding eventually_within_le using approachable_lt_le + by (auto, rule_tac x="d/2" in exI, auto) +qed + +lemma eventually_at: "eventually P (at a) \ (\d>0. \x. 0 < dist x a \ dist x a < d \ P x)" + apply (subst within_UNIV[symmetric]) + by (simp add: eventually_within) + +lemma eventually_sequentially: "eventually P sequentially \ (\N. \n\N. P n)" + apply (simp add: eventually_def sequentially trivial_limit_sequentially) +apply (metis dlo_simps(7) dlo_simps(9) le_maxI2 min_max.le_iff_sup min_max.sup_absorb1 order_antisym_conv) done + +(* FIXME Declare this with P::'a::some_type \ bool *) +lemma eventually_at_infinity: "eventually (P::(real^'n \ bool)) at_infinity \ (\b. \x. norm x >= b \ P x)" (is "?lhs = ?rhs") +proof + assume "?lhs" thus "?rhs" + unfolding eventually_def at_infinity + by (auto simp add: trivial_limit_at_infinity) +next + assume "?rhs" + then obtain b where b:"\x. b \ norm x \ P x" and "b\0" + by (metis norm_ge_zero real_le_linear real_le_trans) + obtain y::"real^'n" where y:"norm y = b" using `b\0` + using vector_choose_size[of b] by auto + thus "?lhs" unfolding eventually_def at_infinity using b y by auto +qed + +lemma always_eventually: "(\(x::'a::zero_neq_one). P x) ==> eventually P net" + apply (auto simp add: eventually_def trivial_limit_def ) + by (rule exI[where x=0], rule exI[where x=1], rule zero_neq_one) + +text{* Combining theorems for "eventually" *} + +lemma eventually_and: " eventually (\x. P x \ Q x) net \ eventually P net \ eventually Q net" + apply (simp add: eventually_def) + apply (cases "trivial_limit net") + using net_dilemma[of net P Q] by auto + +lemma eventually_mono: "(\x. P x \ Q x) \ eventually P net \ eventually Q net" + by (metis eventually_def) + +lemma eventually_mp: "eventually (\x. P x \ Q x) net \ eventually P net \ eventually Q net" + apply (atomize(full)) + unfolding imp_conjL[symmetric] eventually_and[symmetric] + by (auto simp add: eventually_def) + +lemma eventually_false: "eventually (\x. False) net \ trivial_limit net" + by (auto simp add: eventually_def) + +lemma not_eventually: "(\x. \ P x ) \ ~(trivial_limit net) ==> ~(eventually P net)" + by (auto simp add: eventually_def) + +subsection{* Limits, defined as vacuously true when the limit is trivial. *} + +definition tendsto:: "('a \ real ^'n) \ real ^'n \ 'a net \ bool" (infixr "--->" 55) where + tendsto_def: "(f ---> l) net \ (\e>0. eventually (\x. dist (f x) l < e) net)" + +lemma tendstoD: "(f ---> l) net \ e>0 \ eventually (\x. dist (f x) l < e) net" + unfolding tendsto_def by auto + + text{* Notation Lim to avoid collition with lim defined in analysis *} +definition "Lim net f = (THE l. (f ---> l) net)" + +lemma Lim: + "(f ---> l) net \ + trivial_limit net \ + (\e>0. \y. (\x. netord net x y) \ + (\x. netord(net) x y \ dist (f x) l < e))" + by (auto simp add: tendsto_def eventually_def) + + +text{* Show that they yield usual definitions in the various cases. *} + +lemma Lim_within_le: "(f ---> l)(at a within S) \ + (\e>0. \d>0. \x\S. 
0 < dist x a \ dist x a <= d \ dist (f x) l < e)" + by (auto simp add: tendsto_def eventually_within_le) + +lemma Lim_within: "(f ---> l) (at a within S) \ + (\e >0. \d>0. \x \ S. 0 < dist x a \ dist x a < d \ dist (f x) l < e)" + by (auto simp add: tendsto_def eventually_within) + +lemma Lim_at: "(f ---> l) (at a) \ + (\e >0. \d>0. \x. 0 < dist x a \ dist x a < d \ dist (f x) l < e)" + by (auto simp add: tendsto_def eventually_at) + +lemma Lim_at_infinity: + "(f ---> l) at_infinity \ (\e>0. \b. \x::real^'n. norm x >= b \ dist (f x) l < e)" + by (auto simp add: tendsto_def eventually_at_infinity) + +lemma Lim_sequentially: + "(S ---> l) sequentially \ + (\e>0. \N. \n\N. dist (S n) l < e)" + by (auto simp add: tendsto_def eventually_sequentially) + +lemma Lim_eventually: "eventually (\x. f x = l) net \ (f ---> l) net" + by (auto simp add: eventually_def Lim dist_refl) + +text{* The expected monotonicity property. *} + +lemma Lim_within_empty: "(f ---> l) (at x within {})" + by (simp add: Lim_within_le) + +lemma Lim_within_subset: "(f ---> l) (at a within S) \ T \ S \ (f ---> l) (at a within T)" + apply (auto simp add: Lim_within_le) + by (metis subset_eq) + +lemma Lim_Un: assumes "(f ---> l) (at x within S)" "(f ---> l) (at x within T)" + shows "(f ---> l) (at x within (S \ T))" +proof- + { fix e::real assume "e>0" + obtain d1 where d1:"d1>0" "\xa\T. 0 < dist xa x \ dist xa x < d1 \ dist (f xa) l < e" using assms unfolding Lim_within using `e>0` by auto + obtain d2 where d2:"d2>0" "\xa\S. 0 < dist xa x \ dist xa x < d2 \ dist (f xa) l < e" using assms unfolding Lim_within using `e>0` by auto + have "\d>0. \xa\S \ T. 0 < dist xa x \ dist xa x < d \ dist (f xa) l < e" using d1 d2 + by (rule_tac x="min d1 d2" in exI)auto + } + thus ?thesis unfolding Lim_within by auto +qed + +lemma Lim_Un_univ: + "(f ---> l) (at x within S) \ (f ---> l) (at x within T) \ S \ T = (UNIV::(real^'n) set) + ==> (f ---> l) (at x)" + by (metis Lim_Un within_UNIV) + +text{* Interrelations between restricted and unrestricted limits. *} + +lemma Lim_at_within: "(f ---> l)(at a) ==> (f ---> l)(at a within S)" + apply (simp add: Lim_at Lim_within) + by metis + +lemma Lim_within_open: + assumes"a \ S" "open S" + shows "(f ---> l)(at a within S) \ (f ---> l)(at a)" (is "?lhs \ ?rhs") +proof + assume ?lhs + { fix e::real assume "e>0" + obtain d where d: "d >0" "\x\S. 0 < dist x a \ dist x a < d \ dist (f x) l < e" using `?lhs` `e>0` unfolding Lim_within by auto + obtain d' where d': "d'>0" "\x. dist x a < d' \ x \ S" using assms unfolding open_def by auto + from d d' have "\d>0. \x. 0 < dist x a \ dist x a < d \ dist (f x) l < e" by (rule_tac x= "min d d'" in exI)auto + } + thus ?rhs unfolding Lim_at by auto +next + assume ?rhs + { fix e::real assume "e>0" + then obtain d where "d>0" and d:"\x. 0 < dist x a \ dist x a < d \ dist (f x) l < e" using `?rhs` unfolding Lim_at by auto + hence "\d>0. \x. 0 < dist x a \ dist x a < d \ dist (f x) l < e" using `d>0` by auto + } + thus ?lhs using Lim_at_within[of f l a S] by (auto simp add: Lim_at) +qed + +text{* Another limit point characterization. *} + +lemma islimpt_sequential: + "x islimpt S \ (\f. (\n::nat. f n \ S -{x}) \ (f ---> x) sequentially)" (is "?lhs = ?rhs") +proof + assume ?lhs + then obtain f where f:"\y. y>0 \ f y \ S \ f y \ x \ dist (f y) x < y" + unfolding islimpt_approachable using choice[of "\e y. 
e>0 \ y\S \ y\x \ dist y x < e"] by auto + { fix n::nat + have "f (inverse (real n + 1)) \ S - {x}" using f by auto + } + moreover + { fix e::real assume "e>0" + hence "\N::nat. inverse (real (N + 1)) < e" using real_arch_inv[of e] apply (auto simp add: Suc_pred') apply(rule_tac x="n - 1" in exI) by auto + then obtain N::nat where "inverse (real (N + 1)) < e" by auto + hence "\n\N. inverse (real n + 1) < e" by (auto, metis Suc_le_mono le_SucE less_imp_inverse_less nat_le_real_less order_less_trans real_of_nat_Suc real_of_nat_Suc_gt_zero) + moreover have "\n\N. dist (f (inverse (real n + 1))) x < (inverse (real n + 1))" using f `e>0` by auto + ultimately have "\N::nat. \n\N. dist (f (inverse (real n + 1))) x < e" apply(rule_tac x=N in exI) apply auto apply(erule_tac x=n in allE)+ by auto + } + hence " ((\n. f (inverse (real n + 1))) ---> x) sequentially" + unfolding Lim_sequentially using f by auto + ultimately show ?rhs apply (rule_tac x="(\n::nat. f (inverse (real n + 1)))" in exI) by auto +next + assume ?rhs + then obtain f::"nat\real^'a" where f:"(\n. f n \ S - {x})" "(\e>0. \N. \n\N. dist (f n) x < e)" unfolding Lim_sequentially by auto + { fix e::real assume "e>0" + then obtain N where "dist (f N) x < e" using f(2) by auto + moreover have "f N\S" "f N \ x" using f(1) by auto + ultimately have "\x'\S. x' \ x \ dist x' x < e" by auto + } + thus ?lhs unfolding islimpt_approachable by auto +qed + +text{* Basic arithmetical combining theorems for limits. *} + +lemma Lim_linear: fixes f :: "('a \ real^'n)" and h :: "(real^'n \ real^'m)" + assumes "(f ---> l) net" "linear h" + shows "((\x. h (f x)) ---> h l) net" +proof (cases "trivial_limit net") + case True + thus ?thesis unfolding tendsto_def unfolding eventually_def by auto +next + case False note cas = this + obtain b where b: "b>0" "\x. norm (h x) \ b * norm x" using assms(2) using linear_bounded_pos[of h] by auto + { fix e::real assume "e >0" + hence "e/b > 0" using `b>0` by (metis divide_pos_pos) + then have "(\y. (\x. netord net x y) \ (\x. netord net x y \ dist (f x) l < e/b))" using assms `e>0` cas + unfolding tendsto_def unfolding eventually_def by auto + then obtain y where y: "\x. netord net x y" "\x. netord net x y \ dist (f x) l < e/b" by auto + { fix x + have "netord net x y \ dist (h (f x)) (h l) < e" + using y(2) b unfolding dist_def using linear_sub[of h "f x" l] `linear h` + apply auto by (metis b(1) b(2) dist_def dist_sym less_le_not_le linorder_not_le mult_imp_div_pos_le real_mult_commute xt1(7)) + } + hence " (\y. (\x. netord net x y) \ (\x. netord net x y \ dist (h (f x)) (h l) < e))" using y + by(rule_tac x="y" in exI) auto + } + thus ?thesis unfolding tendsto_def eventually_def using `b>0` by auto +qed + +lemma Lim_const: "((\x. a) ---> a) net" + by (auto simp add: Lim dist_refl trivial_limit_def) + +lemma Lim_cmul: "(f ---> l) net ==> ((\x. c *s f x) ---> c *s l) net" + apply (rule Lim_linear[where f = f]) + apply simp + apply (rule linear_compose_cmul) + apply (rule linear_id[unfolded id_def]) + done + +lemma Lim_neg: "(f ---> l) net ==> ((\x. -(f x)) ---> -l) net" + apply (simp add: Lim dist_def group_simps) + apply (subst minus_diff_eq[symmetric]) + unfolding norm_minus_cancel by simp + +lemma Lim_add: fixes f :: "'a \ real^'n" shows + "(f ---> l) net \ (g ---> m) net \ ((\x. f(x) + g(x)) ---> l + m) net" +proof- + assume as:"(f ---> l) net" "(g ---> m) net" + { fix e::real + assume "e>0" + hence *:"eventually (\x. dist (f x) l < e/2) net" + "eventually (\x. 
dist (g x) m < e/2) net" using as + by (auto intro: tendstoD simp del: Arith_Tools.less_divide_eq_number_of1) + hence "eventually (\x. dist (f x + g x) (l + m) < e) net" + proof(cases "trivial_limit net") + case True + thus ?thesis unfolding eventually_def by auto + next + case False + hence fl:"(\y. (\x. netord net x y) \ (\x. netord net x y \ dist (f x) l < e / 2))" and + gl:"(\y. (\x. netord net x y) \ (\x. netord net x y \ dist (g x) m < e / 2))" + using * unfolding eventually_def by auto + obtain c where c:"(\x. netord net x c)" "(\x. netord net x c \ dist (f x) l < e / 2 \ dist (g x) m < e / 2)" + using net_dilemma[of net, OF fl gl] by auto + { fix x assume "netord net x c" + with c(2) have " dist (f x + g x) (l + m) < e" using dist_triangle_add[of "f x" "g x" l m] by auto + } + with c show ?thesis unfolding eventually_def by auto + qed + } + thus ?thesis unfolding tendsto_def by auto +qed + +lemma Lim_sub: "(f ---> l) net \ (g ---> m) net \ ((\x. f(x) - g(x)) ---> l - m) net" + unfolding diff_minus + by (simp add: Lim_add Lim_neg) + +lemma Lim_null: "(f ---> l) net \ ((\x. f(x) - l) ---> 0) net" by (simp add: Lim dist_def) +lemma Lim_null_norm: "(f ---> 0) net \ ((\x. vec1(norm(f x))) ---> 0) net" + by (simp add: Lim dist_def norm_vec1) + +lemma Lim_null_comparison: + assumes "eventually (\x. norm(f x) <= g x) net" "((\x. vec1(g x)) ---> 0) net" + shows "(f ---> 0) net" +proof(simp add: tendsto_def, rule+) + fix e::real assume "0 g x" "dist (vec1 (g x)) 0 < e" + hence "dist (f x) 0 < e" unfolding vec_def using dist_vec1[of "g x" "0"] + by (vector dist_def norm_vec1 dist_refl real_vector_norm_def dot_def vec1_def) + } + thus "eventually (\x. dist (f x) 0 < e) net" + using eventually_and[of "\x. norm(f x) <= g x" "\x. dist (vec1 (g x)) 0 < e" net] + using eventually_mono[of "(\x. norm (f x) \ g x \ dist (vec1 (g x)) 0 < e)" "(\x. dist (f x) 0 < e)" net] + using assms `e>0` unfolding tendsto_def by auto +qed + +lemma Lim_component: "(f ---> l) net \ i \ {1 .. dimindex(UNIV:: 'n set)} + ==> ((\a. vec1((f a :: real ^'n)$i)) ---> vec1(l$i)) net" + apply (simp add: Lim dist_def vec1_sub[symmetric] norm_vec1 vector_minus_component[symmetric] del: One_nat_def) + apply auto + apply (erule_tac x=e in allE) + apply clarify + apply (rule_tac x=y in exI) + apply auto + apply (rule order_le_less_trans) + apply (rule component_le_norm) + by auto + +lemma Lim_transform_bound: + assumes "eventually (\n. norm(f n) <= norm(g n)) net" "(g ---> 0) net" + shows "(f ---> 0) net" +proof(simp add: tendsto_def, rule+) + fix e::real assume "e>0" + { fix x + assume "norm (f x) \ norm (g x)" "dist (g x) 0 < e" + hence "dist (f x) 0 < e" by norm} + thus "eventually (\x. dist (f x) 0 < e) net" + using eventually_and[of "\x. norm (f x) \ norm (g x)" "\x. dist (g x) 0 < e" net] + using eventually_mono[of "\x. norm (f x) \ norm (g x) \ dist (g x) 0 < e" "\x. dist (f x) 0 < e" net] + using assms `e>0` unfolding tendsto_def by blast +qed + +text{* Deducing things about the limit from the elements. *} + +lemma Lim_in_closed_set: + assumes "closed S" "eventually (\x. f(x) \ S) net" "\(trivial_limit net)" "(f ---> l) net" + shows "l \ S" +proof- + { assume "l \ S" + obtain e where e:"e>0" "ball l e \ UNIV - S" using assms(1) `l \ S` unfolding closed_def open_contains_ball by auto + hence *:"\x. dist l x < e \ x \ S" by auto + obtain y where "(\x. netord net x y) \ (\x. netord net x y \ dist (f x) l < e)" + using assms(3,4) `e>0` unfolding tendsto_def eventually_def by blast + hence "(\x. netord net x y) \ (\x. 
netord net x y \ f x \ S)" using * by (auto simp add: dist_sym) + hence False using assms(2,3) + using eventually_and[of "(\x. f x \ S)" "(\x. f x \ S)"] not_eventually[of "(\x. f x \ S \ f x \ S)" net] + unfolding eventually_def by blast + } + thus ?thesis by blast +qed + +text{* Need to prove closed(cball(x,e)) before deducing this as a corollary. *} + +lemma Lim_norm_ubound: + assumes "\(trivial_limit net)" "(f ---> l) net" "eventually (\x. norm(f x) <= e) net" + shows "norm(l) <= e" +proof- + obtain y where y: "\x. netord net x y" "\x. netord net x y \ norm (f x) \ e" using assms(1,3) unfolding eventually_def by auto + show ?thesis + proof(rule ccontr) + assume "\ norm l \ e" + then obtain z where z: "\x. netord net x z" "\x. netord net x z \ dist (f x) l < norm l - e" + using assms(2)[unfolded Lim] using assms(1) apply simp apply(erule_tac x="norm l - e" in allE) by auto + obtain w where w:"netord net w z" "netord net w y" using net[of net] using z(1) y(1) by blast + hence "dist (f w) l < norm l - e \ norm (f w) <= e" using z(2) y(2) by auto + thus False using `\ norm l \ e` by norm + qed +qed + +lemma Lim_norm_lbound: + assumes "\ (trivial_limit net)" "(f ---> l) net" "eventually (\x. e <= norm(f x)) net" + shows "e \ norm l" +proof- + obtain y where y: "\x. netord net x y" "\x. netord net x y \ e \ norm (f x)" using assms(1,3) unfolding eventually_def by auto + show ?thesis + proof(rule ccontr) + assume "\ e \ norm l" + then obtain z where z: "\x. netord net x z" "\x. netord net x z \ dist (f x) l < e - norm l" + using assms(2)[unfolded Lim] using assms(1) apply simp apply(erule_tac x="e - norm l" in allE) by auto + obtain w where w:"netord net w z" "netord net w y" using net[of net] using z(1) y(1) by blast + hence "dist (f w) l < e - norm l \ e \ norm (f w)" using z(2) y(2) by auto + thus False using `\ e \ norm l` by norm + qed +qed + +text{* Uniqueness of the limit, when nontrivial. *} + +lemma Lim_unique: + fixes l::"real^'a" and net::"'b::zero_neq_one net" + assumes "\(trivial_limit net)" "(f ---> l) net" "(f ---> l') net" + shows "l = l'" +proof- + { fix e::real assume "e>0" + hence "eventually (\x. norm (0::real^'a) \ e) net" unfolding norm_0 using always_eventually[of _ net] by auto + hence "norm (l - l') \ e" using Lim_norm_ubound[of net "\x. 0" "l-l'"] using assms using Lim_sub[of f l net f l'] by auto + } note * = this + { assume "norm (l - l') > 0" + hence "norm (l - l') = 0" using *[of "(norm (l - l')) /2"] using norm_ge_zero[of "l - l'"] by simp + } + hence "l = l'" using norm_ge_zero[of "l - l'"] unfolding le_less and dist_nz[of l l', unfolded dist_def, THEN sym] by auto + thus ?thesis using assms using Lim_sub[of f l net f l'] by simp +qed + +lemma tendsto_Lim: + "~(trivial_limit (net::('b::zero_neq_one net))) \ (f ---> l) net ==> Lim net f = l" + unfolding Lim_def using Lim_unique[of net f] by auto + +text{* Limit under bilinear function (surprisingly tedious, but important) *} + +lemma norm_bound_lemma: + "0 < e \ \d>0. \(x'::real^'b) y'::real^'a. 
norm(x' - (x::real^'b)) < d \ norm(y' - y) < d \ norm(x') * norm(y' - y) + norm(x' - x) * norm(y) < e" +proof- + assume e: "0 < e" + have th1: "(2 * norm x + 2 * norm y + 2) > 0" using norm_ge_zero[of x] norm_ge_zero[of y] by norm + hence th0: "0 < e / (2 * norm x + 2 * norm y + 2)" using `e>0` using divide_pos_pos by auto + moreover + { fix x' y' + assume h: "norm (x' - x) < 1" "norm (x' - x) < e / (2 * norm x + 2 * norm y + 2)" + "norm (y' - y) < 1" "norm (y' - y) < e / (2 * norm x + 2 * norm y + 2)" + have th: "\a b (c::real). a \ 0 \ c \ 0 \ a + (b + c) < e ==> b < e " by arith + from h have thx: "norm (x' - x) * norm y < e / 2" + using th0 th1 apply (simp add: field_simps) + apply (rule th) defer defer apply assumption + by (simp_all add: norm_ge_zero zero_le_mult_iff) + + have "norm x' - norm x < 1" apply(rule le_less_trans) + using h(1) using norm_triangle_ineq2[of x' x] by auto + hence *:"norm x' < 1 + norm x" by auto + + have thy: "norm (y' - y) * norm x' < e / (2 * norm x + 2 * norm y + 2) * (1 + norm x)" + using mult_strict_mono'[OF h(4) * norm_ge_zero norm_ge_zero] by auto + also have "\ \ e/2" apply simp unfolding divide_le_eq + using th1 th0 `e>0` apply auto + unfolding mult_assoc and real_mult_le_cancel_iff2[OF `e>0`] by auto + + finally have "norm x' * norm (y' - y) + norm (x' - x) * norm y < e" + using thx and e by (simp add: field_simps) } + ultimately show ?thesis apply(rule_tac x="min 1 (e / 2 / (norm x + norm y + 1))" in exI) by auto +qed + +lemma Lim_bilinear: + fixes net :: "'a net" and h:: "real ^'m \ real ^'n \ real ^'p" + assumes "(f ---> l) net" and "(g ---> m) net" and "bilinear h" + shows "((\x. h (f x) (g x)) ---> (h l m)) net" +proof(cases "trivial_limit net") + case True thus "((\x. h (f x) (g x)) ---> h l m) net" unfolding Lim .. +next + case False note ntriv = this + obtain B where "B>0" and B:"\x y. norm (h x y) \ B * norm x * norm y" using bilinear_bounded_pos[OF assms(3)] by auto + { fix e::real assume "e>0" + obtain d where "d>0" and d:"\x' y'. norm (x' - l) < d \ norm (y' - m) < d \ norm x' * norm (y' - m) + norm (x' - l) * norm m < e / B" using `B>0` `e>0` + using norm_bound_lemma[of "e / B" l m] using divide_pos_pos by auto + + have *:"\x y. h (f x) (g x) - h l m = h (f x) (g x - m) + h (f x - l) m" + unfolding bilinear_rsub[OF assms(3)] + unfolding bilinear_lsub[OF assms(3)] by auto + + { fix x assume "dist (f x) l < d \ dist (g x) m < d" + hence **:"norm (f x) * norm (g x - m) + norm (f x - l) * norm m < e / B" + using d[THEN spec[where x="f x"], THEN spec[where x="g x"]] unfolding dist_def by auto + have "norm (h (f x) (g x - m)) + norm (h (f x - l) m) \ B * norm (f x) * norm (g x - m) + B * norm (f x - l) * norm m" + using B[THEN spec[where x="f x"], THEN spec[where x="g x - m"]] + using B[THEN spec[where x="f x - l"], THEN spec[where x="m"]] by auto + also have "\ < e" using ** and `B>0` by(auto simp add: field_simps) + finally have "dist (h (f x) (g x)) (h l m) < e" unfolding dist_def and * using norm_triangle_lt by auto + } + moreover + obtain c where "(\x. netord net x c) \ (\x. netord net x c \ dist (f x) l < d \ dist (g x) m < d)" + using net_dilemma[of net "\x. dist (f x) l < d" "\x. dist (g x) m < d"] using assms(1,2) unfolding Lim using False and `d>0` by (auto elim!: allE[where x=d]) + ultimately have "\y. (\x. netord net x y) \ (\x. netord net x y \ dist (h (f x) (g x)) (h l m) < e)" by auto } + thus "((\x. h (f x) (g x)) ---> h l m) net" unfolding Lim by auto +qed + +text{* These are special for limits out of the same vector space. 
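For example, Lim_at_zero below shifts the limit point to the origin: informally, (f ---> l) (at a) holds exactly when ((%x. f (a + x)) ---> l) (at 0), while Lim_within_id and Lim_at_id state that the identity function tends to a at a.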
*} + +lemma Lim_within_id: "(id ---> a) (at a within s)" by (auto simp add: Lim_within id_def) +lemma Lim_at_id: "(id ---> a) (at a)" +apply (subst within_UNIV[symmetric]) by (simp add: Lim_within_id) + +lemma Lim_at_zero: "(f ---> l) (at (a::real^'a)) \ ((\x. f(a + x)) ---> l) (at 0)" (is "?lhs = ?rhs") +proof + assume "?lhs" + { fix e::real assume "e>0" + with `?lhs` obtain d where d:"d>0" "\x. 0 < dist x a \ dist x a < d \ dist (f x) l < e" unfolding Lim_at by auto + { fix x::"real^'a" assume "0 < dist x 0 \ dist x 0 < d" + hence "dist (f (a + x)) l < e" using d + apply(erule_tac x="x+a" in allE) by(auto simp add: comm_monoid_add.mult_commute dist_def dist_sym) + } + hence "\d>0. \x. 0 < dist x 0 \ dist x 0 < d \ dist (f (a + x)) l < e" using d(1) by auto + } + thus "?rhs" unfolding Lim_at by auto +next + assume "?rhs" + { fix e::real assume "e>0" + with `?rhs` obtain d where d:"d>0" "\x. 0 < dist x 0 \ dist x 0 < d \ dist (f (a + x)) l < e" + unfolding Lim_at by auto + { fix x::"real^'a" assume "0 < dist x a \ dist x a < d" + hence "dist (f x) l < e" using d apply(erule_tac x="x-a" in allE) + by(auto simp add: comm_monoid_add.mult_commute dist_def dist_sym) + } + hence "\d>0. \x. 0 < dist x a \ dist x a < d \ dist (f x) l < e" using d(1) by auto + } + thus "?lhs" unfolding Lim_at by auto +qed + +text{* It's also sometimes useful to extract the limit point from the net. *} + +definition "netlimit net = (SOME a. \x. ~(netord net x a))" + +lemma netlimit_within: assumes"~(trivial_limit (at a within S))" + shows "(netlimit (at a within S) = a)" +proof- + { fix x assume "x \ a" + then obtain y where y:"dist y a \ dist a a \ 0 < dist y a \ y \ S \ dist y a \ dist x a \ 0 < dist y a \ y \ S" using assms unfolding trivial_limit_def within at by blast + assume "\y. \ netord (at a within S) y x" + hence "x = a" using y unfolding within at by (auto simp add: dist_refl dist_nz) + } + moreover + have "\y. \ netord (at a within S) y a" using assms unfolding trivial_limit_def within at by (auto simp add: dist_refl) + ultimately show ?thesis unfolding netlimit_def using some_equality[of "\x. \y. \ netord (at a within S) y x"] by blast +qed + +lemma netlimit_at: "netlimit(at a) = a" + apply (subst within_UNIV[symmetric]) + using netlimit_within[of a UNIV] + by (simp add: trivial_limit_at within_UNIV) + +text{* Transformation of limit. *} + +lemma Lim_transform: assumes "((\x. f x - g x) ---> 0) net" "(f ---> l) net" + shows "(g ---> l) net" +proof- + from assms have "((\x. f x - g x - f x) ---> 0 - l) net" using Lim_sub[of "\x. f x - g x" 0 net f l] by auto + thus "?thesis" using Lim_neg [of "\ x. - g x" "-l" net] by auto +qed + +lemma Lim_transform_eventually: "eventually (\x. f x = g x) net \ (f ---> l) net ==> (g ---> l) net" + using Lim_eventually[of "\x. f x - g x" 0 net] Lim_transform[of f g net l] by auto + +lemma Lim_transform_within: + assumes "0 < d" "(\x'\S. 0 < dist x' x \ dist x' x < d \ f x' = g x')" + "(f ---> l) (at x within S)" + shows "(g ---> l) (at x within S)" +proof- + have "((\x. f x - g x) ---> 0) (at x within S)" unfolding Lim_within[of "\x. f x - g x" 0 x S] using assms(1,2) by auto + thus ?thesis using Lim_transform[of f g "at x within S" l] using assms(3) by auto +qed + +lemma Lim_transform_at: "0 < d \ (\x'. 
0 < dist x' x \ dist x' x < d \ f x' = g x') \ + (f ---> l) (at x) ==> (g ---> l) (at x)" + apply (subst within_UNIV[symmetric]) + using Lim_transform_within[of d UNIV x f g l] + by (auto simp add: within_UNIV) + +text{* Common case assuming being away from some crucial point like 0. *} + +lemma Lim_transform_away_within: + fixes f:: "real ^'m \ real ^'n" + assumes "a\b" "\x\ S. x \ a \ x \ b \ f x = g x" + and "(f ---> l) (at a within S)" + shows "(g ---> l) (at a within S)" +proof- + have "\x'\S. 0 < dist x' a \ dist x' a < dist a b \ f x' = g x'" using assms(2) + apply auto apply(erule_tac x=x' in ballE) by (auto simp add: dist_sym dist_refl) + thus ?thesis using Lim_transform_within[of "dist a b" S a f g l] using assms(1,3) unfolding dist_nz by auto +qed + +lemma Lim_transform_away_at: + fixes f:: "real ^'m \ real ^'n" + assumes ab: "a\b" and fg: "\x. x \ a \ x \ b \ f x = g x" + and fl: "(f ---> l) (at a)" + shows "(g ---> l) (at a)" + using Lim_transform_away_within[OF ab, of UNIV f g l] fg fl + by (auto simp add: within_UNIV) + +text{* Alternatively, within an open set. *} + +lemma Lim_transform_within_open: + fixes f:: "real ^'m \ real ^'n" + assumes "open S" "a \ S" "\x\S. x \ a \ f x = g x" "(f ---> l) (at a)" + shows "(g ---> l) (at a)" +proof- + from assms(1,2) obtain e::real where "e>0" and e:"ball a e \ S" unfolding open_contains_ball by auto + hence "\x'. 0 < dist x' a \ dist x' a < e \ f x' = g x'" using assms(3) + unfolding ball_def subset_eq apply auto apply(erule_tac x=x' in allE) apply(erule_tac x=x' in ballE) by(auto simp add: dist_refl dist_sym) + thus ?thesis using Lim_transform_at[of e a f g l] `e>0` assms(4) by auto +qed + +text{* A congruence rule allowing us to transform limits assuming not at point. *} + +lemma Lim_cong_within[cong add]: + "(\x. x \ a \ f x = g x) ==> ((\x. f x) ---> l) (at a within S) \ ((g ---> l) (at a within S))" + by (simp add: Lim_within dist_nz[symmetric]) + +lemma Lim_cong_at[cong add]: + "(\x. x \ a ==> f x = g x) ==> (((\x. f x) ---> l) (at a) \ ((g ---> l) (at a)))" + by (simp add: Lim_at dist_nz[symmetric]) + +text{* Useful lemmas on closure and set of possible sequential limits.*} + +lemma closure_sequential: + "l \ closure S \ (\x. (\n. x n \ S) \ (x ---> l) sequentially)" (is "?lhs = ?rhs") +proof + assume "?lhs" moreover + { assume "l \ S" + hence "?rhs" using Lim_const[of l sequentially] by auto + } moreover + { assume "l islimpt S" + hence "?rhs" unfolding islimpt_sequential by auto + } ultimately + show "?rhs" unfolding closure_def by auto +next + assume "?rhs" + thus "?lhs" unfolding closure_def unfolding islimpt_sequential by auto +qed + +lemma closed_sequential_limits: + "closed S \ (\x l. (\n. x n \ S) \ (x ---> l) sequentially \ l \ S)" + unfolding closed_limpt + by (metis closure_sequential closure_closed closed_limpt islimpt_sequential mem_delete) + +lemma closure_approachable: "x \ closure S \ (\e>0. \y\S. dist y x < e)" + apply (auto simp add: closure_def islimpt_approachable) + by (metis dist_refl) + +lemma closed_approachable: "closed S ==> (\e>0. \y\S. dist y x < e) \ x \ S" + by (metis closure_closed closure_approachable) + +text{* Some other lemmas about sequences. *} + +lemma seq_offset: "(f ---> l) sequentially ==> ((\i. f( i + k)) ---> l) sequentially" + apply (auto simp add: Lim_sequentially) + by (metis trans_le_add1 ) + +lemma seq_offset_neg: "(f ---> l) sequentially ==> ((\i. f(i - k)) ---> l) sequentially" + apply (simp add: Lim_sequentially) + apply (subgoal_tac "\N k (n::nat). 
N + k <= n ==> N <= n - k") + apply metis + by arith + +lemma seq_offset_rev: "((\i. f(i + k)) ---> l) sequentially ==> (f ---> l) sequentially" + apply (simp add: Lim_sequentially) + apply (subgoal_tac "\N k (n::nat). N + k <= n ==> N <= n - k \ (n - k) + k = n") + by metis arith + +lemma seq_harmonic: "((\n. vec1(inverse (real n))) ---> 0) sequentially" +proof- + { fix e::real assume "e>0" + hence "\N::nat. \n::nat\N. inverse (real n) < e" + using real_arch_inv[of e] apply auto apply(rule_tac x=n in exI) + by (metis dlo_simps(4) le_imp_inverse_le linorder_not_less real_of_nat_gt_zero_cancel_iff real_of_nat_less_iff xt1(7)) + } + thus ?thesis unfolding Lim_sequentially dist_def apply simp unfolding norm_vec1 by auto +qed + +text{* More properties of closed balls. *} + +lemma closed_cball: "closed(cball x e)" +proof- + { fix xa::"nat\real^'a" and l assume as: "\n. dist x (xa n) \ e" "(xa ---> l) sequentially" + from as(2) have "((\n. x - xa n) ---> x - l) sequentially" using Lim_sub[of "\n. x" x sequentially xa l] Lim_const[of x sequentially] by auto + moreover from as(1) have "eventually (\n. norm (x - xa n) \ e) sequentially" unfolding eventually_sequentially dist_def by auto + ultimately have "dist x l \ e" + unfolding dist_def + using Lim_norm_ubound[of sequentially _ "x - l" e] using trivial_limit_sequentially by auto + } + thus ?thesis unfolding closed_sequential_limits by auto +qed + +lemma open_contains_cball: "open S \ (\x\S. \e>0. cball x e \ S)" +proof- + { fix x and e::real assume "x\S" "e>0" "ball x e \ S" + hence "\d>0. cball x d \ S" unfolding subset_eq by (rule_tac x="e/2" in exI, auto) + } moreover + { fix x and e::real assume "x\S" "e>0" "cball x e \ S" + hence "\d>0. ball x d \ S" unfolding subset_eq apply(rule_tac x="e/2" in exI) by auto + } ultimately + show ?thesis unfolding open_contains_ball by auto +qed + +lemma open_contains_cball_eq: "open S ==> (\x. x \ S \ (\e>0. cball x e \ S))" + by (metis open_contains_cball subset_eq order_less_imp_le centre_in_cball mem_def) + +lemma mem_interior_cball: "x \ interior S \ (\e>0. cball x e \ S)" + apply (simp add: interior_def) + by (metis open_contains_cball subset_trans ball_subset_cball centre_in_ball open_ball) + +lemma islimpt_ball: "y islimpt ball x e \ 0 < e \ y \ cball x e" (is "?lhs = ?rhs") +proof + assume "?lhs" + { assume "e \ 0" + hence *:"ball x e = {}" using ball_eq_empty[of x e] by auto + have False using `?lhs` unfolding * using islimpt_EMPTY[of y] by auto + } + hence "e > 0" by (metis dlo_simps(3)) + moreover + have "y \ cball x e" using closed_cball[of x e] islimpt_subset[of y "ball x e" "cball x e"] ball_subset_cball[of x e] `?lhs` unfolding closed_limpt by auto + ultimately show "?rhs" by auto +next + assume "?rhs" hence "e>0" by auto + { fix d::real assume "d>0" + have "\x'\ball x e. x' \ y \ dist x' y < d" + proof(cases "d \ dist x y") + case True thus "\x'\ball x e. x' \ y \ dist x' y < d" + proof(cases "x=y") + case True hence False using `d \ dist x y` `d>0` dist_refl[of x] by auto + thus "\x'\ball x e. 
x' \ y \ dist x' y < d" by auto + next + case False + + have "dist x (y - (d / (2 * dist y x)) *s (y - x)) + = norm (x - y + (d / (2 * norm (y - x))) *s (y - x))" + unfolding mem_cball mem_ball dist_def diff_diff_eq2 diff_add_eq[THEN sym] by auto + also have "\ = \- 1 + d / (2 * norm (x - y))\ * norm (x - y)" + using vector_sadd_rdistrib[of "- 1" "d / (2 * norm (y - x))", THEN sym, of "y - x"] + unfolding vector_smult_lneg vector_smult_lid + by (auto simp add: dist_sym[unfolded dist_def] norm_mul) + also have "\ = \- norm (x - y) + d / 2\" + unfolding abs_mult_pos[of "norm (x - y)", OF norm_ge_zero[of "x - y"]] + unfolding real_add_mult_distrib using `x\y`[unfolded dist_nz, unfolded dist_def] by auto + also have "\ \ e - d/2" using `d \ dist x y` and `d>0` and `?rhs` by(auto simp add: dist_def) + finally have "y - (d / (2 * dist y x)) *s (y - x) \ ball x e" using `d>0` by auto + + moreover + + have "(d / (2*dist y x)) *s (y - x) \ 0" + using `x\y`[unfolded dist_nz] `d>0` unfolding vector_mul_eq_0 by (auto simp add: dist_sym dist_refl) + moreover + have "dist (y - (d / (2 * dist y x)) *s (y - x)) y < d" unfolding dist_def apply simp unfolding norm_minus_cancel norm_mul + using `d>0` `x\y`[unfolded dist_nz] dist_sym[of x y] + unfolding dist_def by auto + ultimately show "\x'\ball x e. x' \ y \ dist x' y < d" by (rule_tac x="y - (d / (2*dist y x)) *s (y - x)" in bexI) auto + qed + next + case False hence "d > dist x y" by auto + show "\x'\ball x e. x' \ y \ dist x' y < d" + proof(cases "x=y") + case True + obtain z where **:"dist y z = (min e d) / 2" using vector_choose_dist[of "(min e d) / 2" y] + using `d > 0` `e>0` by (auto simp add: dist_refl) + show "\x'\ball x e. x' \ y \ dist x' y < d" + apply(rule_tac x=z in bexI) unfolding `x=y` dist_sym dist_refl dist_nz using ** `d > 0` `e>0` by auto + next + case False thus "\x'\ball x e. x' \ y \ dist x' y < d" + using `d>0` `d > dist x y` `?rhs` by(rule_tac x=x in bexI, auto simp add: dist_refl) + qed + qed } + thus "?lhs" unfolding mem_cball islimpt_approachable mem_ball by auto +qed + +lemma closure_ball: "0 < e ==> (closure(ball x e) = cball x e)" + apply (simp add: closure_def islimpt_ball expand_set_eq) + by arith + +lemma interior_cball: "interior(cball x e) = ball x e" +proof(cases "e\0") + case False note cs = this + from cs have "ball x e = {}" using ball_empty[of e x] by auto moreover + { fix y assume "y \ cball x e" + hence False unfolding mem_cball using dist_nz[of x y] cs by (auto simp add: dist_refl) } + hence "cball x e = {}" by auto + hence "interior (cball x e) = {}" using interior_empty by auto + ultimately show ?thesis by blast +next + case True note cs = this + have "ball x e \ cball x e" using ball_subset_cball by auto moreover + { fix S y assume as: "S \ cball x e" "open S" "y\S" + then obtain d where "d>0" and d:"\x'. 
dist x' y < d \ x' \ S" unfolding open_def by blast + + then obtain xa where xa:"dist y xa = d / 2" using vector_choose_dist[of "d/2" y] by auto + hence xa_y:"xa \ y" using dist_nz[of y xa] using `d>0` by auto + have "xa\S" using d[THEN spec[where x=xa]] using xa apply(auto simp add: dist_sym) unfolding dist_nz[THEN sym] using xa_y by auto + hence xa_cball:"xa \ cball x e" using as(1) by auto + + hence "y \ ball x e" proof(cases "x = y") + case True + hence "e>0" using xa_y[unfolded dist_nz] xa_cball[unfolded mem_cball] by (auto simp add: dist_sym) + thus "y \ ball x e" using `x = y ` by simp + next + case False + have "dist (y + (d / 2 / dist y x) *s (y - x)) y < d" unfolding dist_def + using `d>0` norm_ge_zero[of "y - x"] `x \ y` by auto + hence *:"y + (d / 2 / dist y x) *s (y - x) \ cball x e" using d as(1)[unfolded subset_eq] by blast + have "y - x \ 0" using `x \ y` by auto + hence **:"d / (2 * norm (y - x)) > 0" unfolding zero_less_norm_iff[THEN sym] + using `d>0` divide_pos_pos[of d "2*norm (y - x)"] by auto + + have "dist (y + (d / 2 / dist y x) *s (y - x)) x = norm (y + (d / (2 * norm (y - x))) *s y - (d / (2 * norm (y - x))) *s x - x)" + by (auto simp add: dist_def vector_ssub_ldistrib add_diff_eq) + also have "\ = norm ((1 + d / (2 * norm (y - x))) *s (y - x))" + by (auto simp add: vector_sadd_rdistrib vector_smult_lid ring_simps vector_sadd_rdistrib vector_ssub_ldistrib) + also have "\ = \1 + d / (2 * norm (y - x))\ * norm (y - x)" using ** by auto + also have "\ = (dist y x) + d/2"using ** by (auto simp add: left_distrib dist_def) + finally have "e \ dist x y +d/2" using *[unfolded mem_cball] by (auto simp add: dist_sym) + thus "y \ ball x e" unfolding mem_ball using `d>0` by auto + qed } + hence "\S \ cball x e. open S \ S \ ball x e" by auto + ultimately show ?thesis using interior_unique[of "ball x e" "cball x e"] using open_ball[of x e] by auto +qed + +lemma frontier_ball: "0 < e ==> frontier(ball a e) = {x. dist a x = e}" + apply (simp add: frontier_def closure_ball interior_open open_ball order_less_imp_le) + apply (simp add: expand_set_eq) + by arith + +lemma frontier_cball: "frontier(cball a e) = {x. dist a x = e}" + apply (simp add: frontier_def interior_cball closed_cball closure_closed order_less_imp_le) + apply (simp add: expand_set_eq) + by arith + +lemma cball_eq_empty: "(cball x e = {}) \ e < 0" + apply (simp add: expand_set_eq not_le) + by (metis dist_pos_le dist_refl order_less_le_trans) +lemma cball_empty: "e < 0 ==> cball x e = {}" by (simp add: cball_eq_empty) + +lemma cball_eq_sing: "(cball x e = {x}) \ e = 0" +proof- + { assume as:"\xa. (dist x xa \ e) = (xa = x)" + hence "e \ 0" apply (erule_tac x=x in allE) by (auto simp add: dist_pos_le dist_refl) + then obtain y where y:"dist x y = e" using vector_choose_dist[of e] by auto + hence "e = 0" using as apply(erule_tac x=y in allE) by (auto simp add: dist_pos_le dist_refl) + } + thus ?thesis unfolding expand_set_eq mem_cball by (auto simp add: dist_refl dist_nz dist_le_0) +qed + +lemma cball_sing: "e = 0 ==> cball x e = {x}" by (simp add: cball_eq_sing) + +text{* For points in the interior, localization of limits makes no difference. *} + +lemma eventually_within_interior: assumes "x \ interior S" + shows "eventually P (at x within S) \ eventually P (at x)" (is "?lhs = ?rhs") +proof- + from assms obtain e where e:"e>0" "\y. dist x y < e \ y \ S" unfolding mem_interior ball_def subset_eq by auto + { assume "?lhs" then obtain d where "d>0" "\xa\S. 
0 < dist xa x \ dist xa x < d \ P xa" unfolding eventually_within by auto + hence "?rhs" unfolding eventually_at using e by (auto simp add: dist_sym intro!: add exI[of _ "min e d"]) + } moreover + { assume "?rhs" hence "?lhs" unfolding eventually_within eventually_at by auto + } ultimately + show "?thesis" by auto +qed + +lemma lim_within_interior: "x \ interior S ==> ((f ---> l) (at x within S) \ (f ---> l) (at x))" + by (simp add: tendsto_def eventually_within_interior) + +lemma netlimit_within_interior: assumes "x \ interior S" + shows "netlimit(at x within S) = x" (is "?lhs = ?rhs") +proof- + from assms obtain e::real where e:"e>0" "ball x e \ S" using open_interior[of S] unfolding open_contains_ball using interior_subset[of S] by auto + hence "\ trivial_limit (at x within S)" using islimpt_subset[of x "ball x e" S] unfolding trivial_limit_within islimpt_ball centre_in_cball by auto + thus ?thesis using netlimit_within by auto +qed + +subsection{* Boundedness. *} + + (* FIXME: This has to be unified with BSEQ!! *) +definition "bounded S \ (\a. \(x::real^'n) \ S. norm x <= a)" + +lemma bounded_empty[simp]: "bounded {}" by (simp add: bounded_def) +lemma bounded_subset: "bounded T \ S \ T ==> bounded S" + by (metis bounded_def subset_eq) + +lemma bounded_interior[intro]: "bounded S ==> bounded(interior S)" + by (metis bounded_subset interior_subset) + +lemma bounded_closure[intro]: assumes "bounded S" shows "bounded(closure S)" +proof- + from assms obtain a where a:"\x\S. norm x \ a" unfolding bounded_def by auto + { fix x assume "x\closure S" + then obtain xa where xa:"\n. xa n \ S" "(xa ---> x) sequentially" unfolding closure_sequential by auto + moreover have "\y. \x. netord sequentially x y" using trivial_limit_sequentially unfolding trivial_limit_def by blast + hence "\y. (\x. netord sequentially x y) \ (\x. netord sequentially x y \ norm (xa x) \ a)" unfolding sequentially_def using a xa(1) by auto + ultimately have "norm x \ a" using Lim_norm_ubound[of sequentially xa x a] trivial_limit_sequentially unfolding eventually_def by auto + } + thus ?thesis unfolding bounded_def by auto +qed + +lemma bounded_cball[simp,intro]: "bounded (cball x e)" + apply (simp add: bounded_def) + apply (rule exI[where x="norm x + e"]) + apply (simp add: Ball_def) + by norm + +lemma bounded_ball[simp,intro]: "bounded(ball x e)" + by (metis ball_subset_cball bounded_cball bounded_subset) + +lemma finite_imp_bounded[intro]: assumes "finite S" shows "bounded S" +proof- + { fix x F assume as:"bounded F" + then obtain a where "\x\F. norm x \ a" unfolding bounded_def by auto + hence "bounded (insert x F)" unfolding bounded_def by(auto intro!: add exI[of _ "max a (norm x)"]) + } + thus ?thesis using finite_induct[of S bounded] using bounded_empty assms by auto +qed + +lemma bounded_Un[simp]: "bounded (S \ T) \ bounded S \ bounded T" + apply (auto simp add: bounded_def) + by (rule_tac x="max a aa" in exI, auto) + +lemma bounded_Union[intro]: "finite F \ (\S\F. bounded S) \ bounded(\F)" + by (induct rule: finite_induct[of F], auto) + +lemma bounded_pos: "bounded S \ (\b>0. \x\ S. norm x <= b)" + apply (simp add: bounded_def) + apply (subgoal_tac "\x (y::real). 
0 < 1 + abs y \ (x <= y \ x <= 1 + abs y)") + by metis arith + +lemma bounded_Int[intro]: "bounded S \ bounded T \ bounded (S \ T)" + by (metis Int_lower1 Int_lower2 bounded_subset) + +lemma bounded_diff[intro]: "bounded S ==> bounded (S - T)" +apply (metis Diff_subset bounded_subset) +done + +lemma bounded_insert[intro]:"bounded(insert x S) \ bounded S" + by (metis Diff_cancel Un_empty_right Un_insert_right bounded_Un bounded_subset finite.emptyI finite_imp_bounded infinite_remove subset_insertI) + +lemma bot_bounded_UNIV[simp, intro]: "~(bounded (UNIV:: (real^'n) set))" +proof(auto simp add: bounded_pos not_le) + fix b::real assume b: "b >0" + have b1: "b +1 \ 0" using b by simp + then obtain x:: "real^'n" where "norm x = b + 1" using vector_choose_size[of "b+1"] by blast + hence "norm x > b" using b by simp + then show "\(x::real^'n). b < norm x" by blast +qed + +lemma bounded_linear_image: + fixes f :: "real^'m \ real^'n" + assumes "bounded S" "linear f" + shows "bounded(f ` S)" +proof- + from assms(1) obtain b where b:"b>0" "\x\S. norm x \ b" unfolding bounded_pos by auto + from assms(2) obtain B where B:"B>0" "\x. norm (f x) \ B * norm x" using linear_bounded_pos by auto + { fix x assume "x\S" + hence "norm x \ b" using b by auto + hence "norm (f x) \ B * b" using B(2) apply(erule_tac x=x in allE) + by (metis B(1) B(2) real_le_trans real_mult_le_cancel_iff2) + } + thus ?thesis unfolding bounded_pos apply(rule_tac x="b*B" in exI) + using b B real_mult_order[of b B] by (auto simp add: real_mult_commute) +qed + +lemma bounded_scaling: "bounded S \ bounded ((\x. c *s x) ` S)" + apply (rule bounded_linear_image, assumption) + by (rule linear_compose_cmul, rule linear_id[unfolded id_def]) + +lemma bounded_translation: assumes "bounded S" shows "bounded ((\x. a + x) ` S)" +proof- + from assms obtain b where b:"b>0" "\x\S. norm x \ b" unfolding bounded_pos by auto + { fix x assume "x\S" + hence "norm (a + x) \ b + norm a" using norm_triangle_ineq[of a x] b by auto + } + thus ?thesis unfolding bounded_pos using norm_ge_zero[of a] b(1) using add_strict_increasing[of b 0 "norm a"] + by (auto intro!: add exI[of _ "b + norm a"]) +qed + + +text{* Some theorems on sups and infs using the notion "bounded". *} + +lemma bounded_vec1: "bounded(vec1 ` S) \ (\a. \x\S. abs x <= a)" + by (simp add: bounded_def forall_vec1 norm_vec1 vec1_in_image_vec1) + +lemma bounded_has_rsup: assumes "bounded(vec1 ` S)" "S \ {}" + shows "\x\S. x <= rsup S" and "\b. (\x\S. x <= b) \ rsup S <= b" +proof + fix x assume "x\S" + from assms(1) obtain a where a:"\x\S. \x\ \ a" unfolding bounded_vec1 by auto + hence *:"S *<= a" using setleI[of S a] by (metis abs_le_interval_iff mem_def) + thus "x \ rsup S" using rsup[OF `S\{}`] using assms(1)[unfolded bounded_vec1] using isLubD2[of UNIV S "rsup S" x] using `x\S` by auto +next + show "\b. (\x\S. x \ b) \ rsup S \ b" using assms + using rsup[of S, unfolded isLub_def isUb_def leastP_def setle_def setge_def] + apply (auto simp add: bounded_vec1) + by (auto simp add: isLub_def isUb_def leastP_def setle_def setge_def) +qed + +lemma rsup_insert: assumes "bounded (vec1 ` S)" + shows "rsup(insert x S) = (if S = {} then x else max x (rsup S))" +proof(cases "S={}") + case True thus ?thesis using rsup_finite_in[of "{x}"] by auto +next + let ?S = "insert x S" + case False + hence *:"\x\S. 
x \ rsup S" using bounded_has_rsup(1)[of S] using assms by auto + hence "insert x S *<= max x (rsup S)" unfolding setle_def by auto + hence "isLub UNIV ?S (rsup ?S)" using rsup[of ?S] by auto + moreover + have **:"isUb UNIV ?S (max x (rsup S))" unfolding isUb_def setle_def using * by auto + { fix y assume as:"isUb UNIV (insert x S) y" + hence "max x (rsup S) \ y" unfolding isUb_def using rsup_le[OF `S\{}`] + unfolding setle_def by auto } + hence "max x (rsup S) <=* isUb UNIV (insert x S)" unfolding setge_def Ball_def mem_def by auto + hence "isLub UNIV ?S (max x (rsup S))" using ** isLubI2[of UNIV ?S "max x (rsup S)"] unfolding Collect_def by auto + ultimately show ?thesis using real_isLub_unique[of UNIV ?S] using `S\{}` by auto +qed + +lemma sup_insert_finite: "finite S \ rsup(insert x S) = (if S = {} then x else max x (rsup S))" + apply (rule rsup_insert) + apply (rule finite_imp_bounded) + by simp + +lemma bounded_has_rinf: + assumes "bounded(vec1 ` S)" "S \ {}" + shows "\x\S. x >= rinf S" and "\b. (\x\S. x >= b) \ rinf S >= b" +proof + fix x assume "x\S" + from assms(1) obtain a where a:"\x\S. \x\ \ a" unfolding bounded_vec1 by auto + hence *:"- a <=* S" using setgeI[of S "-a"] unfolding abs_le_interval_iff by auto + thus "x \ rinf S" using rinf[OF `S\{}`] using isGlbD2[of UNIV S "rinf S" x] using `x\S` by auto +next + show "\b. (\x\S. x >= b) \ rinf S \ b" using assms + using rinf[of S, unfolded isGlb_def isLb_def greatestP_def setle_def setge_def] + apply (auto simp add: bounded_vec1) + by (auto simp add: isGlb_def isLb_def greatestP_def setle_def setge_def) +qed + +(* TODO: Move this to RComplete.thy -- would need to include Glb into RComplete *) +lemma real_isGlb_unique: "[| isGlb R S x; isGlb R S y |] ==> x = (y::real)" + apply (frule isGlb_isLb) + apply (frule_tac x = y in isGlb_isLb) + apply (blast intro!: order_antisym dest!: isGlb_le_isLb) + done + +lemma rinf_insert: assumes "bounded (vec1 ` S)" + shows "rinf(insert x S) = (if S = {} then x else min x (rinf S))" (is "?lhs = ?rhs") +proof(cases "S={}") + case True thus ?thesis using rinf_finite_in[of "{x}"] by auto +next + let ?S = "insert x S" + case False + hence *:"\x\S. x \ rinf S" using bounded_has_rinf(1)[of S] using assms by auto + hence "min x (rinf S) <=* insert x S" unfolding setge_def by auto + hence "isGlb UNIV ?S (rinf ?S)" using rinf[of ?S] by auto + moreover + have **:"isLb UNIV ?S (min x (rinf S))" unfolding isLb_def setge_def using * by auto + { fix y assume as:"isLb UNIV (insert x S) y" + hence "min x (rinf S) \ y" unfolding isLb_def using rinf_ge[OF `S\{}`] + unfolding setge_def by auto } + hence "isLb UNIV (insert x S) *<= min x (rinf S)" unfolding setle_def Ball_def mem_def by auto + hence "isGlb UNIV ?S (min x (rinf S))" using ** isGlbI2[of UNIV ?S "min x (rinf S)"] unfolding Collect_def by auto + ultimately show ?thesis using real_isGlb_unique[of UNIV ?S] using `S\{}` by auto +qed + +lemma inf_insert_finite: "finite S ==> rinf(insert x S) = (if S = {} then x else min x (rinf S))" + by (rule rinf_insert, rule finite_imp_bounded, simp) + +subsection{* Compactness (the definition is the one based on convegent subsequences). *} + +definition "compact S \ + (\(f::nat \ real^'n). (\n. f n \ S) \ + (\l\S. \r. (\m n. m < n \ r m < r n) \ ((f o r) ---> l) sequentially))" + +lemma monotone_bigger: fixes r::"nat\nat" + assumes "\m n::nat. 
m < n --> r m < r n" + shows "n \ r n" +proof(induct n) + show "0 \ r 0" by auto +next + fix n assume "n \ r n" + moreover have "r n < r (Suc n)" using assms by auto + ultimately show "Suc n \ r (Suc n)" by auto +qed + +lemma lim_subsequence: "\m n. m < n \ r m < r n \ (s ---> l) sequentially \ ((s o r) ---> l) sequentially" +unfolding Lim_sequentially by (simp, metis monotone_bigger le_trans) + +lemma num_Axiom: "EX! g. g 0 = e \ (\n. g (Suc n) = f n (g n))" + unfolding Ex1_def + apply (rule_tac x="nat_rec e f" in exI) + apply (rule conjI)+ +apply (rule def_nat_rec_0, simp) +apply (rule allI, rule def_nat_rec_Suc, simp) +apply (rule allI, rule impI, rule ext) +apply (erule conjE) +apply (induct_tac x) +apply (simp add: nat_rec_0) +apply (erule_tac x="n" in allE) +apply (simp) +done + + +lemma convergent_bounded_increasing: fixes s ::"nat\real" + assumes "\m n. m \ n --> s m \ s n" and "\n. abs(s n) \ b" + shows "\ l. \e::real>0. \ N. \n \ N. abs(s n - l) < e" +proof- + have "isUb UNIV (range s) b" using assms(2) and abs_le_D1 unfolding isUb_def and setle_def by auto + then obtain t where t:"isLub UNIV (range s) t" using reals_complete[of "range s" ] by auto + { fix e::real assume "e>0" and as:"\N. \n\N. \ \s n - t\ < e" + { fix n::nat + obtain N where "N\n" and n:"\s N - t\ \ e" using as[THEN spec[where x=n]] by auto + have "t \ s N" using isLub_isUb[OF t, unfolded isUb_def setle_def] by auto + with n have "s N \ t - e" using `e>0` by auto + hence "s n \ t - e" using assms(1)[THEN spec[where x=n], THEN spec[where x=N]] using `n\N` by auto } + hence "isUb UNIV (range s) (t - e)" unfolding isUb_def and setle_def by auto + hence False using isLub_le_isUb[OF t, of "t - e"] and `e>0` by auto } + thus ?thesis by blast +qed + +lemma convergent_bounded_monotone: fixes s::"nat \ real" + assumes "\n. abs(s n) \ b" and "(\m n. m \ n --> s m \ s n) \ (\m n. m \ n --> s n \ s m)" + shows "\l. \e::real>0. \N. \n\N. abs(s n - l) < e" + using convergent_bounded_increasing[of s b] assms using convergent_bounded_increasing[of "\n. - s n" b] + apply auto unfolding minus_add_distrib[THEN sym, unfolded diff_minus[THEN sym]] + unfolding abs_minus_cancel by(rule_tac x="-l" in exI)auto + +lemma compact_real_lemma: + assumes "\n::nat. abs(s n) \ b" + shows "\l r. (\m n::nat. m < n --> r m < r n) \ + (\e>0::real. \N. \n\N. (abs(s (r n) - l) < e))" +proof- + obtain r where r:"\m n::nat. m < n \ r m < r n" + "(\m n. m \ n \ s (r m) \ s (r n)) \ (\m n. m \ n \ s (r n) \ s (r m))" + using seq_monosub[of s] by (auto simp add: subseq_def monoseq_def) + thus ?thesis using convergent_bounded_monotone[of "s o r" b] and assms by auto +qed + +lemma compact_lemma: + assumes "bounded s" and "\n. (x::nat \real^'a) n \ s" + shows "\d\{1.. dimindex(UNIV::'a set)}. + \l::(real^'a). \ r. (\n m::nat. m < n --> r m < r n) \ + (\e>0. \N. \n\N. \i\{1..d}. \x (r n) $ i - l $ i\ < e)" +proof- + obtain b where b:"\x\s. norm x \ b" using assms(1)[unfolded bounded_def] by auto + { { fix i assume i:"i\{1.. dimindex(UNIV::'a set)}" + { fix n::nat + have "\x n $ i\ \ b" using b[THEN bspec[where x="x n"]] and component_le_norm[of i "x n"] and assms(2)[THEN spec[where x=n]] and i by auto } + hence "\n. \x n $ i\ \ b" by auto + } note b' = this + + fix d assume "d\{1.. dimindex(UNIV::'a set)}" + hence "\l::(real^'a). \ r. (\n m::nat. m < n --> r m < r n) \ + (\e>0. \N. \n\N. \i\{1..d}. 
\x (r n) $ i - l $ i\ < e)" + proof(induct d) case 0 thus ?case by auto + (* The induction really starts at Suc 0 *) + next case (Suc d) + show ?case proof(cases "d = 0") + case True hence "Suc d = Suc 0" by auto + obtain l r where r:"\m n::nat. m < n \ r m < r n" and lr:"\e>0. \N. \n\N. \x (r n) $ 1 - l\ < e" using b' and dimindex_ge_1[of "UNIV::'a set"] + using compact_real_lemma[of "\i. (x i)$1" b] by auto + thus ?thesis apply(rule_tac x="vec l" in exI) apply(rule_tac x=r in exI) + unfolding `Suc d = Suc 0` apply auto + unfolding vec_component[OF Suc(2)[unfolded `Suc d = Suc 0`]] by auto + next + case False hence d:"d \{1.. dimindex(UNIV::'a set)}" using Suc(2) by auto + obtain l1::"real^'a" and r1 where r1:"\n m::nat. m < n \ r1 m < r1 n" and lr1:"\e>0. \N. \n\N. \i\{1..d}. \x (r1 n) $ i - l1 $ i\ < e" + using Suc(1)[OF d] by auto + obtain l2 r2 where r2:"\m n::nat. m < n \ r2 m < r2 n" and lr2:"\e>0. \N. \n\N. \(x \ r1) (r2 n) $ (Suc d) - l2\ < e" + using b'[OF Suc(2)] and compact_real_lemma[of "\i. ((x \ r1) i)$(Suc d)" b] by auto + def r \ "r1 \ r2" have r:"\m n. m < n \ r m < r n" unfolding r_def o_def using r1 and r2 by auto + moreover + def l \ "(\ i. if i = Suc d then l2 else l1$i)::real^'a" + { fix e::real assume "e>0" + from lr1 obtain N1 where N1:"\n\N1. \i\{1..d}. \x (r1 n) $ i - l1 $ i\ < e" using `e>0` by blast + from lr2 obtain N2 where N2:"\n\N2. \(x \ r1) (r2 n) $ (Suc d) - l2\ < e" using `e>0` by blast + { fix n assume n:"n\ N1 + N2" + fix i assume i:"i\{1..Suc d}" hence i':"i\{1.. dimindex(UNIV::'a set)}" using Suc by auto + hence "\x (r n) $ i - l $ i\ < e" + using N2[THEN spec[where x="n"]] and n + using N1[THEN spec[where x="r2 n"]] and n + using monotone_bigger[OF r] and i + unfolding l_def and r_def and Cart_lambda_beta'[OF i'] + using monotone_bigger[OF r2, of n] by auto } + hence "\N. \n\N. \i\{1..Suc d}. \x (r n) $ i - l $ i\ < e" by blast } + ultimately show ?thesis by auto + qed + qed } + thus ?thesis by auto +qed + +lemma bounded_closed_imp_compact: fixes s::"(real^'a) set" + assumes "bounded s" and "closed s" + shows "compact s" +proof- + let ?d = "dimindex (UNIV::'a set)" + { fix f assume as:"\n::nat. f n \ s" + obtain l::"real^'a" and r where r:"\n m::nat. m < n \ r m < r n" + and lr:"\e>0. \N. \n\N. \i\{1..?d}. \f (r n) $ i - l $ i\ < e" + using compact_lemma[OF assms(1) as, THEN bspec[where x="?d"]] and dimindex_ge_1[of "UNIV::'a set"] by auto + { fix e::real assume "e>0" + hence "0 < e / (real_of_nat ?d)" using dimindex_nonzero[of "UNIV::'a set"] using divide_pos_pos[of e, of "real_of_nat ?d"] by auto + then obtain N::nat where N:"\n\N. \i\{1..?d}. \f (r n) $ i - l $ i\ < e / (real_of_nat ?d)" using lr[THEN spec[where x="e / (real_of_nat ?d)"]] by blast + { fix n assume n:"n\N" + have "1 \ {1..?d}" using dimindex_nonzero[of "UNIV::'a set"] by auto + hence "finite {1..?d}" "{1..?d} \ {}" by auto + moreover + { fix i assume i:"i \ {1..?d}" + hence "\((f \ r) n - l) $ i\ < e / real_of_nat ?d" using `n\N` using N[THEN spec[where x=n]] + apply auto apply(erule_tac x=i in ballE) unfolding vector_minus_component[OF i] by auto } + ultimately have "(\i = 1..?d. \((f \ r) n - l) $ i\) + < (\i = 1..?d. e / real_of_nat ?d)" + using setsum_strict_mono[of "{1..?d}" "\i. \((f \ r) n - l) $ i\" "\i. e / (real_of_nat ?d)"] by auto + hence "(\i = 1..?d. \((f \ r) n - l) $ i\) < e" unfolding setsum_constant using dimindex_nonzero[of "UNIV::'a set"] by auto + hence "dist ((f \ r) n) l < e" unfolding dist_def using norm_le_l1[of "(f \ r) n - l"] by auto } + hence "\N. 
\n\N. dist ((f \ r) n) l < e" by auto } + hence *:"((f \ r) ---> l) sequentially" unfolding Lim_sequentially by auto + moreover have "l\s" + using assms(2)[unfolded closed_sequential_limits, THEN spec[where x="f \ r"], THEN spec[where x=l]] and * and as by auto + ultimately have "\l\s. \r. (\m n. m < n \ r m < r n) \ ((f \ r) ---> l) sequentially" using r by auto } + thus ?thesis unfolding compact_def by auto +qed + +subsection{* Completeness. *} + + (* FIXME: Unify this with Cauchy from SEQ!!!!!*) + +definition cauchy_def:"cauchy s \ (\e>0. \N. \m n. m \ N \ n \ N --> dist(s m)(s n) < e)" + +definition complete_def:"complete s \ (\f::(nat=>real^'a). (\n. f n \ s) \ cauchy f + --> (\l \ s. (f ---> l) sequentially))" + +lemma cauchy: "cauchy s \ (\e>0.\ N::nat. \n\N. dist(s n)(s N) < e)" (is "?lhs = ?rhs") +proof- + { assume ?rhs + { fix e::real + assume "e>0" + with `?rhs` obtain N where N:"\n\N. dist (s n) (s N) < e/2" + by (erule_tac x="e/2" in allE) auto + { fix n m + assume nm:"N \ m \ N \ n" + hence "dist (s m) (s n) < e" using N + using dist_triangle_half_l[of "s m" "s N" "e" "s n"] + by blast + } + hence "\N. \m n. N \ m \ N \ n \ dist (s m) (s n) < e" + by blast + } + hence ?lhs + unfolding cauchy_def + by blast + } + thus ?thesis + unfolding cauchy_def + using dist_triangle_half_l + by blast +qed + +lemma convergent_imp_cauchy: + "(s ---> l) sequentially ==> cauchy s" +proof(simp only: cauchy_def, rule, rule) + fix e::real assume "e>0" "(s ---> l) sequentially" + then obtain N::nat where N:"\n\N. dist (s n) l < e/2" unfolding Lim_sequentially by(erule_tac x="e/2" in allE) auto + thus "\N. \m n. N \ m \ N \ n \ dist (s m) (s n) < e" using dist_triangle_half_l[of _ l e _] by (rule_tac x=N in exI) auto +qed + +lemma cauchy_imp_bounded: assumes "cauchy s" shows "bounded {y. (\n::nat. y = s n)}" +proof- + from assms obtain N::nat where "\m n. N \ m \ N \ n \ dist (s m) (s n) < 1" unfolding cauchy_def apply(erule_tac x= 1 in allE) by auto + hence N:"\n. N \ n \ dist (s N) (s n) < 1" by auto + { fix n::nat assume "n\N" + hence "norm (s n) \ norm (s N) + 1" using N apply(erule_tac x=n in allE) unfolding dist_def + using norm_triangle_sub[of "s N" "s n"] by (auto, metis dist_def dist_sym le_add_right_mono norm_triangle_sub real_less_def) + } + hence "\n\N. norm (s n) \ norm (s N) + 1" by auto + moreover + have "bounded (s ` {0..N})" using finite_imp_bounded[of "s ` {1..N}"] by auto + then obtain a where a:"\x\s ` {0..N}. norm x \ a" unfolding bounded_def by auto + ultimately show "?thesis" unfolding bounded_def + apply(rule_tac x="max a (norm (s N) + 1)" in exI) apply auto + apply(erule_tac x=n in allE) apply(erule_tac x=n in ballE) by auto +qed + +lemma compact_imp_complete: assumes "compact s" shows "complete s" +proof- + { fix f assume as: "(\n::nat. f n \ s)" "cauchy f" + from as(1) obtain l r where lr: "l\s" "(\m n. m < n \ r m < r n)" "((f \ r) ---> l) sequentially" using assms unfolding compact_def by blast + + { fix n :: nat have lr':"n \ r n" + proof (induct n) + show "0 \ r 0" using lr(2) by blast + next fix na assume "na \ r na" moreover have "na < Suc na \ r na < r (Suc na)" using lr(2) by blast + ultimately show "Suc na \ r (Suc na)" by auto + qed } note lr' = this + + { fix e::real assume "e>0" + from as(2) obtain N where N:"\m n. N \ m \ N \ n \ dist (f m) (f n) < e/2" unfolding cauchy_def using `e>0` apply (erule_tac x="e/2" in allE) by auto + from lr(3)[unfolded Lim_sequentially, THEN spec[where x="e/2"]] obtain M where M:"\n\M. 
dist ((f \ r) n) l < e/2" using `e>0` by auto + { fix n::nat assume n:"n \ max N M" + have "dist ((f \ r) n) l < e/2" using n M by auto + moreover have "r n \ N" using lr'[of n] n by auto + hence "dist (f n) ((f \ r) n) < e / 2" using N using n by auto + ultimately have "dist (f n) l < e" using dist_triangle_half_r[of "f (r n)" "f n" e l] by (auto simp add: dist_sym) } + hence "\N. \n\N. dist (f n) l < e" by blast } + hence "\l\s. (f ---> l) sequentially" using `l\s` unfolding Lim_sequentially by auto } + thus ?thesis unfolding complete_def by auto +qed + +lemma complete_univ: + "complete UNIV" +proof(simp add: complete_def, rule, rule) + fix f::"nat \ real^'n" assume "cauchy f" + hence "bounded (f`UNIV)" using cauchy_imp_bounded[of f] unfolding image_def by auto + hence "compact (closure (f`UNIV))" using bounded_closed_imp_compact[of "closure (range f)"] by auto + hence "complete (closure (range f))" using compact_imp_complete by auto + thus "\l. (f ---> l) sequentially" unfolding complete_def[of "closure (range f)"] using `cauchy f` unfolding closure_def by auto +qed + +lemma complete_eq_closed: "complete s \ closed s" (is "?lhs = ?rhs") +proof + assume ?lhs + { fix x assume "x islimpt s" + then obtain f where f:"\n. f n \ s - {x}" "(f ---> x) sequentially" unfolding islimpt_sequential by auto + then obtain l where l: "l\s" "(f ---> l) sequentially" using `?lhs`[unfolded complete_def] using convergent_imp_cauchy[of f x] by auto + hence "x \ s" using Lim_unique[of sequentially f l x] trivial_limit_sequentially f(2) by auto } + thus ?rhs unfolding closed_limpt by auto +next + assume ?rhs + { fix f assume as:"\n::nat. f n \ s" "cauchy f" + then obtain l where "(f ---> l) sequentially" using complete_univ[unfolded complete_def, THEN spec[where x=f]] by auto + hence "\l\s. (f ---> l) sequentially" using `?rhs`[unfolded closed_sequential_limits, THEN spec[where x=f], THEN spec[where x=l]] using as(1) by auto } + thus ?lhs unfolding complete_def by auto +qed + +lemma convergent_eq_cauchy: "(\l. (s ---> l) sequentially) \ cauchy s" (is "?lhs = ?rhs") +proof + assume ?lhs then obtain l where "(s ---> l) sequentially" by auto + thus ?rhs using convergent_imp_cauchy by auto +next + assume ?rhs thus ?lhs using complete_univ[unfolded complete_def, THEN spec[where x=s]] by auto +qed + +lemma convergent_imp_bounded: "(s ---> l) sequentially ==> bounded (s ` (UNIV::(nat set)))" + using convergent_eq_cauchy[of s] + using cauchy_imp_bounded[of s] + unfolding image_def + by auto + +subsection{* Total boundedness. *} + +fun helper_1::"((real^'n) set) \ real \ nat \ real^'n" where + "helper_1 s e n = (SOME y::real^'n. y \ s \ (\m (dist (helper_1 s e m) y < e)))" +declare helper_1.simps[simp del] + +lemma compact_imp_totally_bounded: + assumes "compact s" + shows "\e>0. \k. finite k \ k \ s \ s \ (\((\x. ball x e) ` k))" +proof(rule, rule, rule ccontr) + fix e::real assume "e>0" and assm:"\ (\k. finite k \ k \ s \ s \ \(\x. ball x e) ` k)" + def x \ "helper_1 s e" + { fix n + have "x n \ s \ (\m dist (x m) (x n) < e)" + proof(induct_tac rule:nat_less_induct) + fix n def Q \ "(\y. y \ s \ (\m dist (x m) y < e))" + assume as:"\m s \ (\ma dist (x ma) (x m) < e)" + have "\ s \ (\x\x ` {0..s" "z \ (\x\x ` {0.. s \ (\m dist (x m) (x n) < e)" unfolding Q_def by auto + qed } + hence "\n::nat. x n \ s" and x:"\n. \m < n. \ (dist (x m) (x n) < e)" by blast+ + then obtain l r where "l\s" and r:"\m n. 
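+
+  (* Informally, compact_imp_totally_bounded states that a compact s is totally bounded:
+     for every e > 0 some finite k <= s exists whose e-balls cover s.  The proof is by
+     contradiction: helper_1 uses Hilbert choice to pick a sequence in s whose terms are
+     pairwise at least e apart, and such a sequence admits no Cauchy (hence no convergent)
+     subsequence, contradicting compactness. *)
+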
m < n \ r m < r n" and "((x \ r) ---> l) sequentially" using assms(1)[unfolded compact_def, THEN spec[where x=x]] by auto + from this(3) have "cauchy (x \ r)" using convergent_imp_cauchy by auto + then obtain N::nat where N:"\m n. N \ m \ N \ n \ dist ((x \ r) m) ((x \ r) n) < e" unfolding cauchy_def using `e>0` by auto + show False + using N[THEN spec[where x=N], THEN spec[where x="N+1"]] + using r[THEN spec[where x=N], THEN spec[where x="N+1"]] + using x[THEN spec[where x="r (N+1)"], THEN spec[where x="r (N)"]] by auto +qed + +subsection{* Heine-Borel theorem (following Burkill \& Burkill vol. 2) *} + +lemma heine_borel_lemma: fixes s::"(real^'n) set" + assumes "compact s" "s \ (\ t)" "\b \ t. open b" + shows "\e>0. \x \ s. \b \ t. ball x e \ b" +proof(rule ccontr) + assume "\ (\e>0. \x\s. \b\t. ball x e \ b)" + hence cont:"\e>0. \x\s. \xa\t. \ (ball x e \ xa)" by auto + { fix n::nat + have "1 / real (n + 1) > 0" by auto + hence "\x. x\s \ (\xa\t. \ (ball x (inverse (real (n+1))) \ xa))" using cont unfolding Bex_def by auto } + hence "\n::nat. \x. x \ s \ (\xa\t. \ ball x (inverse (real (n + 1))) \ xa)" by auto + then obtain f where f:"\n::nat. f n \ s \ (\xa\t. \ ball (f n) (inverse (real (n + 1))) \ xa)" + using choice[of "\n::nat. \x. x\s \ (\xa\t. \ ball x (inverse (real (n + 1))) \ xa)"] by auto + + then obtain l r where l:"l\s" and r:"\m n. m < n \ r m < r n" and lr:"((f \ r) ---> l) sequentially" + using assms(1)[unfolded compact_def, THEN spec[where x=f]] by auto + + obtain b where "l\b" "b\t" using assms(2) and l by auto + then obtain e where "e>0" and e:"\z. dist z l < e \ z\b" + using assms(3)[THEN bspec[where x=b]] unfolding open_def by auto + + then obtain N1 where N1:"\n\N1. dist ((f \ r) n) l < e / 2" + using lr[unfolded Lim_sequentially, THEN spec[where x="e/2"]] by auto + + obtain N2::nat where N2:"N2>0" "inverse (real N2) < e /2" using real_arch_inv[of "e/2"] and `e>0` by auto + have N2':"inverse (real (r (N1 + N2) +1 )) < e/2" + apply(rule order_less_trans) apply(rule less_imp_inverse_less) using N2 + using monotone_bigger[OF r, of "N1 + N2"] by auto + + def x \ "(f (r (N1 + N2)))" + have x:"\ ball x (inverse (real (r (N1 + N2) + 1))) \ b" unfolding x_def + using f[THEN spec[where x="r (N1 + N2)"]] using `b\t` by auto + have "\y\ball x (inverse (real (r (N1 + N2) + 1))). y\b" apply(rule ccontr) using x by auto + then obtain y where y:"y \ ball x (inverse (real (r (N1 + N2) + 1)))" "y \ b" by auto + + have "dist x l < e/2" using N1 unfolding x_def o_def by auto + hence "dist y l < e" using y N2' using dist_triangle[of y l x]by (auto simp add:dist_sym) + + thus False using e and `y\b` by auto +qed + +lemma compact_imp_heine_borel: "compact s ==> (\f. (\t \ f. open t) \ s \ (\ f) + \ (\f'. f' \ f \ finite f' \ s \ (\ f')))" +proof clarify + fix f assume "compact s" " \t\f. open t" "s \ \f" + then obtain e::real where "e>0" and "\x\s. \b\f. ball x e \ b" using heine_borel_lemma[of s f] by auto + hence "\x\s. \b. b\f \ ball x e \ b" by auto + hence "\bb. \x\s. bb x \f \ ball x e \ bb x" using bchoice[of s "\x b. b\f \ ball x e \ b"] by auto + then obtain bb where bb:"\x\s. (bb x) \ f \ ball x e \ (bb x)" by blast + + from `compact s` have "\ k. finite k \ k \ s \ s \ \(\x. ball x e) ` k" using compact_imp_totally_bounded[of s] `e>0` by auto + then obtain k where k:"finite k" "k \ s" "s \ \(\x. ball x e) ` k" by auto + + have "finite (bb ` k)" using k(1) by auto + moreover + { fix x assume "x\s" + hence "x\\(\x. 
ball x e) ` k" using k(3) unfolding subset_eq by auto + hence "\X\bb ` k. x \ X" using bb k(2) by blast + hence "x \ \(bb ` k)" using Union_iff[of x "bb ` k"] by auto + } + ultimately show "\f'\f. finite f' \ s \ \f'" using bb k(2) by (rule_tac x="bb ` k" in exI) auto +qed + +subsection{* Bolzano-Weierstrass property. *} + +lemma heine_borel_imp_bolzano_weierstrass: + assumes "\f. (\t \ f. open t) \ s \ (\ f) --> (\f'. f' \ f \ finite f' \ s \ (\ f'))" + "infinite t" "t \ s" + shows "\x \ s. x islimpt t" +proof(rule ccontr) + assume "\ (\x \ s. x islimpt t)" + then obtain f where f:"\x\s. x \ f x \ open (f x) \ (\y\t. y \ f x \ y = x)" unfolding islimpt_def + using bchoice[of s "\ x T. x \ T \ open T \ (\y\t. y \ T \ y = x)"] by auto + obtain g where g:"g\{t. \x. x \ s \ t = f x}" "finite g" "s \ \g" + using assms(1)[THEN spec[where x="{t. \x. x\s \ t = f x}"]] using f by auto + from g(1,3) have g':"\x\g. \xa \ s. x = f xa" by auto + { fix x y assume "x\t" "y\t" "f x = f y" + hence "x \ f x" "y \ f x \ y = x" using f[THEN bspec[where x=x]] and `t\s` by auto + hence "x = y" using `f x = f y` and f[THEN bspec[where x=y]] and `y\t` and `t\s` by auto } + hence "infinite (f ` t)" using assms(2) using finite_imageD[unfolded inj_on_def, of f t] by auto + moreover + { fix x assume "x\t" "f x \ g" + from g(3) assms(3) `x\t` obtain h where "h\g" and "x\h" by auto + then obtain y where "y\s" "h = f y" using g'[THEN bspec[where x=h]] by auto + hence "y = x" using f[THEN bspec[where x=y]] and `x\t` and `x\h`[unfolded `h = f y`] by auto + hence False using `f x \ g` `h\g` unfolding `h = f y` by auto } + hence "f ` t \ g" by auto + ultimately show False using g(2) using finite_subset by auto +qed + +subsection{* Complete the chain of compactness variants. *} + +primrec helper_2::"(real \ real^'n) \ nat \ real ^'n" where + "helper_2 beyond 0 = beyond 0" | + "helper_2 beyond (Suc n) = beyond (norm (helper_2 beyond n) + 1 )" + +lemma bolzano_weierstrass_imp_bounded: fixes s::"(real^'n) set" + assumes "\t. infinite t \ t \ s --> (\x \ s. x islimpt t)" + shows "bounded s" +proof(rule ccontr) + assume "\ bounded s" + then obtain beyond where "\a. beyond a \s \ \ norm (beyond a) \ a" + unfolding bounded_def apply simp using choice[of "\a x. x\s \ \ norm x \ a"] by auto + hence beyond:"\a. beyond a \s" "\a. 
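+
+  (* Informally, heine_borel_imp_bolzano_weierstrass argues by contradiction: if no point of
+     s were a limit point of t, every x in s would have an open neighbourhood f x meeting t
+     in at most {x}, and a finite subcover by such neighbourhoods would force t to be finite.
+     bolzano_weierstrass_imp_bounded uses the helper_2 recursion to build, inside an
+     unbounded s, a sequence whose terms are pairwise more than distance 1 apart, hence
+     without a limit point. *)
+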
norm (beyond a) > a" unfolding linorder_not_le by auto + def x \ "helper_2 beyond" + + { fix m n ::nat assume "mn" + have "1 < dist (x m) (x n)" + proof(cases "mn` by auto + hence "1 < norm (x m) - norm (x n)" using *[of n m] by auto + thus ?thesis unfolding dist_sym[of "x n" "x m"] unfolding dist_def using norm_triangle_sub[of "x m" "x n"] by auto + qed } note ** = this + { fix a b assume "x a = x b" "a \ b" + hence False using **[of a b] unfolding dist_eq_0[THEN sym] by auto } + hence "inj x" unfolding inj_on_def by auto + moreover + { fix n::nat + have "x n \ s" + proof(cases "n = 0") + case True thus ?thesis unfolding x_def using beyond by auto + next + case False then obtain z where "n = Suc z" using not0_implies_Suc by auto + thus ?thesis unfolding x_def using beyond by auto + qed } + ultimately have "infinite (range x) \ range x \ s" unfolding x_def using range_inj_infinite[of "helper_2 beyond"] using beyond(1) by auto + + then obtain l where "l\s" and l:"l islimpt range x" using assms[THEN spec[where x="range x"]] by auto + then obtain y where "x y \ l" and y:"dist (x y) l < 1/2" unfolding islimpt_approachable apply(erule_tac x="1/2" in allE) by auto + then obtain z where "x z \ l" and z:"dist (x z) l < dist (x y) l" using l[unfolded islimpt_approachable, THEN spec[where x="dist (x y) l"]] + unfolding dist_nz by auto + show False using y and z and dist_triangle_half_l[of "x y" l 1 "x z"] and **[of y z] by auto +qed + +lemma sequence_infinite_lemma: + assumes "\n::nat. (f n \ l)" "(f ---> l) sequentially" + shows "infinite {y::real^'a. (\ n. y = f n)}" +proof(rule ccontr) + let ?A = "(\x. dist x l) ` {y. \n. y = f n}" + assume "\ infinite {y. \n. y = f n}" + hence **:"finite ?A" "?A \ {}" by auto + obtain k where k:"dist (f k) l = Min ?A" using Min_in[OF **] by auto + have "0 < Min ?A" using assms(1) unfolding dist_nz unfolding Min_gr_iff[OF **] by auto + then obtain N where "dist (f N) l < Min ?A" using assms(2)[unfolded Lim_sequentially, THEN spec[where x="Min ?A"]] by auto + moreover have "dist (f N) l \ ?A" by auto + ultimately show False using Min_le[OF **(1), of "dist (f N) l"] by auto +qed + +lemma sequence_unique_limpt: + assumes "\n::nat. (f n \ l)" "(f ---> l) sequentially" "l' islimpt {y. (\n. y = f n)}" + shows "l' = l" +proof(rule ccontr) + def e \ "dist l' l" + assume "l' \ l" hence "e>0" unfolding dist_nz e_def by auto + then obtain N::nat where N:"\n\N. dist (f n) l < e / 2" + using assms(2)[unfolded Lim_sequentially, THEN spec[where x="e/2"]] by auto + def d \ "Min (insert (e/2) ((\n. if dist (f n) l' = 0 then e/2 else dist (f n) l') ` {0 .. N}))" + have "d>0" using `e>0` unfolding d_def e_def using dist_pos_le[of _ l', unfolded order_le_less] by auto + obtain k where k:"f k \ l'" "dist (f k) l' < d" using `d>0` and assms(3)[unfolded islimpt_approachable, THEN spec[where x="d"]] by auto + have "k\N" using k(1)[unfolded dist_nz] using k(2)[unfolded d_def] + by force + hence "dist l' l < e" using N[THEN spec[where x=k]] using k(2)[unfolded d_def] and dist_triangle_half_r[of "f k" l' e l] by auto + thus False unfolding e_def by auto +qed + +lemma bolzano_weierstrass_imp_closed: + assumes "\t. infinite t \ t \ s --> (\x \ s. x islimpt t)" + shows "closed s" +proof- + { fix x l assume as: "\n::nat. x n \ s" "(x ---> l) sequentially" + hence "l \ s" + proof(cases "\n. x n \ l") + case False thus "l\s" using as(1) by auto + next + case True note cas = this + with as(2) have "infinite {y. \n. 
y = x n}" using sequence_infinite_lemma[of x l] by auto + then obtain l' where "l'\s" "l' islimpt {y. \n. y = x n}" using assms[THEN spec[where x="{y. \n. y = x n}"]] as(1) by auto + thus "l\s" using sequence_unique_limpt[of x l l'] using as cas by auto + qed } + thus ?thesis unfolding closed_sequential_limits by auto +qed + +text{* Hence express everything as an equivalence. *} + +lemma compact_eq_heine_borel: "compact s \ + (\f. (\t \ f. open t) \ s \ (\ f) + --> (\f'. f' \ f \ finite f' \ s \ (\ f')))" (is "?lhs = ?rhs") +proof + assume ?lhs thus ?rhs using compact_imp_heine_borel[of s] by blast +next + assume ?rhs + hence "\t. infinite t \ t \ s \ (\x\s. x islimpt t)" using heine_borel_imp_bolzano_weierstrass[of s] by blast + thus ?lhs using bolzano_weierstrass_imp_bounded[of s] bolzano_weierstrass_imp_closed[of s] bounded_closed_imp_compact[of s] by blast +qed + +lemma compact_eq_bolzano_weierstrass: + "compact s \ (\t. infinite t \ t \ s --> (\x \ s. x islimpt t))" (is "?lhs = ?rhs") +proof + assume ?lhs thus ?rhs unfolding compact_eq_heine_borel using heine_borel_imp_bolzano_weierstrass[of s] by auto +next + assume ?rhs thus ?lhs using bolzano_weierstrass_imp_bounded bolzano_weierstrass_imp_closed bounded_closed_imp_compact by auto +qed + +lemma compact_eq_bounded_closed: + "compact s \ bounded s \ closed s" (is "?lhs = ?rhs") +proof + assume ?lhs thus ?rhs unfolding compact_eq_bolzano_weierstrass using bolzano_weierstrass_imp_bounded bolzano_weierstrass_imp_closed by auto +next + assume ?rhs thus ?lhs using bounded_closed_imp_compact by auto +qed + +lemma compact_imp_bounded: + "compact s ==> bounded s" + unfolding compact_eq_bounded_closed + by simp + +lemma compact_imp_closed: + "compact s ==> closed s" + unfolding compact_eq_bounded_closed + by simp + +text{* In particular, some common special cases. *} + +lemma compact_empty[simp]: + "compact {}" + unfolding compact_def + by simp + + (* FIXME : Rename *) +lemma compact_union[intro]: + "compact s \ compact t ==> compact (s \ t)" + unfolding compact_eq_bounded_closed + using bounded_Un[of s t] + using closed_Un[of s t] + by simp + +lemma compact_inter[intro]: + "compact s \ compact t ==> compact (s \ t)" + unfolding compact_eq_bounded_closed + using bounded_Int[of s t] + using closed_Int[of s t] + by simp + +lemma compact_inter_closed[intro]: + "compact s \ closed t ==> compact (s \ t)" + unfolding compact_eq_bounded_closed + using closed_Int[of s t] + using bounded_subset[of "s \ t" s] + by blast + +lemma closed_inter_compact[intro]: + "closed s \ compact t ==> compact (s \ t)" +proof- + assume "closed s" "compact t" + moreover + have "s \ t = t \ s" by auto ultimately + show ?thesis + using compact_inter_closed[of t s] + by auto +qed + +lemma finite_imp_closed: + "finite s ==> closed s" +proof- + assume "finite s" hence "\( \t. 
t \ s \ infinite t)" using finite_subset by auto + thus ?thesis using bolzano_weierstrass_imp_closed[of s] by auto +qed + +lemma finite_imp_compact: + "finite s ==> compact s" + unfolding compact_eq_bounded_closed + using finite_imp_closed finite_imp_bounded + by blast + +lemma compact_sing[simp]: + "compact {a}" + using finite_imp_compact[of "{a}"] + by blast + +lemma closed_sing[simp]: + "closed {a}" + using compact_eq_bounded_closed compact_sing[of a] + by blast + +lemma compact_cball[simp]: + "compact(cball x e)" + using compact_eq_bounded_closed bounded_cball closed_cball + by blast + +lemma compact_frontier_bounded[intro]: + "bounded s ==> compact(frontier s)" + unfolding frontier_def + using compact_eq_bounded_closed + by blast + +lemma compact_frontier[intro]: + "compact s ==> compact (frontier s)" + using compact_eq_bounded_closed compact_frontier_bounded + by blast + +lemma frontier_subset_compact: + "compact s ==> frontier s \ s" + using frontier_subset_closed compact_eq_bounded_closed + by blast + +lemma open_delete: + "open s ==> open(s - {x})" + using open_diff[of s "{x}"] closed_sing + by blast + +text{* Finite intersection property. I could make it an equivalence in fact. *} + +lemma compact_imp_fip: + assumes "compact s" "\t \ f. closed t" + "\f'. finite f' \ f' \ f --> (s \ (\ f') \ {})" + shows "s \ (\ f) \ {}" +proof + assume as:"s \ (\ f) = {}" + hence "s \ \op - UNIV ` f" by auto + moreover have "Ball (op - UNIV ` f) open" using open_diff closed_diff using assms(2) by auto + ultimately obtain f' where f':"f' \ op - UNIV ` f" "finite f'" "s \ \f'" using assms(1)[unfolded compact_eq_heine_borel, THEN spec[where x="(\t. UNIV - t) ` f"]] by auto + hence "finite (op - UNIV ` f') \ op - UNIV ` f' \ f" by(auto simp add: Diff_Diff_Int) + hence "s \ \op - UNIV ` f' \ {}" using assms(3)[THEN spec[where x="op - UNIV ` f'"]] by auto + thus False using f'(3) unfolding subset_eq and Union_iff by blast +qed + +subsection{* Bounded closed nest property (proof does not use Heine-Borel). *} + +lemma bounded_closed_nest: + assumes "\n. closed(s n)" "\n. (s n \ {})" + "(\m n. m \ n --> s n \ s m)" "bounded(s 0)" + shows "\ a::real^'a. \n::nat. a \ s(n)" +proof- + from assms(2) obtain x where x:"\n::nat. x n \ s n" using choice[of "\n x. x\ s n"] by auto + from assms(4,1) have *:"compact (s 0)" using bounded_closed_imp_compact[of "s 0"] by auto + + then obtain l r where lr:"l\s 0" "\m n. m < n \ r m < r n" "((x \ r) ---> l) sequentially" + unfolding compact_def apply(erule_tac x=x in allE) using x using assms(3) by blast + + { fix n::nat + { fix e::real assume "e>0" + with lr(3) obtain N where N:"\m\N. dist ((x \ r) m) l < e" unfolding Lim_sequentially by auto + hence "dist ((x \ r) (max N n)) l < e" by auto + moreover + have "r (max N n) \ n" using lr(2) using monotone_bigger[of r "max N n"] by auto + hence "(x \ r) (max N n) \ s n" + using x apply(erule_tac x=n in allE) + using x apply(erule_tac x="r (max N n)" in allE) + using assms(3) apply(erule_tac x=n in allE)apply( erule_tac x="r (max N n)" in allE) by auto + ultimately have "\y\s n. dist y l < e" by auto + } + hence "l \ s n" using closed_approachable[of "s n" l] assms(1) by blast + } + thus ?thesis by auto +qed + +text{* Decreasing case does not even need compactness, just completeness. *} + +lemma decreasing_closed_nest: + assumes "\n. closed(s n)" + "\n. (s n \ {})" + "\m n. m \ n --> s n \ s m" + "\e>0. \n. \x \ (s n). \ y \ (s n). dist x y < e" + shows "\a::real^'a. \n::nat. a \ s n" +proof- + have "\n. \ x. 
x\s n" using assms(2) by auto + hence "\t. \n. t n \ s n" using choice[of "\ n x. x \ s n"] by auto + then obtain t where t: "\n. t n \ s n" by auto + { fix e::real assume "e>0" + then obtain N where N:"\x\s N. \y\s N. dist x y < e" using assms(4) by auto + { fix m n ::nat assume "N \ m \ N \ n" + hence "t m \ s N" "t n \ s N" using assms(3) t unfolding subset_eq t by blast+ + hence "dist (t m) (t n) < e" using N by auto + } + hence "\N. \m n. N \ m \ N \ n \ dist (t m) (t n) < e" by auto + } + hence "cauchy t" unfolding cauchy_def by auto + then obtain l where l:"(t ---> l) sequentially" using complete_univ unfolding complete_def by auto + { fix n::nat + { fix e::real assume "e>0" + then obtain N::nat where N:"\n\N. dist (t n) l < e" using l[unfolded Lim_sequentially] by auto + have "t (max n N) \ s n" using assms(3) unfolding subset_eq apply(erule_tac x=n in allE) apply (erule_tac x="max n N" in allE) using t by auto + hence "\y\s n. dist y l < e" apply(rule_tac x="t (max n N)" in bexI) using N by auto + } + hence "l \ s n" using closed_approachable[of "s n" l] assms(1) by auto + } + then show ?thesis by auto +qed + +text{* Strengthen it to the intersection actually being a singleton. *} + +lemma decreasing_closed_nest_sing: + assumes "\n. closed(s n)" + "\n. s n \ {}" + "\m n. m \ n --> s n \ s m" + "\e>0. \n. \x \ (s n). \ y\(s n). dist x y < e" + shows "\a::real^'a. \ {t. (\n::nat. t = s n)} = {a}" +proof- + obtain a where a:"\n. a \ s n" using decreasing_closed_nest[of s] using assms by auto + { fix b assume b:"b \ \{t. \n. t = s n}" + { fix e::real assume "e>0" + hence "dist a b < e" using assms(4 )using b using a by blast + } + hence "dist a b = 0" by (metis dist_eq_0 dist_nz real_less_def) + } + with a have "\{t. \n. t = s n} = {a}" unfolding dist_eq_0 by auto + thus ?thesis by auto +qed + +text{* Cauchy-type criteria for uniform convergence. *} + +lemma uniformly_convergent_eq_cauchy: fixes s::"nat \ 'b \ real^'a" shows + "(\l. \e>0. \N. \n x. N \ n \ P x --> dist(s n x)(l x) < e) \ + (\e>0. \N. \m n x. N \ m \ N \ n \ P x --> dist (s m x) (s n x) < e)" (is "?lhs = ?rhs") +proof(rule) + assume ?lhs + then obtain l where l:"\e>0. \N. \n x. N \ n \ P x \ dist (s n x) (l x) < e" by auto + { fix e::real assume "e>0" + then obtain N::nat where N:"\n x. N \ n \ P x \ dist (s n x) (l x) < e / 2" using l[THEN spec[where x="e/2"]] by auto + { fix n m::nat and x::"'b" assume "N \ m \ N \ n \ P x" + hence "dist (s m x) (s n x) < e" + using N[THEN spec[where x=m], THEN spec[where x=x]] + using N[THEN spec[where x=n], THEN spec[where x=x]] + using dist_triangle_half_l[of "s m x" "l x" e "s n x"] by auto } + hence "\N. \m n x. N \ m \ N \ n \ P x --> dist (s m x) (s n x) < e" by auto } + thus ?rhs by auto +next + assume ?rhs + hence "\x. P x \ cauchy (\n. s n x)" unfolding cauchy_def apply auto by (erule_tac x=e in allE)auto + then obtain l where l:"\x. P x \ ((\n. s n x) ---> l x) sequentially" unfolding convergent_eq_cauchy[THEN sym] + using choice[of "\x l. P x \ ((\n. s n x) ---> l) sequentially"] by auto + { fix e::real assume "e>0" + then obtain N where N:"\m n x. N \ m \ N \ n \ P x \ dist (s m x) (s n x) < e/2" + using `?rhs`[THEN spec[where x="e/2"]] by auto + { fix x assume "P x" + then obtain M where M:"\n\M. 
dist (s n x) (l x) < e/2" + using l[THEN spec[where x=x], unfolded Lim_sequentially] using `e>0` by(auto elim!: allE[where x="e/2"]) + fix n::nat assume "n\N" + hence "dist(s n x)(l x) < e" using `P x`and N[THEN spec[where x=n], THEN spec[where x="N+M"], THEN spec[where x=x]] + using M[THEN spec[where x="N+M"]] and dist_triangle_half_l[of "s n x" "s (N+M) x" e "l x"] by (auto simp add: dist_sym) } + hence "\N. \n x. N \ n \ P x \ dist(s n x)(l x) < e" by auto } + thus ?lhs by auto +qed + +lemma uniformly_cauchy_imp_uniformly_convergent: + assumes "\e>0.\N. \m (n::nat) x. N \ m \ N \ n \ P x --> dist(s m x)(s n x) < e" + "\x. P x --> (\e>0. \N. \n. N \ n --> dist(s n x)(l x) < e)" + shows "\e>0. \N. \n x. N \ n \ P x --> dist(s n x)(l x) < e" +proof- + obtain l' where l:"\e>0. \N. \n x. N \ n \ P x \ dist (s n x) (l' x) < e" + using assms(1) unfolding uniformly_convergent_eq_cauchy[THEN sym] by auto + moreover + { fix x assume "P x" + hence "l x = l' x" using Lim_unique[OF trivial_limit_sequentially, of "\n. s n x" "l x" "l' x"] + using l and assms(2) unfolding Lim_sequentially by blast } + ultimately show ?thesis by auto +qed + +subsection{* Define continuity over a net to take in restrictions of the set. *} + +definition "continuous net f \ (f ---> f(netlimit net)) net" + +lemma continuous_trivial_limit: + "trivial_limit net ==> continuous net f" + unfolding continuous_def tendsto_def eventually_def by auto + +lemma continuous_within: "continuous (at x within s) f \ (f ---> f(x)) (at x within s)" + unfolding continuous_def + unfolding tendsto_def + using netlimit_within[of x s] + unfolding eventually_def + by (cases "trivial_limit (at x within s)") auto + +lemma continuous_at: "continuous (at x) f \ (f ---> f(x)) (at x)" using within_UNIV[of x] + using continuous_within[of x UNIV f] by auto + +lemma continuous_at_within: + assumes "continuous (at x) f" shows "continuous (at x within s) f" +proof(cases "x islimpt s") + case True show ?thesis using assms unfolding continuous_def and netlimit_at + using Lim_at_within[of f "f x" x s] + unfolding netlimit_within[unfolded trivial_limit_within not_not, OF True] by blast +next + case False thus ?thesis unfolding continuous_def and netlimit_at + unfolding Lim and trivial_limit_within by auto +qed + +text{* Derive the epsilon-delta forms, which we often use as "definitions" *} + +lemma continuous_within_eps_delta: + "continuous (at x within s) f \ (\e>0. \d>0. \x'\ s. dist x' x < d --> dist (f x') (f x) < e)" + unfolding continuous_within and Lim_within + apply auto unfolding dist_nz[THEN sym] apply(auto elim!:allE) apply(rule_tac x=d in exI) by auto + +lemma continuous_at_eps_delta: "continuous (at x) f \ (\e>0. \d>0. + \x'. dist x' x < d --> dist(f x')(f x) < e)" + using continuous_within_eps_delta[of x UNIV f] + unfolding within_UNIV by blast + +text{* Versions in terms of open balls. *} + +lemma continuous_within_ball: + "continuous (at x within s) f \ (\e>0. \d>0. + f ` (ball x d \ s) \ ball (f x) e)" (is "?lhs = ?rhs") +proof + assume ?lhs + { fix e::real assume "e>0" + then obtain d where d: "d>0" "\xa\s. 0 < dist xa x \ dist xa x < d \ dist (f xa) (f x) < e" + using `?lhs`[unfolded continuous_within Lim_within] by auto + { fix y assume "y\f ` (ball x d \ s)" + hence "y \ ball (f x) e" using d(2) unfolding dist_nz[THEN sym] + apply (auto simp add: dist_sym mem_ball) apply(erule_tac x=xa in ballE) apply auto unfolding dist_refl using `e>0` by auto + } + hence "\d>0. 
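+
+  (* Informally, continuous_within_eps_delta unfolds continuity of f at x within s into the
+     familiar epsilon-delta form: for every e > 0 there is a d > 0 such that every x' in s
+     with dist x' x < d satisfies dist (f x') (f x) < e.  continuous_within_ball restates the
+     same condition as f mapping ball x d inter s into ball (f x) e. *)
+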
f ` (ball x d \ s) \ ball (f x) e" using `d>0` unfolding subset_eq ball_def by (auto simp add: dist_sym) } + thus ?rhs by auto +next + assume ?rhs thus ?lhs unfolding continuous_within Lim_within ball_def subset_eq + apply (auto simp add: dist_sym) apply(erule_tac x=e in allE) by auto +qed + +lemma continuous_at_ball: fixes f::"real^'a \ real^'a" + shows "continuous (at x) f \ (\e>0. \d>0. f ` (ball x d) \ ball (f x) e)" (is "?lhs = ?rhs") +proof + assume ?lhs thus ?rhs unfolding continuous_at Lim_at subset_eq Ball_def Bex_def image_iff mem_ball + apply auto apply(erule_tac x=e in allE) apply auto apply(rule_tac x=d in exI) apply auto apply(erule_tac x=xa in allE) apply (auto simp add: dist_refl dist_sym dist_nz) + unfolding dist_nz[THEN sym] by (auto simp add: dist_refl) +next + assume ?rhs thus ?lhs unfolding continuous_at Lim_at subset_eq Ball_def Bex_def image_iff mem_ball + apply auto apply(erule_tac x=e in allE) apply auto apply(rule_tac x=d in exI) apply auto apply(erule_tac x="f xa" in allE) by (auto simp add: dist_refl dist_sym dist_nz) +qed + +text{* For setwise continuity, just start from the epsilon-delta definitions. *} + +definition "continuous_on s f \ (\x \ s. \e>0. \d::real>0. \x' \ s. dist x' x < d --> dist (f x') (f x) < e)" + + +definition "uniformly_continuous_on s f \ + (\e>0. \d>0. \x\s. \ x'\s. dist x' x < d + --> dist (f x') (f x) < e)" + +text{* Some simple consequential lemmas. *} + +lemma uniformly_continuous_imp_continuous: + " uniformly_continuous_on s f ==> continuous_on s f" + unfolding uniformly_continuous_on_def continuous_on_def by blast + +lemma continuous_at_imp_continuous_within: + "continuous (at x) f ==> continuous (at x within s) f" + unfolding continuous_within continuous_at using Lim_at_within by auto + +lemma continuous_at_imp_continuous_on: assumes "(\x \ s. continuous (at x) f)" + shows "continuous_on s f" +proof(simp add: continuous_at continuous_on_def, rule, rule, rule) + fix x and e::real assume "x\s" "e>0" + hence "eventually (\xa. dist (f xa) (f x) < e) (at x)" using assms unfolding continuous_at tendsto_def by auto + then obtain d where d:"d>0" "\xa. 0 < dist xa x \ dist xa x < d \ dist (f xa) (f x) < e" unfolding eventually_at by auto + { fix x' assume "\ 0 < dist x' x" + hence "x=x'" + using dist_nz[of x' x] by auto + hence "dist (f x') (f x) < e" using dist_refl[of "f x'"] `e>0` by auto + } + thus "\d>0. \x'\s. dist x' x < d \ dist (f x') (f x) < e" using d by auto +qed + +lemma continuous_on_eq_continuous_within: + "continuous_on s f \ (\x \ s. continuous (at x within s) f)" (is "?lhs = ?rhs") +proof + assume ?rhs + { fix x assume "x\s" + fix e::real assume "e>0" + assume "\d>0. \xa\s. 0 < dist xa x \ dist xa x < d \ dist (f xa) (f x) < e" + then obtain d where "d>0" and d:"\xa\s. 0 < dist xa x \ dist xa x < d \ dist (f xa) (f x) < e" by auto + { fix x' assume as:"x'\s" "dist x' x < d" + hence "dist (f x') (f x) < e" using dist_refl[of "f x'"] `e>0` d `x'\s` dist_eq_0[of x' x] dist_pos_le[of x' x] as(2) by (metis dist_eq_0 dist_nz) } + hence "\d>0. \x'\s. dist x' x < d \ dist (f x') (f x) < e" using `d>0` by (auto simp add: dist_refl) + } + thus ?lhs using `?rhs` unfolding continuous_on_def continuous_within Lim_within by auto +next + assume ?lhs + thus ?rhs unfolding continuous_on_def continuous_within Lim_within by blast +qed + +lemma continuous_on: + "continuous_on s f \ (\x \ s. 
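+
+  (* Informally, continuous_on s f asks for an epsilon-delta condition at each point of s,
+     with d allowed to depend on both e and the point, whereas uniformly_continuous_on s f
+     demands, for each e, a single d that works simultaneously at every point of s. *)
+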
(f ---> f(x)) (at x within s))" + by (auto simp add: continuous_on_eq_continuous_within continuous_within) + +lemma continuous_on_eq_continuous_at: + "open s ==> (continuous_on s f \ (\x \ s. continuous (at x) f))" + by (auto simp add: continuous_on continuous_at Lim_within_open) + +lemma continuous_within_subset: + "continuous (at x within s) f \ t \ s + ==> continuous (at x within t) f" + unfolding continuous_within by(metis Lim_within_subset) + +lemma continuous_on_subset: + "continuous_on s f \ t \ s ==> continuous_on t f" + unfolding continuous_on by (metis subset_eq Lim_within_subset) + +lemma continuous_on_interior: + "continuous_on s f \ x \ interior s ==> continuous (at x) f" +unfolding interior_def +apply simp +by (meson continuous_on_eq_continuous_at continuous_on_subset) + +lemma continuous_on_eq: + "(\x \ s. f x = g x) \ continuous_on s f + ==> continuous_on s g" + by (simp add: continuous_on_def) + +text{* Characterization of various kinds of continuity in terms of sequences. *} + +lemma continuous_within_sequentially: + "continuous (at a within s) f \ + (\x. (\n::nat. x n \ s) \ (x ---> a) sequentially + --> ((f o x) ---> f a) sequentially)" (is "?lhs = ?rhs") +proof + assume ?lhs + { fix x::"nat \ real^'a" assume x:"\n. x n \ s" "\e>0. \N. \n\N. dist (x n) a < e" + fix e::real assume "e>0" + from `?lhs` obtain d where "d>0" and d:"\x\s. 0 < dist x a \ dist x a < d \ dist (f x) (f a) < e" unfolding continuous_within Lim_within using `e>0` by auto + from x(2) `d>0` obtain N where N:"\n\N. dist (x n) a < d" by auto + hence "\N. \n\N. dist ((f \ x) n) (f a) < e" + apply(rule_tac x=N in exI) using N d apply auto using x(1) + apply(erule_tac x=n in allE) apply(erule_tac x=n in allE) + apply(erule_tac x="x n" in ballE) apply auto unfolding dist_nz[THEN sym] apply auto unfolding dist_refl using `e>0` by auto + } + thus ?rhs unfolding continuous_within unfolding Lim_sequentially by simp +next + assume ?rhs + { fix e::real assume "e>0" + assume "\ (\d>0. \x\s. 0 < dist x a \ dist x a < d \ dist (f x) (f a) < e)" + hence "\d. \x. d>0 \ x\s \ (0 < dist x a \ dist x a < d \ \ dist (f x) (f a) < e)" by blast + then obtain x where x:"\d>0. x d \ s \ (0 < dist (x d) a \ dist (x d) a < d \ \ dist (f (x d)) (f a) < e)" + using choice[of "\d x.0 x\s \ (0 < dist x a \ dist x a < d \ \ dist (f x) (f a) < e)"] by auto + { fix d::real assume "d>0" + hence "\N::nat. inverse (real (N + 1)) < d" using real_arch_inv[of d] by (auto, rule_tac x="n - 1" in exI)auto + then obtain N::nat where N:"inverse (real (N + 1)) < d" by auto + { fix n::nat assume n:"n\N" + hence "dist (x (inverse (real (n + 1)))) a < inverse (real (n + 1))" using x[THEN spec[where x="inverse (real (n + 1))"]] by auto + moreover have "inverse (real (n + 1)) < d" using N n by (auto, metis Suc_le_mono le_SucE less_imp_inverse_less nat_le_real_less order_less_trans real_of_nat_Suc real_of_nat_Suc_gt_zero) + ultimately have "dist (x (inverse (real (n + 1)))) a < d" by auto + } + hence "\N::nat. \n\N. dist (x (inverse (real (n + 1)))) a < d" by auto + } + hence "(\n::nat. x (inverse (real (n + 1))) \ s) \ (\e>0. \N::nat. \n\N. dist (x (inverse (real (n + 1)))) a < e)" using x by auto + hence "\e>0. \N::nat. \n\N. dist (f (x (inverse (real (n + 1))))) (f a) < e" using `?rhs`[THEN spec[where x="\n::nat. 
x (inverse (real (n+1)))"], unfolded Lim_sequentially] by auto + hence "False" apply(erule_tac x=e in allE) using `e>0` using x by auto + } + thus ?lhs unfolding continuous_within unfolding Lim_within unfolding Lim_sequentially by blast +qed + +lemma continuous_at_sequentially: + "continuous (at a) f \ (\x. (x ---> a) sequentially + --> ((f o x) ---> f a) sequentially)" + using continuous_within_sequentially[of a UNIV f] unfolding within_UNIV by auto + +lemma continuous_on_sequentially: + "continuous_on s f \ (\x. \a \ s. (\n. x(n) \ s) \ (x ---> a) sequentially + --> ((f o x) ---> f(a)) sequentially)" (is "?lhs = ?rhs") +proof + assume ?rhs thus ?lhs using continuous_within_sequentially[of _ s f] unfolding continuous_on_eq_continuous_within by auto +next + assume ?lhs thus ?rhs unfolding continuous_on_eq_continuous_within using continuous_within_sequentially[of _ s f] by auto +qed + +lemma uniformly_continuous_on_sequentially: + "uniformly_continuous_on s f \ (\x y. (\n. x n \ s) \ (\n. y n \ s) \ + ((\n. x n - y n) ---> 0) sequentially + \ ((\n. f(x n) - f(y n)) ---> 0) sequentially)" (is "?lhs = ?rhs") +proof + assume ?lhs + { fix x y assume x:"\n. x n \ s" and y:"\n. y n \ s" and xy:"((\n. x n - y n) ---> 0) sequentially" + { fix e::real assume "e>0" + then obtain d where "d>0" and d:"\x\s. \x'\s. dist x' x < d \ dist (f x') (f x) < e" + using `?lhs`[unfolded uniformly_continuous_on_def, THEN spec[where x=e]] by auto + obtain N where N:"\n\N. norm (x n - y n - 0) < d" using xy[unfolded Lim_sequentially dist_def] and `d>0` by auto + { fix n assume "n\N" + hence "norm (f (x n) - f (y n) - 0) < e" + using N[THEN spec[where x=n]] using d[THEN bspec[where x="x n"], THEN bspec[where x="y n"]] using x and y + unfolding dist_sym and dist_def by simp } + hence "\N. \n\N. norm (f (x n) - f (y n) - 0) < e" by auto } + hence "((\n. f(x n) - f(y n)) ---> 0) sequentially" unfolding Lim_sequentially and dist_def by auto } + thus ?rhs by auto +next + assume ?rhs + { assume "\ ?lhs" + then obtain e where "e>0" "\d>0. \x\s. \x'\s. dist x' x < d \ \ dist (f x') (f x) < e" unfolding uniformly_continuous_on_def by auto + then obtain fa where fa:"\x. 0 < x \ fst (fa x) \ s \ snd (fa x) \ s \ dist (fst (fa x)) (snd (fa x)) < x \ \ dist (f (fst (fa x))) (f (snd (fa x))) < e" + using choice[of "\d x. d>0 \ fst x \ s \ snd x \ s \ dist (snd x) (fst x) < d \ \ dist (f (snd x)) (f (fst x)) < e"] unfolding Bex_def + by (auto simp add: dist_sym) + def x \ "\n::nat. fst (fa (inverse (real n + 1)))" + def y \ "\n::nat. snd (fa (inverse (real n + 1)))" + have xyn:"\n. x n \ s \ y n \ s" and xy0:"\n. dist (x n) (y n) < inverse (real n + 1)" and fxy:"\n. \ dist (f (x n)) (f (y n)) < e" + unfolding x_def and y_def using fa by auto + have *:"\x y. dist (x - y) 0 = dist x y" unfolding dist_def by auto + { fix e::real assume "e>0" + then obtain N::nat where "N \ 0" and N:"0 < inverse (real N) \ inverse (real N) < e" unfolding real_arch_inv[of e] by auto + { fix n::nat assume "n\N" + hence "inverse (real n + 1) < inverse (real N)" using real_of_nat_ge_zero and `N\0` by auto + also have "\ < e" using N by auto + finally have "inverse (real n + 1) < e" by auto + hence "dist (x n - y n) 0 < e" unfolding * using xy0[THEN spec[where x=n]] by auto } + hence "\N. \n\N. dist (x n - y n) 0 < e" by auto } + hence "\e>0. \N. \n\N. 
dist (f (x n) - f (y n)) 0 < e" using `?rhs`[THEN spec[where x=x], THEN spec[where x=y]] and xyn unfolding Lim_sequentially by auto + hence False unfolding * using fxy and `e>0` by auto } + thus ?lhs unfolding uniformly_continuous_on_def by blast +qed + +text{* The usual transformation theorems. *} + +lemma continuous_transform_within: + assumes "0 < d" "x \ s" "\x' \ s. dist x' x < d --> f x' = g x'" + "continuous (at x within s) f" + shows "continuous (at x within s) g" +proof- + { fix e::real assume "e>0" + then obtain d' where d':"d'>0" "\xa\s. 0 < dist xa x \ dist xa x < d' \ dist (f xa) (f x) < e" using assms(4) unfolding continuous_within Lim_within by auto + { fix x' assume "x'\s" "0 < dist x' x" "dist x' x < (min d d')" + hence "dist (f x') (g x) < e" using assms(2,3) apply(erule_tac x=x in ballE) unfolding dist_refl using d' by auto } + hence "\xa\s. 0 < dist xa x \ dist xa x < (min d d') \ dist (f xa) (g x) < e" by blast + hence "\d>0. \xa\s. 0 < dist xa x \ dist xa x < d \ dist (f xa) (g x) < e" using `d>0` `d'>0` by(rule_tac x="min d d'" in exI)auto } + hence "(f ---> g x) (at x within s)" unfolding Lim_within using assms(1) by auto + thus ?thesis unfolding continuous_within using Lim_transform_within[of d s x f g "g x"] using assms by blast +qed + +lemma continuous_transform_at: + assumes "0 < d" "\x'. dist x' x < d --> f x' = g x'" + "continuous (at x) f" + shows "continuous (at x) g" +proof- + { fix e::real assume "e>0" + then obtain d' where d':"d'>0" "\xa. 0 < dist xa x \ dist xa x < d' \ dist (f xa) (f x) < e" using assms(3) unfolding continuous_at Lim_at by auto + { fix x' assume "0 < dist x' x" "dist x' x < (min d d')" + hence "dist (f x') (g x) < e" using assms(2) apply(erule_tac x=x in allE) unfolding dist_refl using d' by auto + } + hence "\xa. 0 < dist xa x \ dist xa x < (min d d') \ dist (f xa) (g x) < e" by blast + hence "\d>0. \xa. 0 < dist xa x \ dist xa x < d \ dist (f xa) (g x) < e" using `d>0` `d'>0` by(rule_tac x="min d d'" in exI)auto + } + hence "(f ---> g x) (at x)" unfolding Lim_at using assms(1) by auto + thus ?thesis unfolding continuous_at using Lim_transform_at[of d x f g "g x"] using assms by blast +qed + +text{* Combination results for pointwise continuity. *} + +lemma continuous_const: "continuous net (\x::'a::zero_neq_one. c)" + by(auto simp add: continuous_def Lim_const) + +lemma continuous_cmul: + "continuous net f ==> continuous net (\x. c *s f x)" + by(auto simp add: continuous_def Lim_cmul) + +lemma continuous_neg: + "continuous net f ==> continuous net (\x. -(f x))" + by(auto simp add: continuous_def Lim_neg) + +lemma continuous_add: + "continuous net f \ continuous net g + ==> continuous net (\x. f x + g x)" + by(auto simp add: continuous_def Lim_add) + +lemma continuous_sub: + "continuous net f \ continuous net g + ==> continuous net (\x. f(x) - g(x))" + by(auto simp add: continuous_def Lim_sub) + +text{* Same thing for setwise continuity. *} + +lemma continuous_on_const: + "continuous_on s (\x. c)" + unfolding continuous_on_eq_continuous_within using continuous_const by blast + +lemma continuous_on_cmul: + "continuous_on s f ==> continuous_on s (\x. c *s (f x))" + unfolding continuous_on_eq_continuous_within using continuous_cmul by blast + +lemma continuous_on_neg: + "continuous_on s f ==> continuous_on s (\x. -(f x))" + unfolding continuous_on_eq_continuous_within using continuous_neg by blast + +lemma continuous_on_add: + "continuous_on s f \ continuous_on s g + ==> continuous_on s (\x. 
f x + g x)" + unfolding continuous_on_eq_continuous_within using continuous_add by blast + +lemma continuous_on_sub: + "continuous_on s f \ continuous_on s g + ==> continuous_on s (\x. f(x) - g(x))" + unfolding continuous_on_eq_continuous_within using continuous_sub by blast + +text{* Same thing for uniform continuity, using sequential formulations. *} + +lemma uniformly_continuous_on_const: + "uniformly_continuous_on s (\x. c)" + unfolding uniformly_continuous_on_sequentially using Lim_const[of 0] by auto + +lemma uniformly_continuous_on_cmul: + assumes "uniformly_continuous_on s f" + shows "uniformly_continuous_on s (\x. c *s f(x))" +proof- + { fix x y assume "((\n. f (x n) - f (y n)) ---> 0) sequentially" + hence "((\n. c *s f (x n) - c *s f (y n)) ---> 0) sequentially" + using Lim_cmul[of "(\n. f (x n) - f (y n))" 0 sequentially c] + unfolding vector_smult_rzero vector_ssub_ldistrib[of c] by auto + } + thus ?thesis using assms unfolding uniformly_continuous_on_sequentially by auto +qed + +lemma uniformly_continuous_on_neg: + "uniformly_continuous_on s f + ==> uniformly_continuous_on s (\x. -(f x))" + using uniformly_continuous_on_cmul[of s f "-1"] unfolding pth_3 by auto + +lemma uniformly_continuous_on_add: + assumes "uniformly_continuous_on s f" "uniformly_continuous_on s g" + shows "uniformly_continuous_on s (\x. f(x) + g(x) ::real^'n)" +proof- + have *:"\fx fy gx gy::real^'n. fx - fy + (gx - gy) = fx + gx - (fy + gy)" by auto + { fix x y assume "((\n. f (x n) - f (y n)) ---> 0) sequentially" + "((\n. g (x n) - g (y n)) ---> 0) sequentially" + hence "((\xa. f (x xa) - f (y xa) + (g (x xa) - g (y xa))) ---> 0 + 0) sequentially" + using Lim_add[of "\ n. f (x n) - f (y n)" 0 sequentially "\ n. g (x n) - g (y n)" 0] by auto + hence "((\n. f (x n) + g (x n) - (f (y n) + g (y n))) ---> 0) sequentially" unfolding Lim_sequentially and * by auto } + thus ?thesis using assms unfolding uniformly_continuous_on_sequentially by auto +qed + +lemma uniformly_continuous_on_sub: + "uniformly_continuous_on s f \ uniformly_continuous_on s g + ==> uniformly_continuous_on s (\x. f x - g x)" + unfolding ab_diff_minus + using uniformly_continuous_on_add[of s f "\x. - g x"] + using uniformly_continuous_on_neg[of s g] by auto + +text{* Identity function is continuous in every sense. *} + +lemma continuous_within_id: + "continuous (at a within s) (\x. x)" + unfolding continuous_within Lim_within by auto + +lemma continuous_at_id: + "continuous (at a) (\x. x)" + unfolding continuous_at Lim_at by auto + +lemma continuous_on_id: + "continuous_on s (\x. x)" + unfolding continuous_on Lim_within by auto + +lemma uniformly_continuous_on_id: + "uniformly_continuous_on s (\x. x)" + unfolding uniformly_continuous_on_def by auto + +text{* Continuity of all kinds is preserved under composition. *} + +lemma continuous_within_compose: + assumes "continuous (at x within s) f" "continuous (at (f x) within f ` s) g" + shows "continuous (at x within s) (g o f)" +proof- + { fix e::real assume "e>0" + with assms(2)[unfolded continuous_within Lim_within] obtain d where "d>0" and d:"\xa\f ` s. 0 < dist xa (f x) \ dist xa (f x) < d \ dist (g xa) (g (f x)) < e" by auto + from assms(1)[unfolded continuous_within Lim_within] obtain d' where "d'>0" and d':"\xa\s. 
0 < dist xa x \ dist xa x < d' \ dist (f xa) (f x) < d" using `d>0` by auto + { fix y assume as:"y\s" "0 < dist y x" "dist y x < d'" + hence "dist (f y) (f x) < d" using d'[THEN bspec[where x=y]] by (auto simp add:dist_sym) + hence "dist (g (f y)) (g (f x)) < e" using as(1) d[THEN bspec[where x="f y"]] unfolding dist_nz[THEN sym] using `e>0` by (auto simp add: dist_refl) } + hence "\d>0. \xa\s. 0 < dist xa x \ dist xa x < d \ dist (g (f xa)) (g (f x)) < e" using `d'>0` by auto } + thus ?thesis unfolding continuous_within Lim_within by auto +qed + +lemma continuous_at_compose: + assumes "continuous (at x) f" "continuous (at (f x)) g" + shows "continuous (at x) (g o f)" +proof- + have " continuous (at (f x) within range f) g" using assms(2) using continuous_within_subset[of "f x" UNIV g "range f", unfolded within_UNIV] by auto + thus ?thesis using assms(1) using continuous_within_compose[of x UNIV f g, unfolded within_UNIV] by auto +qed + +lemma continuous_on_compose: + "continuous_on s f \ continuous_on (f ` s) g \ continuous_on s (g o f)" + unfolding continuous_on_eq_continuous_within using continuous_within_compose[of _ s f g] by auto + +lemma uniformly_continuous_on_compose: + assumes "uniformly_continuous_on s f" "uniformly_continuous_on (f ` s) g" + shows "uniformly_continuous_on s (g o f)" +proof- + { fix e::real assume "e>0" + then obtain d where "d>0" and d:"\x\f ` s. \x'\f ` s. dist x' x < d \ dist (g x') (g x) < e" using assms(2) unfolding uniformly_continuous_on_def by auto + obtain d' where "d'>0" "\x\s. \x'\s. dist x' x < d' \ dist (f x') (f x) < d" using `d>0` using assms(1) unfolding uniformly_continuous_on_def by auto + hence "\d>0. \x\s. \x'\s. dist x' x < d \ dist ((g \ f) x') ((g \ f) x) < e" using `d>0` using d by auto } + thus ?thesis using assms unfolding uniformly_continuous_on_def by auto +qed + +text{* Continuity in terms of open preimages. *} + +lemma continuous_at_open: + "continuous (at x) f \ (\t. open t \ f x \ t --> (\s. open s \ x \ s \ (\x' \ s. (f x') \ t)))" (is "?lhs = ?rhs") +proof + assume ?lhs + { fix t assume as: "open t" "f x \ t" + then obtain e where "e>0" and e:"ball (f x) e \ t" unfolding open_contains_ball by auto + + obtain d where "d>0" and d:"\y. 0 < dist y x \ dist y x < d \ dist (f y) (f x) < e" using `e>0` using `?lhs`[unfolded continuous_at Lim_at open_def] by auto + + have "open (ball x d)" using open_ball by auto + moreover have "x \ ball x d" unfolding centre_in_ball using `d>0` by simp + moreover + { fix x' assume "x'\ball x d" hence "f x' \ t" + using e[unfolded subset_eq Ball_def mem_ball, THEN spec[where x="f x'"]] d[THEN spec[where x=x']] + unfolding mem_ball apply (auto simp add: dist_sym) + unfolding dist_nz[THEN sym] using as(2) by auto } + hence "\x'\ball x d. f x' \ t" by auto + ultimately have "\s. open s \ x \ s \ (\x'\s. f x' \ t)" + apply(rule_tac x="ball x d" in exI) by simp } + thus ?rhs by auto +next + assume ?rhs + { fix e::real assume "e>0" + then obtain s where s: "open s" "x \ s" "\x'\s. f x' \ ball (f x) e" using `?rhs`[unfolded continuous_at Lim_at, THEN spec[where x="ball (f x) e"]] + unfolding centre_in_ball[of "f x" e, THEN sym] by auto + then obtain d where "d>0" and d:"ball x d \ s" unfolding open_contains_ball by auto + { fix y assume "0 < dist y x \ dist y x < d" + hence "dist (f y) (f x) < e" using d[unfolded subset_eq Ball_def mem_ball, THEN spec[where x=y]] + using s(3)[THEN bspec[where x=y], unfolded mem_ball] by (auto simp add: dist_sym) } + hence "\d>0. \xa. 
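+
+  (* Informally, continuous_at_open characterises continuity at x topologically: for every open
+     t containing f x there is an open set containing x that f maps into t.
+     continuous_on_open is the relative version: f is continuous on s iff the preimage within s
+     of every relatively open subset of f ` s is relatively open in s. *)
+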
0 < dist xa x \ dist xa x < d \ dist (f xa) (f x) < e" using `d>0` by auto } + thus ?lhs unfolding continuous_at Lim_at by auto +qed + +lemma continuous_on_open: + "continuous_on s f \ + (\t. openin (subtopology euclidean (f ` s)) t + --> openin (subtopology euclidean s) {x \ s. f x \ t})" (is "?lhs = ?rhs") +proof + assume ?lhs + { fix t assume as:"openin (subtopology euclidean (f ` s)) t" + have "{x \ s. f x \ t} \ s" using as[unfolded openin_euclidean_subtopology_iff] by auto + moreover + { fix x assume as':"x\{x \ s. f x \ t}" + then obtain e where e: "e>0" "\x'\f ` s. dist x' (f x) < e \ x' \ t" using as[unfolded openin_euclidean_subtopology_iff, THEN conjunct2, THEN bspec[where x="f x"]] by auto + from this(1) obtain d where d: "d>0" "\xa\s. 0 < dist xa x \ dist xa x < d \ dist (f xa) (f x) < e" using `?lhs`[unfolded continuous_on Lim_within, THEN bspec[where x=x]] using as' by auto + have "\e>0. \x'\s. dist x' x < e \ x' \ {x \ s. f x \ t}" using d e unfolding dist_nz[THEN sym] by (rule_tac x=d in exI, auto simp add: dist_refl) } + ultimately have "openin (subtopology euclidean s) {x \ s. f x \ t}" unfolding openin_euclidean_subtopology_iff by auto } + thus ?rhs unfolding continuous_on Lim_within using openin by auto +next + assume ?rhs + { fix e::real and x assume "x\s" "e>0" + { fix xa x' assume "dist (f xa) (f x) < e" "xa \ s" "x' \ s" "dist (f xa) (f x') < e - dist (f xa) (f x)" + hence "dist (f x') (f x) < e" using dist_triangle[of "f x'" "f x" "f xa"] + by (auto simp add: dist_sym) } + hence "ball (f x) e \ f ` s \ f ` s \ (\xa\ball (f x) e \ f ` s. \ea>0. \x'\f ` s. dist x' xa < ea \ x' \ ball (f x) e \ f ` s)" apply auto + apply(rule_tac x="e - dist (f xa) (f x)" in exI) using `e>0` by (auto simp add: dist_sym) + hence "\xa\{xa \ s. f xa \ ball (f x) e \ f ` s}. \ea>0. \x'\s. dist x' xa < ea \ x' \ {xa \ s. f xa \ ball (f x) e \ f ` s}" + using `?rhs`[unfolded openin_euclidean_subtopology_iff, THEN spec[where x="ball (f x) e \ f ` s"]] by auto + hence "\d>0. \xa\s. 0 < dist xa x \ dist xa x < d \ dist (f xa) (f x) < e" apply(erule_tac x=x in ballE) apply auto unfolding dist_refl using `e>0` `x\s` by (auto simp add: dist_sym) } + thus ?lhs unfolding continuous_on Lim_within by auto +qed + +(* ------------------------------------------------------------------------- *) +(* Similarly in terms of closed sets. *) +(* ------------------------------------------------------------------------- *) + +lemma continuous_on_closed: + "continuous_on s f \ (\t. closedin (subtopology euclidean (f ` s)) t --> closedin (subtopology euclidean s) {x \ s. f x \ t})" (is "?lhs = ?rhs") +proof + assume ?lhs + { fix t + have *:"s - {x \ s. f x \ f ` s - t} = {x \ s. f x \ t}" by auto + have **:"f ` s - (f ` s - (f ` s - t)) = f ` s - t" by auto + assume as:"closedin (subtopology euclidean (f ` s)) t" + hence "closedin (subtopology euclidean (f ` s)) (f ` s - (f ` s - t))" unfolding closedin_def topspace_euclidean_subtopology unfolding ** by auto + hence "closedin (subtopology euclidean s) {x \ s. f x \ t}" using `?lhs`[unfolded continuous_on_open, THEN spec[where x="(f ` s) - t"]] + unfolding openin_closedin_eq topspace_euclidean_subtopology unfolding * by auto } + thus ?rhs by auto +next + assume ?rhs + { fix t + have *:"s - {x \ s. f x \ f ` s - t} = {x \ s. f x \ t}" by auto + assume as:"openin (subtopology euclidean (f ` s)) t" + hence "openin (subtopology euclidean s) {x \ s. 
f x \ t}" using `?rhs`[THEN spec[where x="(f ` s) - t"]] + unfolding openin_closedin_eq topspace_euclidean_subtopology *[THEN sym] closedin_subtopology by auto } + thus ?lhs unfolding continuous_on_open by auto +qed + +text{* Half-global and completely global cases. *} + +lemma continuous_open_in_preimage: + assumes "continuous_on s f" "open t" + shows "openin (subtopology euclidean s) {x \ s. f x \ t}" +proof- + have *:"\x. x \ s \ f x \ t \ x \ s \ f x \ (t \ f ` s)" by auto + have "openin (subtopology euclidean (f ` s)) (t \ f ` s)" + using openin_open_Int[of t "f ` s", OF assms(2)] unfolding openin_open by auto + thus ?thesis using assms(1)[unfolded continuous_on_open, THEN spec[where x="t \ f ` s"]] using * by auto +qed + +lemma continuous_closed_in_preimage: + assumes "continuous_on s f" "closed t" + shows "closedin (subtopology euclidean s) {x \ s. f x \ t}" +proof- + have *:"\x. x \ s \ f x \ t \ x \ s \ f x \ (t \ f ` s)" by auto + have "closedin (subtopology euclidean (f ` s)) (t \ f ` s)" + using closedin_closed_Int[of t "f ` s", OF assms(2)] unfolding Int_commute by auto + thus ?thesis + using assms(1)[unfolded continuous_on_closed, THEN spec[where x="t \ f ` s"]] using * by auto +qed + +lemma continuous_open_preimage: + assumes "continuous_on s f" "open s" "open t" + shows "open {x \ s. f x \ t}" +proof- + obtain T where T: "open T" "{x \ s. f x \ t} = s \ T" + using continuous_open_in_preimage[OF assms(1,3)] unfolding openin_open by auto + thus ?thesis using open_inter[of s T, OF assms(2)] by auto +qed + +lemma continuous_closed_preimage: + assumes "continuous_on s f" "closed s" "closed t" + shows "closed {x \ s. f x \ t}" +proof- + obtain T where T: "closed T" "{x \ s. f x \ t} = s \ T" + using continuous_closed_in_preimage[OF assms(1,3)] unfolding closedin_closed by auto + thus ?thesis using closed_Int[of s T, OF assms(2)] by auto +qed + +lemma continuous_open_preimage_univ: + "\x. continuous (at x) f \ open s \ open {x. f x \ s}" + using continuous_open_preimage[of UNIV f s] open_UNIV continuous_at_imp_continuous_on by auto + +lemma continuous_closed_preimage_univ: + "(\x. continuous (at x) f) \ closed s ==> closed {x. f x \ s}" + using continuous_closed_preimage[of UNIV f s] closed_UNIV continuous_at_imp_continuous_on by auto + +text{* Equality of continuous functions on closure and related results. *} + +lemma continuous_closed_in_preimage_constant: + "continuous_on s f ==> closedin (subtopology euclidean s) {x \ s. f x = a}" + using continuous_closed_in_preimage[of s f "{a}"] closed_sing by auto + +lemma continuous_closed_preimage_constant: + "continuous_on s f \ closed s ==> closed {x \ s. f x = a}" + using continuous_closed_preimage[of s f "{a}"] closed_sing by auto + +lemma continuous_constant_on_closure: + assumes "continuous_on (closure s) f" + "\x \ s. f x = a" + shows "\x \ (closure s). f x = a" + using continuous_closed_preimage_constant[of "closure s" f a] + assms closure_minimal[of s "{x \ closure s. f x = a}"] closure_subset unfolding subset_eq by auto + +lemma image_closure_subset: + assumes "continuous_on (closure s) f" "closed t" "(f ` s) \ t" + shows "f ` (closure s) \ t" +proof- + have "s \ {x \ closure s. f x \ t}" using assms(3) closure_subset by auto + moreover have "closed {x \ closure s. f x \ t}" + using continuous_closed_preimage[OF assms(1)] and assms(2) by auto + ultimately have "closure s = {x \ closure s . f x \ t}" + using closure_minimal[of s "{x \ closure s. 
f x \ t}"] by auto + thus ?thesis by auto +qed + +lemma continuous_on_closure_norm_le: + assumes "continuous_on (closure s) f" "\y \ s. norm(f y) \ b" "x \ (closure s)" + shows "norm(f x) \ b" +proof- + have *:"f ` s \ cball 0 b" using assms(2)[unfolded mem_cball_0[THEN sym]] by auto + show ?thesis + using image_closure_subset[OF assms(1) closed_cball[of 0 b] *] assms(3) + unfolding subset_eq apply(erule_tac x="f x" in ballE) by (auto simp add: dist_def) +qed + +text{* Making a continuous function avoid some value in a neighbourhood. *} + +lemma continuous_within_avoid: + assumes "continuous (at x within s) f" "x \ s" "f x \ a" + shows "\e>0. \y \ s. dist x y < e --> f y \ a" +proof- + obtain d where "d>0" and d:"\xa\s. 0 < dist xa x \ dist xa x < d \ dist (f xa) (f x) < dist (f x) a" + using assms(1)[unfolded continuous_within Lim_within, THEN spec[where x="dist (f x) a"]] assms(3)[unfolded dist_nz] by auto + { fix y assume " y\s" "dist x y < d" + hence "f y \ a" using d[THEN bspec[where x=y]] assms(3)[unfolded dist_nz] + apply auto unfolding dist_nz[THEN sym] by (auto simp add: dist_sym) } + thus ?thesis using `d>0` by auto +qed + +lemma continuous_at_avoid: + assumes "continuous (at x) f" "f x \ a" + shows "\e>0. \y. dist x y < e \ f y \ a" +using assms using continuous_within_avoid[of x UNIV f a, unfolded within_UNIV] by auto + +lemma continuous_on_avoid: + assumes "continuous_on s f" "x \ s" "f x \ a" + shows "\e>0. \y \ s. dist x y < e \ f y \ a" +using assms(1)[unfolded continuous_on_eq_continuous_within, THEN bspec[where x=x], OF assms(2)] continuous_within_avoid[of x s f a] assms(2,3) by auto + +lemma continuous_on_open_avoid: + assumes "continuous_on s f" "open s" "x \ s" "f x \ a" + shows "\e>0. \y. dist x y < e \ f y \ a" +using assms(1)[unfolded continuous_on_eq_continuous_at[OF assms(2)], THEN bspec[where x=x], OF assms(3)] continuous_at_avoid[of x f a] assms(3,4) by auto + +text{* Proving a function is constant by proving open-ness of level set. *} + +lemma continuous_levelset_open_in_cases: + "connected s \ continuous_on s f \ + openin (subtopology euclidean s) {x \ s. f x = a} + ==> (\x \ s. f x \ a) \ (\x \ s. f x = a)" +unfolding connected_clopen using continuous_closed_in_preimage_constant by auto + +lemma continuous_levelset_open_in: + "connected s \ continuous_on s f \ + openin (subtopology euclidean s) {x \ s. f x = a} \ + (\x \ s. f x = a) ==> (\x \ s. f x = a)" +using continuous_levelset_open_in_cases[of s f ] +by meson + +lemma continuous_levelset_open: + assumes "connected s" "continuous_on s f" "open {x \ s. f x = a}" "\x \ s. f x = a" + shows "\x \ s. f x = a" +using continuous_levelset_open_in[OF assms(1,2), of a, unfolded openin_open] using assms (3,4) by auto + +text{* Some arithmetical combinations (more to prove). *} + +lemma open_scaling[intro]: + assumes "c \ 0" "open s" + shows "open((\x. c *s x) ` s)" +proof- + { fix x assume "x \ s" + then obtain e where "e>0" and e:"\x'. 
dist x' x < e \ x' \ s" using assms(2)[unfolded open_def, THEN bspec[where x=x]] by auto + have "e * abs c > 0" using assms(1)[unfolded zero_less_abs_iff[THEN sym]] using real_mult_order[OF `e>0`] by auto + moreover + { fix y assume "dist y (c *s x) < e * \c\" + hence "norm ((1 / c) *s y - x) < e" unfolding dist_def + using norm_mul[of c "(1 / c) *s y - x", unfolded vector_ssub_ldistrib, unfolded vector_smult_assoc] assms(1) + mult_less_imp_less_left[of "abs c" "norm ((1 / c) *s y - x)" e, unfolded real_mult_commute[of "abs c" e]] assms(1)[unfolded zero_less_abs_iff[THEN sym]] by simp + hence "y \ op *s c ` s" using rev_image_eqI[of "(1 / c) *s y" s y "op *s c"] e[THEN spec[where x="(1 / c) *s y"]] assms(1) unfolding dist_def vector_smult_assoc by auto } + ultimately have "\e>0. \x'. dist x' (c *s x) < e \ x' \ op *s c ` s" apply(rule_tac x="e * abs c" in exI) by auto } + thus ?thesis unfolding open_def by auto +qed + +lemma open_negations: + "open s ==> open ((\ x. -x) ` s)" unfolding pth_3 by auto + +lemma open_translation: + assumes "open s" shows "open((\x. a + x) ` s)" +proof- + { fix x have "continuous (at x) (\x. x - a)" using continuous_sub[of "at x" "\x. x" "\x. a"] continuous_at_id[of x] continuous_const[of "at x" a] by auto } + moreover have "{x. x - a \ s} = op + a ` s" apply auto unfolding image_iff apply(rule_tac x="x - a" in bexI) by auto + ultimately show ?thesis using continuous_open_preimage_univ[of "\x. x - a" s] using assms by auto +qed + +lemma open_affinity: + assumes "open s" "c \ 0" + shows "open ((\x. a + c *s x) ` s)" +proof- + have *:"(\x. a + c *s x) = (\x. a + x) \ (\x. c *s x)" unfolding o_def .. + have "op + a ` op *s c ` s = (op + a \ op *s c) ` s" by auto + thus ?thesis using assms open_translation[of "op *s c ` s" a] unfolding * by auto +qed + +lemma interior_translation: "interior ((\x. a + x) ` s) = (\x. a + x) ` (interior s)" +proof (rule set_ext, rule) + fix x assume "x \ interior (op + a ` s)" + then obtain e where "e>0" and e:"ball x e \ op + a ` s" unfolding mem_interior by auto + hence "ball (x - a) e \ s" unfolding subset_eq Ball_def mem_ball dist_def apply auto apply(erule_tac x="a + xa" in allE) unfolding ab_group_add_class.diff_diff_eq[THEN sym] by auto + thus "x \ op + a ` interior s" unfolding image_iff apply(rule_tac x="x - a" in bexI) unfolding mem_interior using `e > 0` by auto +next + fix x assume "x \ op + a ` interior s" + then obtain y e where "e>0" and e:"ball y e \ s" and y:"x = a + y" unfolding image_iff Bex_def mem_interior by auto + { fix z have *:"a + y - z = y + a - z" by auto + assume "z\ball x e" + hence "z - a \ s" using e[unfolded subset_eq, THEN bspec[where x="z - a"]] unfolding mem_ball dist_def y ab_group_add_class.diff_diff_eq2 * by auto + hence "z \ op + a ` s" unfolding image_iff by(auto intro!: bexI[where x="z - a"]) } + hence "ball x e \ op + a ` s" unfolding subset_eq by auto + thus "x \ interior (op + a ` s)" unfolding mem_interior using `e>0` by auto +qed + +subsection {* Preservation of compactness and connectedness under continuous function. *} + +lemma compact_continuous_image: + assumes "continuous_on s f" "compact s" + shows "compact(f ` s)" +proof- + { fix x assume x:"\n::nat. x n \ f ` s" + then obtain y where y:"\n. y n \ s \ x n = f (y n)" unfolding image_iff Bex_def using choice[of "\n xa. xa \ s \ x n = f xa"] by auto + then obtain l r where "l\s" and r:"\m n. 
m < n \ r m < r n" and lr:"((y \ r) ---> l) sequentially" using assms(2)[unfolded compact_def, THEN spec[where x=y]] by auto + { fix e::real assume "e>0" + then obtain d where "d>0" and d:"\x'\s. dist x' l < d \ dist (f x') (f l) < e" using assms(1)[unfolded continuous_on_def, THEN bspec[where x=l], OF `l\s`] by auto + then obtain N::nat where N:"\n\N. dist ((y \ r) n) l < d" using lr[unfolded Lim_sequentially, THEN spec[where x=d]] by auto + { fix n::nat assume "n\N" hence "dist ((x \ r) n) (f l) < e" using N[THEN spec[where x=n]] d[THEN bspec[where x="y (r n)"]] y[THEN spec[where x="r n"]] by auto } + hence "\N. \n\N. dist ((x \ r) n) (f l) < e" by auto } + hence "\l\f ` s. \r. (\m n. m < n \ r m < r n) \ ((x \ r) ---> l) sequentially" unfolding Lim_sequentially using r lr `l\s` by auto } + thus ?thesis unfolding compact_def by auto +qed + +lemma connected_continuous_image: + assumes "continuous_on s f" "connected s" + shows "connected(f ` s)" +proof- + { fix T assume as: "T \ {}" "T \ f ` s" "openin (subtopology euclidean (f ` s)) T" "closedin (subtopology euclidean (f ` s)) T" + have "{x \ s. f x \ T} = {} \ {x \ s. f x \ T} = s" + using assms(1)[unfolded continuous_on_open, THEN spec[where x=T]] + using assms(1)[unfolded continuous_on_closed, THEN spec[where x=T]] + using assms(2)[unfolded connected_clopen, THEN spec[where x="{x \ s. f x \ T}"]] as(3,4) by auto + hence False using as(1,2) + using as(4)[unfolded closedin_def topspace_euclidean_subtopology] by auto } + thus ?thesis unfolding connected_clopen by auto +qed + +text{* Continuity implies uniform continuity on a compact domain. *} + +lemma compact_uniformly_continuous: + assumes "continuous_on s f" "compact s" + shows "uniformly_continuous_on s f" +proof- + { fix x assume x:"x\s" + hence "\xa. \y. 0 < xa \ (y > 0 \ (\x'\s. dist x' x < y \ dist (f x') (f x) < xa))" using assms(1)[unfolded continuous_on_def, THEN bspec[where x=x]] by auto + hence "\fa. \xa>0. \x'\s. fa xa > 0 \ (dist x' x < fa xa \ dist (f x') (f x) < xa)" using choice[of "\e d. e>0 \ d>0 \(\x'\s. (dist x' x < d \ dist (f x') (f x) < e))"] by auto } + then have "\x\s. \y. \xa. 0 < xa \ (\x'\s. y xa > 0 \ (dist x' x < y xa \ dist (f x') (f x) < xa))" by auto + then obtain d where d:"\e>0. \x\s. \x'\s. d x e > 0 \ (dist x' x < d x e \ dist (f x') (f x) < e)" + using bchoice[of s "\x fa. \xa>0. \x'\s. fa xa > 0 \ (dist x' x < fa xa \ dist (f x') (f x) < xa)"] by blast + + { fix e::real assume "e>0" + + { fix x assume "x\s" hence "x \ ball x (d x (e / 2))" unfolding centre_in_ball using d[THEN spec[where x="e/2"]] using `e>0` by auto } + hence "s \ \{ball x (d x (e / 2)) |x. x \ s}" unfolding subset_eq by auto + moreover + { fix b assume "b\{ball x (d x (e / 2)) |x. x \ s}" hence "open b" by auto } + ultimately obtain ea where "ea>0" and ea:"\x\s. \b\{ball x (d x (e / 2)) |x. x \ s}. ball x ea \ b" using heine_borel_lemma[OF assms(2), of "{ball x (d x (e / 2)) | x. 
x\s }"] by auto + + { fix x y assume "x\s" "y\s" and as:"dist y x < ea" + obtain z where "z\s" and z:"ball x ea \ ball z (d z (e / 2))" using ea[THEN bspec[where x=x]] and `x\s` by auto + hence "x\ball z (d z (e / 2))" using `ea>0` unfolding subset_eq by auto + hence "dist (f z) (f x) < e / 2" using d[THEN spec[where x="e/2"]] and `e>0` and `x\s` and `z\s` + by (auto simp add: dist_sym) + moreover have "y\ball z (d z (e / 2))" using as and `ea>0` and z[unfolded subset_eq] + by (auto simp add: dist_sym) + hence "dist (f z) (f y) < e / 2" using d[THEN spec[where x="e/2"]] and `e>0` and `y\s` and `z\s` + by (auto simp add: dist_sym) + ultimately have "dist (f y) (f x) < e" using dist_triangle_half_r[of "f z" "f x" e "f y"] + by (auto simp add: dist_sym) } + then have "\d>0. \x\s. \x'\s. dist x' x < d \ dist (f x') (f x) < e" using `ea>0` by auto } + thus ?thesis unfolding uniformly_continuous_on_def by auto +qed + +text{* Continuity of inverse function on compact domain. *} + +lemma continuous_on_inverse: + assumes "continuous_on s f" "compact s" "\x \ s. g (f x) = x" + shows "continuous_on (f ` s) g" +proof- + have *:"g ` f ` s = s" using assms(3) by (auto simp add: image_iff) + { fix t assume t:"closedin (subtopology euclidean (g ` f ` s)) t" + then obtain T where T: "closed T" "t = s \ T" unfolding closedin_closed unfolding * by auto + have "continuous_on (s \ T) f" using continuous_on_subset[OF assms(1), of "s \ t"] + unfolding T(2) and Int_left_absorb by auto + moreover have "compact (s \ T)" + using assms(2) unfolding compact_eq_bounded_closed + using bounded_subset[of s "s \ T"] and T(1) by auto + ultimately have "closed (f ` t)" using T(1) unfolding T(2) + using compact_continuous_image unfolding compact_eq_bounded_closed by auto + moreover have "{x \ f ` s. g x \ t} = f ` s \ f ` t" using assms(3) unfolding T(2) by auto + ultimately have "closedin (subtopology euclidean (f ` s)) {x \ f ` s. g x \ t}" + unfolding closedin_closed by auto } + thus ?thesis unfolding continuous_on_closed by auto +qed + +subsection{* A uniformly convergent limit of continuous functions is continuous. *} + +lemma continuous_uniform_limit: + assumes "\ (trivial_limit net)" "eventually (\n. continuous_on s (f n)) net" + "\e>0. eventually (\n. \x \ s. norm(f n x - g x) < e) net" + shows "continuous_on s g" +proof- + { fix x and e::real assume "x\s" "e>0" + have "eventually (\n. \x\s. norm (f n x - g x) < e / 3) net" using `e>0` assms(3)[THEN spec[where x="e/3"]] by auto + then obtain n where n:"\xa\s. norm (f n xa - g xa) < e / 3" "continuous_on s (f n)" + using eventually_and[of "(\n. \x\s. norm (f n x - g x) < e / 3)" "(\n. continuous_on s (f n))" net] assms(1,2) eventually_happens by blast + have "e / 3 > 0" using `e>0` by auto + then obtain d where "d>0" and d:"\x'\s. 
dist x' x < d \ dist (f n x') (f n x) < e / 3" + using n(2)[unfolded continuous_on_def, THEN bspec[where x=x], OF `x\s`, THEN spec[where x="e/3"]] by blast + { fix y assume "y\s" "dist y x < d" + hence "dist (f n y) (f n x) < e / 3" using d[THEN bspec[where x=y]] by auto + hence "norm (f n y - g x) < 2 * e / 3" using norm_triangle_lt[of "f n y - f n x" "f n x - g x" "2*e/3"] + using n(1)[THEN bspec[where x=x], OF `x\s`] unfolding dist_def unfolding ab_group_add_class.ab_diff_minus by auto + hence "dist (g y) (g x) < e" unfolding dist_def using n(1)[THEN bspec[where x=y], OF `y\s`] + unfolding norm_minus_cancel[of "f n y - g y", THEN sym] using norm_triangle_lt[of "f n y - g x" "g y - f n y" e] by (auto simp add: uminus_add_conv_diff) } + hence "\d>0. \x'\s. dist x' x < d \ dist (g x') (g x) < e" using `d>0` by auto } + thus ?thesis unfolding continuous_on_def by auto +qed + +subsection{* Topological properties of linear functions. *} + +lemma linear_lim_0: fixes f::"real^'a \ real^'b" + assumes "linear f" shows "(f ---> 0) (at (0))" +proof- + obtain B where "B>0" and B:"\x. norm (f x) \ B * norm x" using linear_bounded_pos[OF assms] by auto + { fix e::real assume "e>0" + { fix x::"real^'a" assume "norm x < e / B" + hence "B * norm x < e" using `B>0` using mult_strict_right_mono[of "norm x" " e / B" B] unfolding real_mult_commute by auto + hence "norm (f x) < e" using B[THEN spec[where x=x]] `B>0` using order_le_less_trans[of "norm (f x)" "B * norm x" e] by auto } + moreover have "e / B > 0" using `e>0` `B>0` divide_pos_pos by auto + ultimately have "\d>0. \x. 0 < dist x 0 \ dist x 0 < d \ dist (f x) 0 < e" unfolding dist_def by auto } + thus ?thesis unfolding Lim_at by auto +qed + +lemma linear_continuous_at: + assumes "linear f" shows "continuous (at a) f" + unfolding continuous_at Lim_at_zero[of f "f a" a] using linear_lim_0[OF assms] + unfolding Lim_null[of "\x. f (a + x)"] unfolding linear_sub[OF assms, THEN sym] by auto + +lemma linear_continuous_within: + "linear f ==> continuous (at x within s) f" + using continuous_at_imp_continuous_within[of x f s] using linear_continuous_at[of f] by auto + +lemma linear_continuous_on: + "linear f ==> continuous_on s f" + using continuous_at_imp_continuous_on[of s f] using linear_continuous_at[of f] by auto + +text{* Also bilinear functions, in composition form. *} + +lemma bilinear_continuous_at_compose: + "continuous (at x) f \ continuous (at x) g \ bilinear h + ==> continuous (at x) (\x. h (f x) (g x))" + unfolding continuous_at using Lim_bilinear[of f "f x" "(at x)" g "g x" h] by auto + +lemma bilinear_continuous_within_compose: + "continuous (at x within s) f \ continuous (at x within s) g \ bilinear h + ==> continuous (at x within s) (\x. h (f x) (g x))" + unfolding continuous_within using Lim_bilinear[of f "f x"] by auto + +lemma bilinear_continuous_on_compose: + "continuous_on s f \ continuous_on s g \ bilinear h + ==> continuous_on s (\x. h (f x) (g x))" + unfolding continuous_on_eq_continuous_within apply auto apply(erule_tac x=x in ballE) apply auto apply(erule_tac x=x in ballE) apply auto + using bilinear_continuous_within_compose[of _ s f g h] by auto + +subsection{* Topological stuff lifted from and dropped to R *} + + +lemma open_vec1: + "open(vec1 ` s) \ + (\x \ s. \e>0. \x'. abs(x' - x) < e --> x' \ s)" (is "?lhs = ?rhs") + unfolding open_def apply simp unfolding forall_vec1 dist_vec1 vec1_in_image_vec1 by simp + +lemma islimpt_approachable_vec1: + "(vec1 x) islimpt (vec1 ` s) \ + (\e>0. \x'\ s. 
x' \ x \ abs(x' - x) < e)" + by (auto simp add: islimpt_approachable dist_vec1 vec1_eq) + +lemma closed_vec1: + "closed (vec1 ` s) \ + (\x. (\e>0. \x' \ s. x' \ x \ abs(x' - x) < e) + --> x \ s)" + unfolding closed_limpt islimpt_approachable forall_vec1 apply simp + unfolding dist_vec1 vec1_in_image_vec1 abs_minus_commute by auto + +lemma continuous_at_vec1_range: + "continuous (at x) (vec1 o f) \ (\e>0. \d>0. + \x'. norm(x' - x) < d --> abs(f x' - f x) < e)" + unfolding continuous_at unfolding Lim_at apply simp unfolding dist_vec1 unfolding dist_nz[THEN sym] unfolding dist_def apply auto + apply(erule_tac x=e in allE) apply auto apply (rule_tac x=d in exI) apply auto apply (erule_tac x=x' in allE) apply auto + apply(erule_tac x=e in allE) by auto + +lemma continuous_on_vec1_range: + " continuous_on s (vec1 o f) \ (\x \ s. \e>0. \d>0. (\x' \ s. norm(x' - x) < d --> abs(f x' - f x) < e))" + unfolding continuous_on_def apply (simp del: dist_sym) unfolding dist_vec1 unfolding dist_def .. + +lemma continuous_at_vec1_norm: + "\x. continuous (at x) (vec1 o norm)" + unfolding continuous_at_vec1_range using real_abs_sub_norm order_le_less_trans by blast + +lemma continuous_on_vec1_norm: + "\s. continuous_on s (vec1 o norm)" +unfolding continuous_on_vec1_range norm_vec1[THEN sym] by (metis norm_vec1 order_le_less_trans real_abs_sub_norm) + +lemma continuous_at_vec1_component: + assumes "1 \ i" "i \ dimindex(UNIV::('a set))" + shows "continuous (at (a::real^'a)) (\ x. vec1(x$i))" +proof- + { fix e::real and x assume "0 < dist x a" "dist x a < e" "e>0" + hence "\x $ i - a $ i\ < e" using component_le_norm[of i "x - a"] vector_minus_component[of i x a] assms unfolding dist_def by auto } + thus ?thesis unfolding continuous_at tendsto_def eventually_at dist_vec1 by auto +qed + +lemma continuous_on_vec1_component: + assumes "i \ {1..dimindex (UNIV::'a set)}" shows "continuous_on s (\ x::real^'a. vec1(x$i))" +proof- + { fix e::real and x xa assume "x\s" "e>0" "xa\s" "0 < norm (xa - x) \ norm (xa - x) < e" + hence "\xa $ i - x $ i\ < e" using component_le_norm[of i "xa - x"] vector_minus_component[of i xa x] assms by auto } + thus ?thesis unfolding continuous_on Lim_within dist_vec1 unfolding dist_def by auto +qed + +lemma continuous_at_vec1_infnorm: + "continuous (at x) (vec1 o infnorm)" + unfolding continuous_at Lim_at o_def unfolding dist_vec1 unfolding dist_def + apply auto apply (rule_tac x=e in exI) apply auto + using order_trans[OF real_abs_sub_infnorm infnorm_le_norm, of _ x] by (metis xt1(7)) + +text{* Hence some handy theorems on distance, diameter etc. of/from a set. *} + +lemma compact_attains_sup: + assumes "compact (vec1 ` s)" "s \ {}" + shows "\x \ s. \y \ s. y \ x" +proof- + from assms(1) have a:"bounded (vec1 ` s)" "closed (vec1 ` s)" unfolding compact_eq_bounded_closed by auto + { fix e::real assume as: "\x\s. x \ rsup s" "rsup s \ s" "0 < e" "\x'\s. x' = rsup s \ \ rsup s - x' < e" + have "isLub UNIV s (rsup s)" using rsup[OF assms(2)] unfolding setle_def using as(1) by auto + moreover have "isUb UNIV s (rsup s - e)" unfolding isUb_def unfolding setle_def using as(4,2) by auto + ultimately have False using isLub_le_isUb[of UNIV s "rsup s" "rsup s - e"] using `e>0` by auto } + thus ?thesis using bounded_has_rsup(1)[OF a(1) assms(2)] using a(2)[unfolded closed_vec1, THEN spec[where x="rsup s"]] + apply(rule_tac x="rsup s" in bexI) by auto +qed + +lemma compact_attains_inf: + assumes "compact (vec1 ` s)" "s \ {}" shows "\x \ s. \y \ s. 
x \ y" +proof- + from assms(1) have a:"bounded (vec1 ` s)" "closed (vec1 ` s)" unfolding compact_eq_bounded_closed by auto + { fix e::real assume as: "\x\s. x \ rinf s" "rinf s \ s" "0 < e" + "\x'\s. x' = rinf s \ \ abs (x' - rinf s) < e" + have "isGlb UNIV s (rinf s)" using rinf[OF assms(2)] unfolding setge_def using as(1) by auto + moreover + { fix x assume "x \ s" + hence *:"abs (x - rinf s) = x - rinf s" using as(1)[THEN bspec[where x=x]] by auto + have "rinf s + e \ x" using as(4)[THEN bspec[where x=x]] using as(2) `x\s` unfolding * by auto } + hence "isLb UNIV s (rinf s + e)" unfolding isLb_def and setge_def by auto + ultimately have False using isGlb_le_isLb[of UNIV s "rinf s" "rinf s + e"] using `e>0` by auto } + thus ?thesis using bounded_has_rinf(1)[OF a(1) assms(2)] using a(2)[unfolded closed_vec1, THEN spec[where x="rinf s"]] + apply(rule_tac x="rinf s" in bexI) by auto +qed + +lemma continuous_attains_sup: + "compact s \ s \ {} \ continuous_on s (vec1 o f) + ==> (\x \ s. \y \ s. f y \ f x)" + using compact_attains_sup[of "f ` s"] + using compact_continuous_image[of s "vec1 \ f"] unfolding image_compose by auto + +lemma continuous_attains_inf: + "compact s \ s \ {} \ continuous_on s (vec1 o f) + ==> (\x \ s. \y \ s. f x \ f y)" + using compact_attains_inf[of "f ` s"] + using compact_continuous_image[of s "vec1 \ f"] unfolding image_compose by auto + +lemma distance_attains_sup: + assumes "compact s" "s \ {}" + shows "\x \ s. \y \ s. dist a y \ dist a x" +proof- + { fix x assume "x\s" fix e::real assume "e>0" + { fix x' assume "x'\s" and as:"norm (x' - x) < e" + hence "\norm (x' - a) - norm (x - a)\ < e" + using real_abs_sub_norm[of "x' - a" "x - a"] by auto } + hence "\d>0. \x'\s. norm (x' - x) < d \ \dist x' a - dist x a\ < e" using `e>0` unfolding dist_def by auto } + thus ?thesis using assms + using continuous_attains_sup[of s "\x. dist a x"] + unfolding continuous_on_vec1_range by (auto simp add: dist_sym) +qed + +text{* For *minimal* distance, we only need closure, not compactness. *} + +lemma distance_attains_inf: + assumes "closed s" "s \ {}" + shows "\x \ s. \y \ s. dist a x \ dist a y" +proof- + from assms(2) obtain b where "b\s" by auto + let ?B = "cball a (dist b a) \ s" + have "b \ ?B" using `b\s` by (simp add: dist_sym) + hence "?B \ {}" by auto + moreover + { fix x assume "x\?B" + fix e::real assume "e>0" + { fix x' assume "x'\?B" and as:"norm (x' - x) < e" + hence "\norm (x' - a) - norm (x - a)\ < e" + using real_abs_sub_norm[of "x' - a" "x - a"] by auto } + hence "\d>0. \x'\?B. norm (x' - x) < d \ \dist x' a - dist x a\ < e" using `e>0` unfolding dist_def by auto } + hence "continuous_on (cball a (dist b a) \ s) (vec1 \ dist a)" unfolding continuous_on_vec1_range + by (auto simp add: dist_sym) + moreover have "compact ?B" using compact_cball[of a "dist b a"] unfolding compact_eq_bounded_closed using bounded_Int and closed_Int and assms(1) by auto + ultimately obtain x where "x\cball a (dist b a) \ s" "\y\cball a (dist b a) \ s. dist a x \ dist a y" using continuous_attains_inf[of ?B "dist a"] by fastsimp + thus ?thesis by fastsimp +qed + +subsection{* We can now extend limit compositions to consider the scalar multiplier. *} + +lemma Lim_mul: + assumes "((vec1 o c) ---> vec1 d) net" "(f ---> l) net" + shows "((\x. c(x) *s f x) ---> (d *s l)) net" +proof- + have "bilinear (\x. 
op *s (dest_vec1 (x::real^1)))" unfolding bilinear_def linear_def + unfolding dest_vec1_add dest_vec1_cmul + apply vector apply auto unfolding semiring_class.right_distrib semiring_class.left_distrib by auto + thus ?thesis using Lim_bilinear[OF assms, of "\x y. (dest_vec1 x) *s y"] by auto +qed + +lemma Lim_vmul: + "((vec1 o c) ---> vec1 d) net ==> ((\x. c(x) *s v) ---> d *s v) net" + using Lim_mul[of c d net "\x. v" v] using Lim_const[of v] by auto + +lemma continuous_vmul: + "continuous net (vec1 o c) ==> continuous net (\x. c(x) *s v)" + unfolding continuous_def using Lim_vmul[of c] by auto + +lemma continuous_mul: + "continuous net (vec1 o c) \ continuous net f + ==> continuous net (\x. c(x) *s f x) " + unfolding continuous_def using Lim_mul[of c] by auto + +lemma continuous_on_vmul: + "continuous_on s (vec1 o c) ==> continuous_on s (\x. c(x) *s v)" + unfolding continuous_on_eq_continuous_within using continuous_vmul[of _ c] by auto + +lemma continuous_on_mul: + "continuous_on s (vec1 o c) \ continuous_on s f + ==> continuous_on s (\x. c(x) *s f x)" + unfolding continuous_on_eq_continuous_within using continuous_mul[of _ c] by auto + +text{* And so we have continuity of inverse. *} + +lemma Lim_inv: + assumes "((vec1 o f) ---> vec1 l) (net::'a net)" "l \ 0" + shows "((vec1 o inverse o f) ---> vec1(inverse l)) net" +proof(cases "trivial_limit net") + case True thus ?thesis unfolding tendsto_def unfolding eventually_def by auto +next + case False note ntriv = this + { fix e::real assume "e>0" + hence "0 < min (\l\ / 2) (l\ * e / 2)" using `l\0` mult_pos_pos[of "l^2" "e/2"] by auto + then obtain y where y1:"\x. netord net x y" and + y:"\x. netord net x y \ dist ((vec1 \ f) x) (vec1 l) < min (\l\ / 2) (l\ * e / 2)" using ntriv + using assms(1)[unfolded tendsto_def eventually_def, THEN spec[where x="min (abs l / 2) (l ^ 2 * e / 2)"]] by auto + { fix x assume "netord net x y" + hence *:"\f x - l\ < min (\l\ / 2) (l\ * e / 2)" using y[THEN spec[where x=x]] unfolding o_def dist_vec1 by auto + hence fx0:"f x \ 0" using `l \ 0` by auto + hence fxl0: "(f x) * l \ 0" using `l \ 0` by auto + from * have **:"\f x - l\ < l\ * e / 2" by auto + have "\f x\ * 2 \ \l\" using * by (auto simp del: Arith_Tools.less_divide_eq_number_of1) + hence "\f x\ * 2 * \l\ \ \l\ * \l\" unfolding mult_le_cancel_right by auto + hence "\f x * l\ * 2 \ \l\^2" unfolding real_mult_commute and power2_eq_square by auto + hence ***:"inverse \f x * l\ \ inverse (l\ / 2)" using fxl0 + using le_imp_inverse_le[of "l^2 / 2" "\f x * l\"] by auto + + have "dist ((vec1 \ inverse \ f) x) (vec1 (inverse l)) < e" unfolding o_def unfolding dist_vec1 + unfolding inverse_diff_inverse[OF fx0 `l\0`] apply simp + unfolding mult_commute[of "inverse (f x)"] + unfolding real_divide_def[THEN sym] + unfolding divide_divide_eq_left + unfolding nonzero_abs_divide[OF fxl0] + using mult_less_le_imp_less[OF **, of "inverse \f x * l\", of "inverse (l^2 / 2)"] using *** using fx0 `l\0` + unfolding inverse_eq_divide using `e>0` by auto } + hence "(\y. (\x. netord net x y) \ (\x. 
netord net x y \ dist ((vec1 \ inverse \ f) x) (vec1 (inverse l)) < e))" + using y1 by auto } + thus ?thesis unfolding tendsto_def eventually_def by auto +qed + +lemma continuous_inv: + "continuous net (vec1 o f) \ f(netlimit net) \ 0 + ==> continuous net (vec1 o inverse o f)" + unfolding continuous_def using Lim_inv by auto + +lemma continuous_at_within_inv: + assumes "continuous (at a within s) (vec1 o f)" "f a \ 0" + shows "continuous (at a within s) (vec1 o inverse o f)" +proof(cases "trivial_limit (at a within s)") + case True thus ?thesis unfolding continuous_def tendsto_def eventually_def by auto +next + case False note cs = this + thus ?thesis using netlimit_within[OF cs] assms(2) continuous_inv[OF assms(1)] by auto +qed + +lemma continuous_at_inv: + "continuous (at a) (vec1 o f) \ f a \ 0 + ==> continuous (at a) (vec1 o inverse o f) " + using within_UNIV[THEN sym, of a] using continuous_at_within_inv[of a UNIV] by auto + +subsection{* Preservation properties for pasted sets. *} + +lemma bounded_pastecart: + assumes "bounded s" "bounded t" + shows "bounded { pastecart x y | x y . (x \ s \ y \ t)}" +proof- + obtain a b where ab:"\x\s. norm x \ a" "\x\t. norm x \ b" using assms[unfolded bounded_def] by auto + { fix x y assume "x\s" "y\t" + hence "norm x \ a" "norm y \ b" using ab by auto + hence "norm (pastecart x y) \ a + b" using norm_pastecart[of x y] by auto } + thus ?thesis unfolding bounded_def by auto +qed + +lemma closed_pastecart: + assumes "closed s" "closed t" + shows "closed {pastecart x y | x y . x \ s \ y \ t}" +proof- + { fix x l assume as:"\n::nat. x n \ {pastecart x y |x y. x \ s \ y \ t}" "(x ---> l) sequentially" + { fix n::nat have "fstcart (x n) \ s" "sndcart (x n) \ t" using as(1)[THEN spec[where x=n]] by auto } note * = this + moreover + { fix e::real assume "e>0" + then obtain N::nat where N:"\n\N. dist (x n) l < e" using as(2)[unfolded Lim_sequentially, THEN spec[where x=e]] by auto + { fix n::nat assume "n\N" + hence "dist (fstcart (x n)) (fstcart l) < e" "dist (sndcart (x n)) (sndcart l) < e" + using N[THEN spec[where x=n]] dist_fstcart[of "x n" l] dist_sndcart[of "x n" l] by auto } + hence "\N. \n\N. dist (fstcart (x n)) (fstcart l) < e" "\N. \n\N. dist (sndcart (x n)) (sndcart l) < e" by auto } + ultimately have "fstcart l \ s" "sndcart l \ t" + using assms(1)[unfolded closed_sequential_limits, THEN spec[where x="\n. fstcart (x n)"], THEN spec[where x="fstcart l"]] + using assms(2)[unfolded closed_sequential_limits, THEN spec[where x="\n. sndcart (x n)"], THEN spec[where x="sndcart l"]] + unfolding Lim_sequentially by auto + hence "l \ {pastecart x y |x y. x \ s \ y \ t}" using pastecart_fst_snd[THEN sym, of l] by auto } + thus ?thesis unfolding closed_sequential_limits by auto +qed + +lemma compact_pastecart: + "compact s \ compact t ==> compact {pastecart x y | x y . x \ s \ y \ t}" + unfolding compact_eq_bounded_closed using bounded_pastecart[of s t] closed_pastecart[of s t] by auto + +text{* Hence some useful properties follow quite easily. *} + +lemma compact_scaling: + assumes "compact s" shows "compact ((\x. c *s x) ` s)" +proof- + let ?f = "\x. c *s x" + have *:"linear ?f" unfolding linear_def vector_smult_assoc vector_add_ldistrib real_mult_commute by auto + show ?thesis using compact_continuous_image[of s ?f] continuous_at_imp_continuous_on[of s ?f] + using linear_continuous_at[OF *] assms by auto +qed + +lemma compact_negations: + assumes "compact s" shows "compact ((\x. -x) ` s)" +proof- + have "uminus ` s = (\x. 
-1 *s x) ` s" apply auto unfolding image_iff pth_3 by auto + thus ?thesis using compact_scaling[OF assms, of "-1"] by auto +qed + +lemma compact_sums: + assumes "compact s" "compact t" shows "compact {x + y | x y. x \ s \ y \ t}" +proof- + have *:"{x + y | x y. x \ s \ y \ t} =(\z. fstcart z + sndcart z) ` {pastecart x y | x y. x \ s \ y \ t}" + apply auto unfolding image_iff apply(rule_tac x="pastecart xa y" in bexI) unfolding fstcart_pastecart sndcart_pastecart by auto + have "linear (\z::real^('a, 'a) finite_sum. fstcart z + sndcart z)" unfolding linear_def + unfolding fstcart_add sndcart_add apply auto + unfolding vector_add_ldistrib fstcart_cmul[THEN sym] sndcart_cmul[THEN sym] by auto + hence "continuous_on {pastecart x y |x y. x \ s \ y \ t} (\z. fstcart z + sndcart z)" + using continuous_at_imp_continuous_on linear_continuous_at by auto + thus ?thesis unfolding * using compact_continuous_image compact_pastecart[OF assms] by auto +qed + +lemma compact_differences: + assumes "compact s" "compact t" shows "compact {x - y | x y. x \ s \ y \ t}" +proof- + have "{x - y | x y::real^'a. x\s \ y \ t} = {x + y | x y. x \ s \ y \ (uminus ` t)}" + apply auto apply(rule_tac x= xa in exI) apply auto apply(rule_tac x=xa in exI) by auto + thus ?thesis using compact_sums[OF assms(1) compact_negations[OF assms(2)]] by auto +qed + +lemma compact_translation: + assumes "compact s" shows "compact ((\x. a + x) ` s)" +proof- + have "{x + y |x y. x \ s \ y \ {a}} = (\x. a + x) ` s" by auto + thus ?thesis using compact_sums[OF assms compact_sing[of a]] by auto +qed + +lemma compact_affinity: + assumes "compact s" shows "compact ((\x. a + c *s x) ` s)" +proof- + have "op + a ` op *s c ` s = (\x. a + c *s x) ` s" by auto + thus ?thesis using compact_translation[OF compact_scaling[OF assms], of a c] by auto +qed + +text{* Hence we get the following. *} + +lemma compact_sup_maxdistance: + assumes "compact s" "s \ {}" + shows "\x\s. \y\s. \u\s. \v\s. norm(u - v) \ norm(x - y)" +proof- + have "{x - y | x y . x\s \ y\s} \ {}" using `s \ {}` by auto + then obtain x where x:"x\{x - y |x y. x \ s \ y \ s}" "\y\{x - y |x y. x \ s \ y \ s}. norm y \ norm x" + using compact_differences[OF assms(1) assms(1)] + using distance_attains_sup[unfolded dist_def, of "{x - y | x y . x\s \ y\s}" 0] by(auto simp add: norm_minus_cancel) + from x(1) obtain a b where "a\s" "b\s" "x = a - b" by auto + thus ?thesis using x(2)[unfolded `x = a - b`] by blast +qed + +text{* We can state this in terms of diameter of a set. *} + +definition "diameter s = (if s = {} then 0::real else rsup {norm(x - y) | x y. x \ s \ y \ s})" + +lemma diameter_bounded: + assumes "bounded s" + shows "\x\s. \y\s. norm(x - y) \ diameter s" + "\d>0. d < diameter s --> (\x\s. \y\s. norm(x - y) > d)" +proof- + let ?D = "{norm (x - y) |x y. x \ s \ y \ s}" + obtain a where a:"\x\s. 
norm x \ a" using assms[unfolded bounded_def] by auto + { fix x y assume "x \ s" "y \ s" + hence "norm (x - y) \ 2 * a" using norm_triangle_ineq[of x "-y", unfolded norm_minus_cancel] a[THEN bspec[where x=x]] a[THEN bspec[where x=y]] by (auto simp add: ring_simps) } + note * = this + { fix x y assume "x\s" "y\s" hence "s \ {}" by auto + have lub:"isLub UNIV ?D (rsup ?D)" using * rsup[of ?D] using `s\{}` unfolding setle_def by auto + have "norm(x - y) \ diameter s" unfolding diameter_def using `s\{}` *[OF `x\s` `y\s`] `x\s` `y\s` isLubD1[OF lub] unfolding setle_def by auto } + moreover + { fix d::real assume "d>0" "d < diameter s" + hence "s\{}" unfolding diameter_def by auto + hence lub:"isLub UNIV ?D (rsup ?D)" using * rsup[of ?D] unfolding setle_def by auto + have "\d' \ ?D. d' > d" + proof(rule ccontr) + assume "\ (\d'\{norm (x - y) |x y. x \ s \ y \ s}. d < d')" + hence as:"\d'\?D. d' \ d" apply auto apply(erule_tac x="norm (x - y)" in allE) by auto + hence "isUb UNIV ?D d" unfolding isUb_def unfolding setle_def by auto + thus False using `d < diameter s` `s\{}` isLub_le_isUb[OF lub, of d] unfolding diameter_def by auto + qed + hence "\x\s. \y\s. norm(x - y) > d" by auto } + ultimately show "\x\s. \y\s. norm(x - y) \ diameter s" + "\d>0. d < diameter s --> (\x\s. \y\s. norm(x - y) > d)" by auto +qed + +lemma diameter_bounded_bound: + "bounded s \ x \ s \ y \ s ==> norm(x - y) \ diameter s" + using diameter_bounded by blast + +lemma diameter_compact_attained: + assumes "compact s" "s \ {}" + shows "\x\s. \y\s. (norm(x - y) = diameter s)" +proof- + have b:"bounded s" using assms(1) compact_eq_bounded_closed by auto + then obtain x y where xys:"x\s" "y\s" and xy:"\u\s. \v\s. norm (u - v) \ norm (x - y)" using compact_sup_maxdistance[OF assms] by auto + hence "diameter s \ norm (x - y)" using rsup_le[of "{norm (x - y) |x y. x \ s \ y \ s}" "norm (x - y)"] + unfolding setle_def and diameter_def by auto + thus ?thesis using diameter_bounded(1)[OF b, THEN bspec[where x=x], THEN bspec[where x=y], OF xys] and xys by auto +qed + +text{* Related results with closure as the conclusion. *} + +lemma closed_scaling: + assumes "closed s" shows "closed ((\x. c *s x) ` s)" +proof(cases "s={}") + case True thus ?thesis by auto +next + case False + show ?thesis + proof(cases "c=0") + have *:"(\x. 0) ` s = {0}" using `s\{}` by auto + case True thus ?thesis apply auto unfolding * using closed_sing by auto + next + case False + { fix x l assume as:"\n::nat. x n \ op *s c ` s" "(x ---> l) sequentially" + { fix n::nat have "(1 / c) *s x n \ s" using as(1)[THEN spec[where x=n]] using `c\0` by (auto simp add: vector_smult_assoc) } + moreover + { fix e::real assume "e>0" + hence "0 < e *\c\" using `c\0` mult_pos_pos[of e "abs c"] by auto + then obtain N where "\n\N. dist (x n) l < e * \c\" using as(2)[unfolded Lim_sequentially, THEN spec[where x="e * abs c"]] by auto + hence "\N. \n\N. dist ((1 / c) *s x n) ((1 / c) *s l) < e" unfolding dist_def unfolding vector_ssub_ldistrib[THEN sym] norm_mul + using mult_imp_div_pos_less[of "abs c" _ e] `c\0` by auto } + hence "((\n. (1 / c) *s x n) ---> (1 / c) *s l) sequentially" unfolding Lim_sequentially by auto + ultimately have "l \ op *s c ` s" using assms[unfolded closed_sequential_limits, THEN spec[where x="\n. 
(1/c) *s x n"], THEN spec[where x="(1/c) *s l"]] + unfolding image_iff using `c\0` apply(rule_tac x="(1 / c) *s l" in bexI) apply auto unfolding vector_smult_assoc by auto } + thus ?thesis unfolding closed_sequential_limits by auto + qed +qed + +lemma closed_negations: + assumes "closed s" shows "closed ((\x. -x) ` s)" + using closed_scaling[OF assms, of "-1"] unfolding pth_3 by auto + +lemma compact_closed_sums: + assumes "compact s" "closed t" shows "closed {x + y | x y. x \ s \ y \ t}" +proof- + let ?S = "{x + y |x y. x \ s \ y \ t}" + { fix x l assume as:"\n. x n \ ?S" "(x ---> l) sequentially" + from as(1) obtain f where f:"\n. x n = fst (f n) + snd (f n)" "\n. fst (f n) \ s" "\n. snd (f n) \ t" + using choice[of "\n y. x n = (fst y) + (snd y) \ fst y \ s \ snd y \ t"] by auto + obtain l' r where "l'\s" and r:"\m n. m < n \ r m < r n" and lr:"(((\n. fst (f n)) \ r) ---> l') sequentially" + using assms(1)[unfolded compact_def, THEN spec[where x="\ n. fst (f n)"]] using f(2) by auto + have "((\n. snd (f (r n))) ---> l - l') sequentially" + using Lim_sub[OF lim_subsequence[OF r as(2)] lr] and f(1) unfolding o_def by auto + hence "l - l' \ t" + using assms(2)[unfolded closed_sequential_limits, THEN spec[where x="\ n. snd (f (r n))"], THEN spec[where x="l - l'"]] + using f(3) by auto + hence "l \ ?S" using `l' \ s` apply auto apply(rule_tac x=l' in exI) apply(rule_tac x="l - l'" in exI) by auto + } + thus ?thesis unfolding closed_sequential_limits by auto +qed + +lemma closed_compact_sums: + assumes "closed s" "compact t" + shows "closed {x + y | x y. x \ s \ y \ t}" +proof- + have "{x + y |x y. x \ t \ y \ s} = {x + y |x y. x \ s \ y \ t}" apply auto + apply(rule_tac x=y in exI) apply auto apply(rule_tac x=y in exI) by auto + thus ?thesis using compact_closed_sums[OF assms(2,1)] by simp +qed + +lemma compact_closed_differences: + assumes "compact s" "closed t" + shows "closed {x - y | x y. x \ s \ y \ t}" +proof- + have "{x + y |x y. x \ s \ y \ uminus ` t} = {x - y |x y. x \ s \ y \ t}" + apply auto apply(rule_tac x=xa in exI) apply auto apply(rule_tac x=xa in exI) by auto + thus ?thesis using compact_closed_sums[OF assms(1) closed_negations[OF assms(2)]] by auto +qed + +lemma closed_compact_differences: + assumes "closed s" "compact t" + shows "closed {x - y | x y. x \ s \ y \ t}" +proof- + have "{x + y |x y. x \ s \ y \ uminus ` t} = {x - y |x y. x \ s \ y \ t}" + apply auto apply(rule_tac x=xa in exI) apply auto apply(rule_tac x=xa in exI) by auto + thus ?thesis using closed_compact_sums[OF assms(1) compact_negations[OF assms(2)]] by simp +qed + +lemma closed_translation: + assumes "closed s" shows "closed ((\x. a + x) ` s)" +proof- + have "{a + y |y. y \ s} = (op + a ` s)" by auto + thus ?thesis using compact_closed_sums[OF compact_sing[of a] assms] by auto +qed + +lemma translation_UNIV: + "range (\x::real^'a. a + x) = UNIV" + apply (auto simp add: image_iff) apply(rule_tac x="x - a" in exI) by auto + +lemma translation_diff: "(\x::real^'a. a + x) ` (s - t) = ((\x. a + x) ` s) - ((\x. a + x) ` t)" by auto + +lemma closure_translation: + "closure ((\x. a + x) ` s) = (\x. a + x) ` (closure s)" +proof- + have *:"op + a ` (UNIV - s) = UNIV - op + a ` s" apply auto unfolding image_iff apply(rule_tac x="x - a" in bexI) by auto + show ?thesis unfolding closure_interior translation_diff translation_UNIV using interior_translation[of a "UNIV - s"] unfolding * by auto +qed + +lemma frontier_translation: + "frontier((\x. a + x) ` s) = (\x. 
a + x) ` (frontier s)" + unfolding frontier_def translation_diff interior_translation closure_translation by auto + +subsection{* Separation between points and sets. *} + +lemma separate_point_closed: + "closed s \ a \ s ==> (\d>0. \x\s. d \ dist a x)" +proof(cases "s = {}") + case True + thus ?thesis by(auto intro!: exI[where x=1]) +next + case False + assume "closed s" "a \ s" + then obtain x where "x\s" "\y\s. dist a x \ dist a y" using `s \ {}` distance_attains_inf [of s a] by blast + with `x\s` show ?thesis using dist_pos_lt[of a x] and`a \ s` by blast +qed + +lemma separate_compact_closed: + assumes "compact s" and "closed t" and "s \ t = {}" + shows "\d>0. \x\s. \y\t. d \ dist x y" +proof- + have "0 \ {x - y |x y. x \ s \ y \ t}" using assms(3) by auto + then obtain d where "d>0" and d:"\x\{x - y |x y. x \ s \ y \ t}. d \ dist 0 x" + using separate_point_closed[OF compact_closed_differences[OF assms(1,2)], of 0] by auto + { fix x y assume "x\s" "y\t" + hence "x - y \ {x - y |x y. x \ s \ y \ t}" by auto + hence "d \ dist (x - y) 0" using d[THEN bspec[where x="x - y"]] using dist_sym + by (auto simp add: dist_sym) + hence "d \ dist x y" unfolding dist_def by auto } + thus ?thesis using `d>0` by auto +qed + +lemma separate_closed_compact: + assumes "closed s" and "compact t" and "s \ t = {}" + shows "\d>0. \x\s. \y\t. d \ dist x y" +proof- + have *:"t \ s = {}" using assms(3) by auto + show ?thesis using separate_compact_closed[OF assms(2,1) *] + apply auto apply(rule_tac x=d in exI) apply auto apply (erule_tac x=y in ballE) + by (auto simp add: dist_sym) +qed + +(* A cute way of denoting open and closed intervals using overloading. *) + +lemma interval: fixes a :: "'a::ord^'n" shows + "{a <..< b} = {x::'a^'n. \i \ dimset a. a$i < x$i \ x$i < b$i}" and + "{a .. b} = {x::'a^'n. \i \ dimset a. a$i \ x$i \ x$i \ b$i}" + by (auto simp add: expand_set_eq vector_less_def vector_less_eq_def) + +lemma mem_interval: + "x \ {a<.. (\i \ dimset a. a$i < x$i \ x$i < b$i)" + "x \ {a .. b} \ (\i \ dimset a. a$i \ x$i \ x$i \ b$i)" + using interval[of a b] + by(auto simp add: expand_set_eq vector_less_def vector_less_eq_def) + +lemma interval_eq_empty: fixes a :: "real^'n" shows + "({a <..< b} = {} \ (\i \ dimset a. b$i \ a$i))" (is ?th1) and + "({a .. b} = {} \ (\i \ dimset a. b$i < a$i))" (is ?th2) +proof- + { fix i x assume i:"i\dimset a" and as:"b$i \ a$i" and x:"x\{a <..< b}" + hence "a $ i < x $ i \ x $ i < b $ i" unfolding mem_interval by auto + hence "a$i < b$i" by auto + hence False using as by auto } + moreover + { assume as:"\i \ dimset a. \ (b$i \ a$i)" + let ?x = "(1/2) *s (a + b)" + { fix i assume i:"i\dimset a" + hence "a$i < b$i" using as[THEN bspec[where x=i]] by auto + hence "a$i < ((1/2) *s (a+b)) $ i" "((1/2) *s (a+b)) $ i < b$i" + unfolding vector_smult_component[OF i] and vector_add_component[OF i] + by (auto simp add: Arith_Tools.less_divide_eq_number_of1) } + hence "{a <..< b} \ {}" using mem_interval(1)[of "?x" a b] by auto } + ultimately show ?th1 by blast + + { fix i x assume i:"i\dimset a" and as:"b$i < a$i" and x:"x\{a .. b}" + hence "a $ i \ x $ i \ x $ i \ b $ i" unfolding mem_interval by auto + hence "a$i \ b$i" by auto + hence False using as by auto } + moreover + { assume as:"\i \ dimset a. 
\ (b$i < a$i)" + let ?x = "(1/2) *s (a + b)" + { fix i assume i:"i\dimset a" + hence "a$i \ b$i" using as[THEN bspec[where x=i]] by auto + hence "a$i \ ((1/2) *s (a+b)) $ i" "((1/2) *s (a+b)) $ i \ b$i" + unfolding vector_smult_component[OF i] and vector_add_component[OF i] + by (auto simp add: Arith_Tools.less_divide_eq_number_of1) } + hence "{a .. b} \ {}" using mem_interval(2)[of "?x" a b] by auto } + ultimately show ?th2 by blast +qed + +lemma interval_ne_empty: fixes a :: "real^'n" shows + "{a .. b} \ {} \ (\i \ dimset a. a$i \ b$i)" and + "{a <..< b} \ {} \ (\i \ dimset a. a$i < b$i)" + unfolding interval_eq_empty[of a b] by auto + +lemma subset_interval_imp: fixes a :: "real^'n" shows + "(\i \ dimset a. a$i \ c$i \ d$i \ b$i) \ {c .. d} \ {a .. b}" and + "(\i \ dimset a. a$i < c$i \ d$i < b$i) \ {c .. d} \ {a<..i \ dimset a. a$i \ c$i \ d$i \ b$i) \ {c<.. {a .. b}" and + "(\i \ dimset a. a$i \ c$i \ d$i \ b$i) \ {c<.. {a<.. {a<.. {a .. b}" +proof(simp add: subset_eq, rule) + fix x + assume x:"x \{a<.. dimset a" + hence "a $ i \ x $ i" + using x order_less_imp_le[of "a$i" "x$i"] + by(simp add: expand_set_eq vector_less_def vector_less_eq_def Cart_eq) + } + moreover + { fix i assume "i \ dimset a" + hence "x $ i \ b $ i" + using x + using x order_less_imp_le[of "x$i" "b$i"] + by(simp add: expand_set_eq vector_less_def vector_less_eq_def Cart_eq) + } + ultimately + show "a \ x \ x \ b" + by(simp add: expand_set_eq vector_less_def vector_less_eq_def Cart_eq) +qed + +lemma subset_interval: fixes a :: "real^'n" shows + "{c .. d} \ {a .. b} \ (\i \ dimset a. c$i \ d$i) --> (\i \ dimset a. a$i \ c$i \ d$i \ b$i)" (is ?th1) and + "{c .. d} \ {a<.. (\i \ dimset a. c$i \ d$i) --> (\i \ dimset a. a$i < c$i \ d$i < b$i)" (is ?th2) and + "{c<.. {a .. b} \ (\i \ dimset a. c$i < d$i) --> (\i \ dimset a. a$i \ c$i \ d$i \ b$i)" (is ?th3) and + "{c<.. {a<.. (\i \ dimset a. c$i < d$i) --> (\i \ dimset a. a$i \ c$i \ d$i \ b$i)" (is ?th4) +proof- + show ?th1 unfolding subset_eq and Ball_def and mem_interval apply auto by(erule_tac x=xa in allE, simp)+ + show ?th2 unfolding subset_eq and Ball_def and mem_interval apply auto by(erule_tac x=xa in allE, simp)+ + { assume as: "{c<.. {a .. b}" "\i \ dimset a. c$i < d$i" + hence "{c<.. {}" unfolding interval_eq_empty by auto + fix i assume i:"i \ dimset a" + (** TODO combine the following two parts as done in the HOL_light version. **) + { let ?x = "(\ j. (if j=i then ((min (a$j) (d$j))+c$j)/2 else (c$j+d$j)/2))::real^'n" + assume as2: "a$i > c$i" + { fix j assume j:"j\dimset a" + hence "c $ j < ?x $ j \ ?x $ j < d $ j" unfolding Cart_lambda_beta[THEN bspec[where x=j], OF j] + apply(cases "j=i") using as(2)[THEN bspec[where x=j], OF j] + by (auto simp add: Arith_Tools.less_divide_eq_number_of1 as2) } + hence "?x\{c<..{a .. b}" + unfolding mem_interval apply auto apply(rule_tac x=i in bexI) + unfolding Cart_lambda_beta[THEN bspec[where x=i], OF i] + using as(2)[THEN bspec[where x=i], OF i] and as2 and i + by (auto simp add: Arith_Tools.less_divide_eq_number_of1) + ultimately have False using as by auto } + hence "a$i \ c$i" by(rule ccontr)auto + moreover + { let ?x = "(\ j. (if j=i then ((max (b$j) (c$j))+d$j)/2 else (c$j+d$j)/2))::real^'n" + assume as2: "b$i < d$i" + { fix j assume j:"j\dimset a" + hence "d $ j > ?x $ j \ ?x $ j > c $ j" unfolding Cart_lambda_beta[THEN bspec[where x=j], OF j] + apply(cases "j=i") using as(2)[THEN bspec[where x=j], OF j] + by (auto simp add: Arith_Tools.less_divide_eq_number_of1 as2) } + hence "?x\{c<..{a .. 
b}" + unfolding mem_interval apply auto apply(rule_tac x=i in bexI) + unfolding Cart_lambda_beta[THEN bspec[where x=i], OF i] + using as(2)[THEN bspec[where x=i], OF i] and as2 and i + by (auto simp add: Arith_Tools.less_divide_eq_number_of1) + ultimately have False using as by auto } + hence "b$i \ d$i" by(rule ccontr)auto + ultimately + have "a$i \ c$i \ d$i \ b$i" by auto + } note part1 = this + thus ?th3 unfolding subset_eq and Ball_def and mem_interval apply auto by(erule_tac x=xa in allE, simp)+ + { assume as:"{c<.. {a<..i \ dimset a. c$i < d$i" + fix i assume i:"i \ dimset a" + from as(1) have "{c<.. {a..b}" using interval_open_subset_closed[of a b] by auto + hence "a$i \ c$i \ d$i \ b$i" using part1 and as(2) and i by auto } note * = this + thus ?th4 unfolding subset_eq and Ball_def and mem_interval apply auto by(erule_tac x=xa in allE, simp)+ +qed + +lemma disjoint_interval: fixes a::"real^'n" shows + "{a .. b} \ {c .. d} = {} \ (\i \ dimset a. (b$i < a$i \ d$i < c$i \ b$i < c$i \ d$i < a$i))" (is ?th1) and + "{a .. b} \ {c<.. (\i \ dimset a. (b$i < a$i \ d$i \ c$i \ b$i \ c$i \ d$i \ a$i))" (is ?th2) and + "{a<.. {c .. d} = {} \ (\i \ dimset a. (b$i \ a$i \ d$i < c$i \ b$i \ c$i \ d$i \ a$i))" (is ?th3) and + "{a<.. {c<.. (\i \ dimset a. (b$i \ a$i \ d$i \ c$i \ b$i \ c$i \ d$i \ a$i))" (is ?th4) +proof- + let ?z = "(\ i. ((max (a$i) (c$i)) + (min (b$i) (d$i))) / 2)::real^'n" + show ?th1 ?th2 ?th3 ?th4 + unfolding expand_set_eq and Int_iff and empty_iff and mem_interval and ball_conj_distrib[THEN sym] and eq_False + by (auto simp add: Cart_lambda_beta' Arith_Tools.less_divide_eq_number_of1 intro!: bexI elim!: allE[where x="?z"]) +qed + +lemma inter_interval: fixes a :: "'a::linorder^'n" shows + "{a .. b} \ {c .. d} = {(\ i. max (a$i) (c$i)) .. (\ i. min (b$i) (d$i))}" + unfolding expand_set_eq and Int_iff and mem_interval + by (auto simp add: Cart_lambda_beta' Arith_Tools.less_divide_eq_number_of1 intro!: bexI) + +(* Moved interval_open_subset_closed a bit upwards *) + +lemma open_interval_lemma: fixes x :: "real" shows + "a < x \ x < b ==> (\d>0. \x'. abs(x' - x) < d --> a < x' \ x' < b)" + by(rule_tac x="min (x - a) (b - x)" in exI, auto) + +lemma open_interval: fixes a :: "real^'n" shows "open {a<..{a<..dimset x" + hence "\d>0. \x'. abs (x' - (x$i)) < d \ a$i < x' \ x' < b$i" + using x[unfolded mem_interval, THEN bspec[where x=i]] + using open_interval_lemma[of "a$i" "x$i" "b$i"] by auto } + + hence "\i\dimset x. \d>0. \x'. abs (x' - (x$i)) < d \ a$i < x' \ x' < b$i" by auto + then obtain d where d:"\i\dimset x. 0 < d i \ (\x'. \x' - x $ i\ < d i \ a $ i < x' \ x' < b $ i)" + using bchoice[of "dimset x" "\i d. d>0 \ (\x'. \x' - x $ i\ < d \ a $ i < x' \ x' < b $ i)"] by auto + + let ?d = "Min (d ` dimset x)" + have **:"finite (d ` dimset x)" "d ` dimset x \ {}" using dimindex_ge_1[of "UNIV::'n set"] by auto + have "?d>0" unfolding Min_gr_iff[OF **] using d by auto + moreover + { fix x' assume as:"dist x' x < ?d" + { fix i assume i:"i \ dimset x" + have "\x'$i - x $ i\ < d i" + using norm_bound_component_lt[OF as[unfolded dist_def], THEN bspec[where x=i], OF i] + unfolding vector_minus_component[OF i] and Min_gr_iff[OF **] using i by auto + hence "a $ i < x' $ i" "x' $ i < b $ i" using d[THEN bspec[where x=i], OF i] by auto } + hence "a < x' \ x' < b" unfolding vector_less_def by auto } + ultimately have "\e>0. \x'. dist x' x < e \ x' \ {a<..dimset x" and as:"\e>0. \x'\{a..b}. 
x' \ x \ dist x' x < e"(* and xab:"a$i > x$i \ b$i < x$i"*) + { assume xa:"a$i > x$i" + with as obtain y where y:"y\{a..b}" "y \ x" "dist y x < a$i - x$i" by(erule_tac x="a$i - x$i" in allE)auto + hence False unfolding mem_interval and dist_def + using component_le_norm[OF i, of "y-x", unfolded vector_minus_component[OF i]] and i and xa by(auto elim!: ballE[where x=i]) + } hence "a$i \ x$i" by(rule ccontr)auto + moreover + { assume xb:"b$i < x$i" + with as obtain y where y:"y\{a..b}" "y \ x" "dist y x < x$i - b$i" by(erule_tac x="x$i - b$i" in allE)auto + hence False unfolding mem_interval and dist_def + using component_le_norm[OF i, of "y-x", unfolded vector_minus_component[OF i]] and i and xb by(auto elim!: ballE[where x=i]) + } hence "x$i \ b$i" by(rule ccontr)auto + ultimately + have "a $ i \ x $ i \ x $ i \ b $ i" by auto } + thus ?thesis unfolding closed_limpt islimpt_approachable mem_interval by auto +qed + +lemma interior_closed_interval: fixes a :: "real^'n" shows + "interior {a .. b} = {a<.. ?L" using interior_maximal[OF interval_open_subset_closed open_interval] by auto +next + { fix x assume "\T. open T \ x \ T \ T \ {a..b}" + then obtain s where s:"open s" "x \ s" "s \ {a..b}" by auto + then obtain e where "e>0" and e:"\x'. dist x' x < e \ x' \ {a..b}" unfolding open_def and subset_eq by auto + { fix i assume i:"i\dimset x" + have "dist (x - (e / 2) *s basis i) x < e" + "dist (x + (e / 2) *s basis i) x < e" + unfolding dist_def apply auto + unfolding norm_minus_cancel and norm_mul using norm_basis[OF i] and `e>0` by auto + hence "a $ i \ (x - (e / 2) *s basis i) $ i" + "(x + (e / 2) *s basis i) $ i \ b $ i" + using e[THEN spec[where x="x - (e/2) *s basis i"]] + and e[THEN spec[where x="x + (e/2) *s basis i"]] + unfolding mem_interval using i by auto + hence "a $ i < x $ i" and "x $ i < b $ i" + unfolding vector_minus_component[OF i] and vector_add_component[OF i] + unfolding vector_smult_component[OF i] and basis_component[OF i] using `e>0` by auto } + hence "x \ {a<.. ?R" unfolding interior_def and subset_eq by auto +qed + +lemma bounded_closed_interval: fixes a :: "real^'n" shows + "bounded {a .. b}" +proof- + let ?b = "\i\dimset a. \a$i\ + \b$i\" + { fix x::"real^'n" assume x:"\i\dimset a. a $ i \ x $ i \ x $ i \ b $ i" + { fix i assume "i\dimset a" + hence "\x$i\ \ \a$i\ + \b$i\" using x[THEN bspec[where x=i]] by auto } + hence "(\i\dimset a. \x $ i\) \ ?b" by(rule setsum_mono)auto + hence "norm x \ ?b" using norm_le_l1[of x] by auto } + thus ?thesis unfolding interval and bounded_def by auto +qed + +lemma bounded_interval: fixes a :: "real^'n" shows + "bounded {a .. b} \ bounded {a<.. UNIV) \ ({a<.. UNIV)" + using bounded_interval[of a b] + by auto + +lemma compact_interval: fixes a :: "real^'n" shows + "compact {a .. b}" + using bounded_closed_imp_compact using bounded_interval[of a b] using closed_interval[of a b] by auto + +lemma open_interval_midpoint: fixes a :: "real^'n" + assumes "{a<.. {}" shows "((1/2) *s (a + b)) \ {a<..dimset a" + hence "a $ i < ((1 / 2) *s (a + b)) $ i \ ((1 / 2) *s (a + b)) $ i < b $ i" + using assms[unfolded interval_ne_empty, THEN bspec[where x=i]] + unfolding vector_smult_component[OF i] and vector_add_component[OF i] + by(auto simp add: Arith_Tools.less_divide_eq_number_of1) } + thus ?thesis unfolding mem_interval by auto +qed + +lemma open_closed_interval_convex: fixes x :: "real^'n" + assumes x:"x \ {a<.. {a .. 
b}" and e:"0 < e" "e \ 1" + shows "(e *s x + (1 - e) *s y) \ {a<..dimset a" + have "a $ i = e * a$i + (1 - e) * a$i" unfolding left_diff_distrib by simp + also have "\ < e * x $ i + (1 - e) * y $ i" apply(rule add_less_le_mono) + using e unfolding mult_less_cancel_left and mult_le_cancel_left apply simp_all + using x i unfolding mem_interval apply(erule_tac x=i in ballE) apply simp_all + using y i unfolding mem_interval apply(erule_tac x=i in ballE) by simp_all + finally have "a $ i < (e *s x + (1 - e) *s y) $ i" using i by (auto simp add: vector_add_component vector_smult_component) + moreover { + have "b $ i = e * b$i + (1 - e) * b$i" unfolding left_diff_distrib by simp + also have "\ > e * x $ i + (1 - e) * y $ i" apply(rule add_less_le_mono) + using e unfolding mult_less_cancel_left and mult_le_cancel_left apply simp_all + using x i unfolding mem_interval apply(erule_tac x=i in ballE) apply simp_all + using y i unfolding mem_interval apply(erule_tac x=i in ballE) by simp_all + finally have "(e *s x + (1 - e) *s y) $ i < b $ i" using i by (auto simp add: vector_add_component vector_smult_component) + } ultimately have "a $ i < (e *s x + (1 - e) *s y) $ i \ (e *s x + (1 - e) *s y) $ i < b $ i" by auto } + thus ?thesis unfolding mem_interval by auto +qed + +lemma closure_open_interval: fixes a :: "real^'n" + assumes "{a<.. {}" + shows "closure {a<.. {a .. b}" + def f == "\n::nat. x + (inverse (real n + 1)) *s (?c - x)" + { fix n assume fn:"f n < b \ a < f n \ f n = x" and xc:"x \ ?c" + have *:"0 < inverse (real n + 1)" "inverse (real n + 1) \ 1" unfolding inverse_le_1_iff by auto + have "inverse (real n + 1) *s (1 / 2) *s (a + b) + (1 - inverse (real n + 1)) *s x = + x + inverse (real n + 1) *s ((1 / 2) *s (a + b) - x)" by (auto simp add: vector_ssub_ldistrib vector_add_ldistrib field_simps vector_sadd_rdistrib[THEN sym]) + hence "f n < b" and "a < f n" using open_closed_interval_convex[OF open_interval_midpoint[OF assms] as *] unfolding f_def by auto + hence False using fn unfolding f_def using xc by(auto simp add: vector_mul_lcancel vector_ssub_ldistrib) } + moreover + { assume "\ (f ---> x) sequentially" + { fix e::real assume "e>0" + hence "\N::nat. inverse (real (N + 1)) < e" using real_arch_inv[of e] apply (auto simp add: Suc_pred') apply(rule_tac x="n - 1" in exI) by auto + then obtain N::nat where "inverse (real (N + 1)) < e" by auto + hence "\n\N. inverse (real n + 1) < e" by (auto, metis Suc_le_mono le_SucE less_imp_inverse_less nat_le_real_less order_less_trans real_of_nat_Suc real_of_nat_Suc_gt_zero) + hence "\N::nat. \n\N. inverse (real n + 1) < e" by auto } + hence "((vec1 \ (\n. inverse (real n + 1))) ---> vec1 0) sequentially" + unfolding Lim_sequentially by(auto simp add: dist_vec1) + hence "(f ---> x) sequentially" unfolding f_def + using Lim_add[OF Lim_const, of "\n::nat. (inverse (real n + 1)) *s ((1 / 2) *s (a + b) - x)" 0 sequentially x] + using Lim_vmul[of "\n::nat. inverse (real n + 1)" 0 sequentially "((1 / 2) *s (a + b) - x)"] by auto } + ultimately have "x \ closure {a<..a. s \ {-a<..0" and b:"\x\s. norm x \ b" using assms[unfolded bounded_pos] by auto + def a \ "(\ i. 
b+1)::real^'n" + { fix x assume "x\s" + fix i assume i:"i\dimset a" + have "(-a)$i < x$i" and "x$i < a$i" using b[THEN bspec[where x=x], OF `x\s`] and component_le_norm[OF i, of x] + unfolding vector_uminus_component[OF i] and a_def and Cart_lambda_beta'[OF i] by auto + } + thus ?thesis by(auto intro: exI[where x=a] simp add: vector_less_def) +qed + +lemma bounded_subset_open_interval: + "bounded s ==> (\a b. s \ {a<..a. s \ {-a .. a}" +proof- + obtain a where "s \ {- a<.. (\a b. s \ {a .. b})" + using bounded_subset_closed_interval_symmetric[of s] by auto + +lemma frontier_closed_interval: + "frontier {a .. b} = {a .. b} - {a<.. {}" shows "{a<.. {c .. d} = {} \ {a<.. {c<.. {a .. b} ==> x \ {a<.. (x = a) \ (x = b)" + by(simp add: Cart_eq vector_less_def vector_less_eq_def dim1, auto) + +lemma in_interval_1: fixes x :: "real^1" shows + "(x \ {a .. b} \ dest_vec1 a \ dest_vec1 x \ dest_vec1 x \ dest_vec1 b) \ + (x \ {a<.. dest_vec1 a < dest_vec1 x \ dest_vec1 x < dest_vec1 b)" +by(simp add: Cart_eq vector_less_def vector_less_eq_def dim1 dest_vec1_def) + +lemma interval_eq_empty_1: fixes a :: "real^1" shows + "{a .. b} = {} \ dest_vec1 b < dest_vec1 a" + "{a<.. dest_vec1 b \ dest_vec1 a" + unfolding interval_eq_empty and dim1 and dest_vec1_def by auto + +lemma subset_interval_1: fixes a :: "real^1" shows + "({a .. b} \ {c .. d} \ dest_vec1 b < dest_vec1 a \ + dest_vec1 c \ dest_vec1 a \ dest_vec1 a \ dest_vec1 b \ dest_vec1 b \ dest_vec1 d)" + "({a .. b} \ {c<.. dest_vec1 b < dest_vec1 a \ + dest_vec1 c < dest_vec1 a \ dest_vec1 a \ dest_vec1 b \ dest_vec1 b < dest_vec1 d)" + "({a<.. {c .. d} \ dest_vec1 b \ dest_vec1 a \ + dest_vec1 c \ dest_vec1 a \ dest_vec1 a < dest_vec1 b \ dest_vec1 b \ dest_vec1 d)" + "({a<.. {c<.. dest_vec1 b \ dest_vec1 a \ + dest_vec1 c \ dest_vec1 a \ dest_vec1 a < dest_vec1 b \ dest_vec1 b \ dest_vec1 d)" + unfolding subset_interval[of a b c d] unfolding forall_dimindex_1 and dest_vec1_def by auto + +lemma eq_interval_1: fixes a :: "real^1" shows + "{a .. b} = {c .. d} \ + dest_vec1 b < dest_vec1 a \ dest_vec1 d < dest_vec1 c \ + dest_vec1 a = dest_vec1 c \ dest_vec1 b = dest_vec1 d" +using set_eq_subset[of "{a .. b}" "{c .. d}"] +using subset_interval_1(1)[of a b c d] +using subset_interval_1(1)[of c d a b] +by auto + +lemma disjoint_interval_1: fixes a :: "real^1" shows + "{a .. b} \ {c .. d} = {} \ dest_vec1 b < dest_vec1 a \ dest_vec1 d < dest_vec1 c \ dest_vec1 b < dest_vec1 c \ dest_vec1 d < dest_vec1 a" + "{a .. b} \ {c<.. dest_vec1 b < dest_vec1 a \ dest_vec1 d \ dest_vec1 c \ dest_vec1 b \ dest_vec1 c \ dest_vec1 d \ dest_vec1 a" + "{a<.. {c .. d} = {} \ dest_vec1 b \ dest_vec1 a \ dest_vec1 d < dest_vec1 c \ dest_vec1 b \ dest_vec1 c \ dest_vec1 d \ dest_vec1 a" + "{a<.. {c<.. dest_vec1 b \ dest_vec1 a \ dest_vec1 d \ dest_vec1 c \ dest_vec1 b \ dest_vec1 c \ dest_vec1 d \ dest_vec1 a" + unfolding disjoint_interval and dest_vec1_def and dim1 by auto + +lemma open_closed_interval_1: fixes a :: "real^1" shows + "{a<.. dest_vec1 b ==> {a .. b} = {a<.. {a,b}" + unfolding expand_set_eq apply simp unfolding vector_less_def and vector_less_eq_def and dim1 and dest_vec1_eq[THEN sym] and dest_vec1_def by auto + +(* Some stuff for half-infinite intervals too; FIXME: notation? *) + +lemma closed_interval_left: fixes b::"real^'n" + shows "closed {x::real^'n. \i \ dimset x. x$i \ b$i}" +proof- + { fix i assume i:"i\dimset b" + fix x::"real^'n" assume x:"\e>0. \x'\{x. \i\dimset b. x $ i \ b $ i}. 
x' \ x \ dist x' x < e" + { assume "x$i > b$i" + then obtain y where "y $ i \ b $ i" "y \ x" "dist y x < x$i - b$i" using x[THEN spec[where x="x$i - b$i"]] and i by (auto, erule_tac x=i in ballE)auto + hence False using component_le_norm[OF i, of "y - x"] unfolding dist_def and vector_minus_component[OF i] by auto } + hence "x$i \ b$i" by(rule ccontr)auto } + thus ?thesis unfolding closed_limpt unfolding islimpt_approachable by blast +qed + +lemma closed_interval_right: fixes a::"real^'n" + shows "closed {x::real^'n. \i \ dimset x. a$i \ x$i}" +proof- + { fix i assume i:"i\dimset a" + fix x::"real^'n" assume x:"\e>0. \x'\{x. \i\dimset a. a $ i \ x $ i}. x' \ x \ dist x' x < e" + { assume "a$i > x$i" + then obtain y where "a $ i \ y $ i" "y \ x" "dist y x < a$i - x$i" using x[THEN spec[where x="a$i - x$i"]] and i by(auto, erule_tac x=i in ballE)auto + hence False using component_le_norm[OF i, of "y - x"] unfolding dist_def and vector_minus_component[OF i] by auto } + hence "a$i \ x$i" by(rule ccontr)auto } + thus ?thesis unfolding closed_limpt unfolding islimpt_approachable by blast +qed + +subsection{* Intervals in general, including infinite and mixtures of open and closed. *} + +definition "is_interval s \ (\a\s. \b\s. \x. a \ x \ x \ b \ x \ s)" + +lemma is_interval_interval: fixes a::"real^'n" shows + "is_interval {a<.. real^'n" + assumes "(f ---> l) net" shows "((vec1 o (\y. a \ (f y))) ---> vec1(a \ l)) net" +proof(cases "a = vec 0") + case True thus ?thesis using dot_lzero and Lim_const[of 0 net] unfolding vec1_vec and o_def by auto +next + case False + { fix e::real + assume "0 < e" "\e>0. \y. (\x. netord net x y) \ (\x. netord net x y \ dist l (f x) < e)" + then obtain x y where x:"netord net x y" and y:"\x. netord net x y \ dist l (f x) < e / norm a" apply(erule_tac x="e / norm a" in allE) apply auto using False using norm_ge_zero[of a] apply auto + using divide_pos_pos[of e "norm a"] by auto + { fix z assume "netord net z y" hence "dist l (f z) < e / norm a" using y by blast + hence "norm a * norm (l - f z) < e" unfolding dist_def and + pos_less_divide_eq[OF False[unfolded vec_0 zero_less_norm_iff[of a, THEN sym]]] and real_mult_commute by auto + hence "\a \ l - a \ f z\ < e" using order_le_less_trans[OF norm_cauchy_schwarz_abs[of a "l - f z"], of e] unfolding dot_rsub[symmetric] by auto } + hence "\y. (\x. netord net x y) \ (\x. netord net x y \ \a \ l - a \ f x\ < e)" using x by auto } + thus ?thesis using assms unfolding Lim apply (auto simp add: dist_sym) + unfolding dist_vec1 by auto +qed + +lemma continuous_at_vec1_dot: + "continuous (at x) (vec1 o (\y. a \ y))" +proof- + have "((\x. x) ---> x) (at x)" unfolding Lim_at by auto + thus ?thesis unfolding continuous_at and o_def using Lim_vec1_dot[of "\x. x" x "at x" a] by auto +qed + +lemma continuous_on_vec1_dot: + "continuous_on s (vec1 o (\y. a \ y)) " + using continuous_at_imp_continuous_on[of s "vec1 o (\y. a \ y)"] + using continuous_at_vec1_dot + by auto + +lemma closed_halfspace_le: fixes a::"real^'n" + shows "closed {x. a \ x \ b}" +proof- + have *:"{x \ UNIV. (vec1 \ op \ a) x \ vec1 ` {r. \x. a \ x = r \ r \ b}} = {x. a \ x \ b}" by auto + let ?T = "{x::real^1. (\i\dimset x. x$i \ (vec1 b)$i)}" + have "closed ?T" using closed_interval_left[of "vec1 b"] by simp + moreover have "vec1 ` {r. \x. a \ x = r \ r \ b} = range (vec1 \ op \ a) \ ?T" unfolding dim1 + unfolding image_def apply auto unfolding vec1_component[unfolded One_nat_def] by auto + ultimately have "\T. closed T \ vec1 ` {r. \x. 
a \ x = r \ r \ b} = range (vec1 \ op \ a) \ T" by auto + hence "closedin euclidean {x \ UNIV. (vec1 \ op \ a) x \ vec1 ` {r. \x. a \ x = r \ r \ b}}" + using continuous_on_vec1_dot[of UNIV a, unfolded continuous_on_closed subtopology_UNIV] unfolding closedin_closed + by (auto elim!: allE[where x="vec1 ` {r. (\x. a \ x = r \ r \ b)}"]) + thus ?thesis unfolding closed_closedin[THEN sym] and * by auto +qed + +lemma closed_halfspace_ge: "closed {x. a \ x \ b}" + using closed_halfspace_le[of "-a" "-b"] unfolding dot_lneg by auto + +lemma closed_hyperplane: "closed {x. a \ x = b}" +proof- + have "{x. a \ x = b} = {x. a \ x \ b} \ {x. a \ x \ b}" by auto + thus ?thesis using closed_halfspace_le[of a b] and closed_halfspace_ge[of b a] using closed_Int by auto +qed + +lemma closed_halfspace_component_le: + assumes "i \ {1 .. dimindex (UNIV::'n set)}" shows "closed {x::real^'n. x$i \ a}" + using closed_halfspace_le[of "(basis i)::real^'n" a] unfolding dot_basis[OF assms] by auto + +lemma closed_halfspace_component_ge: + assumes "i \ {1 .. dimindex (UNIV::'n set)}" shows "closed {x::real^'n. x$i \ a}" + using closed_halfspace_ge[of a "(basis i)::real^'n"] unfolding dot_basis[OF assms] by auto + +text{* Openness of halfspaces. *} + +lemma open_halfspace_lt: "open {x. a \ x < b}" +proof- + have "UNIV - {x. b \ a \ x} = {x. a \ x < b}" by auto + thus ?thesis using closed_halfspace_ge[unfolded closed_def, of b a] by auto +qed + +lemma open_halfspace_gt: "open {x. a \ x > b}" +proof- + have "UNIV - {x. b \ a \ x} = {x. a \ x > b}" by auto + thus ?thesis using closed_halfspace_le[unfolded closed_def, of a b] by auto +qed + +lemma open_halfspace_component_lt: + assumes "i \ {1 .. dimindex(UNIV::'n set)}" shows "open {x::real^'n. x$i < a}" + using open_halfspace_lt[of "(basis i)::real^'n" a] unfolding dot_basis[OF assms] by auto + +lemma open_halfspace_component_gt: + assumes "i \ {1 .. dimindex(UNIV::'n set)}" shows "open {x::real^'n. x$i > a}" + using open_halfspace_gt[of a "(basis i)::real^'n"] unfolding dot_basis[OF assms] by auto + +text{* This gives a simple derivation of limit component bounds. *} + +lemma Lim_component_le: fixes f :: "'a \ real^'n" + assumes "(f ---> l) net" "\ (trivial_limit net)" "eventually (\x. f(x)$i \ b) net" + and i:"i\ {1 .. dimindex(UNIV::'n set)}" + shows "l$i \ b" +proof- + { fix x have "x \ {x::real^'n. basis i \ x \ b} \ x$i \ b" unfolding dot_basis[OF i] by auto } note * = this + show ?thesis using Lim_in_closed_set[of "{x. basis i \ x \ b}" f net l] unfolding * + using closed_halfspace_le[of "(basis i)::real^'n" b] and assms(1,2,3) by auto +qed + +lemma Lim_component_ge: fixes f :: "'a \ real^'n" + assumes "(f ---> l) net" "\ (trivial_limit net)" "eventually (\x. b \ (f x)$i) net" + and i:"i\ {1 .. dimindex(UNIV::'n set)}" + shows "b \ l$i" +proof- + { fix x have "x \ {x::real^'n. basis i \ x \ b} \ x$i \ b" unfolding dot_basis[OF i] by auto } note * = this + show ?thesis using Lim_in_closed_set[of "{x. basis i \ x \ b}" f net l] unfolding * + using closed_halfspace_ge[of b "(basis i)::real^'n"] and assms(1,2,3) by auto +qed + +lemma Lim_component_eq: fixes f :: "'a \ real^'n" + assumes net:"(f ---> l) net" "~(trivial_limit net)" and ev:"eventually (\x. f(x)$i = b) net" + and i:"i\ {1 .. 
dimindex(UNIV::'n set)}" + shows "l$i = b" + using ev[unfolded order_eq_iff eventually_and] using Lim_component_ge[OF net, of b i] and Lim_component_le[OF net, of i b] using i by auto + +lemma Lim_drop_le: fixes f :: "'a \ real^1" shows + "(f ---> l) net \ ~(trivial_limit net) \ eventually (\x. dest_vec1 (f x) \ b) net ==> dest_vec1 l \ b" + using Lim_component_le[of f l net 1 b] unfolding dest_vec1_def and dim1 by auto + +lemma Lim_drop_ge: fixes f :: "'a \ real^1" shows + "(f ---> l) net \ ~(trivial_limit net) \ eventually (\x. b \ dest_vec1 (f x)) net ==> b \ dest_vec1 l" + using Lim_component_ge[of f l net b 1] unfolding dest_vec1_def and dim1 by auto + +text{* Limits relative to a union. *} + +lemma Lim_within_union: + "(f ---> l) (at x within (s \ t)) \ + (f ---> l) (at x within s) \ (f ---> l) (at x within t)" + unfolding Lim_within apply auto apply blast apply blast + apply(erule_tac x=e in allE)+ apply auto + apply(rule_tac x="min d da" in exI) by auto + +lemma continuous_on_union: + assumes "closed s" "closed t" "continuous_on s f" "continuous_on t f" + shows "continuous_on (s \ t) f" + using assms unfolding continuous_on unfolding Lim_within_union + unfolding Lim unfolding trivial_limit_within unfolding closed_limpt by auto + +lemma continuous_on_cases: fixes g :: "real^'m \ real ^'n" + assumes "closed s" "closed t" "continuous_on s f" "continuous_on t g" + "\x. (x\s \ \ P x) \ (x \ t \ P x) \ f x = g x" + shows "continuous_on (s \ t) (\x. if P x then f x else g x)" +proof- + let ?h = "(\x. if P x then f x else g x)" + have "\x\s. f x = (if P x then f x else g x)" using assms(5) by auto + hence "continuous_on s ?h" using continuous_on_eq[of s f ?h] using assms(3) by auto + moreover + have "\x\t. g x = (if P x then f x else g x)" using assms(5) by auto + hence "continuous_on t ?h" using continuous_on_eq[of t g ?h] using assms(4) by auto + ultimately show ?thesis using continuous_on_union[OF assms(1,2), of ?h] by auto +qed + + +text{* Some more convenient intermediate-value theorem formulations. *} + +lemma connected_ivt_hyperplane: fixes y :: "real^'n" + assumes "connected s" "x \ s" "y \ s" "a \ x \ b" "b \ a \ y" + shows "\z \ s. a \ z = b" +proof(rule ccontr) + assume as:"\ (\z\s. a \ z = b)" + let ?A = "{x::real^'n. a \ x < b}" + let ?B = "{x::real^'n. a \ x > b}" + have "open ?A" "open ?B" using open_halfspace_lt and open_halfspace_gt by auto + moreover have "?A \ ?B = {}" by auto + moreover have "s \ ?A \ ?B" using as by auto + ultimately show False using assms(1)[unfolded connected_def not_ex, THEN spec[where x="?A"], THEN spec[where x="?B"]] and assms(2-5) by auto +qed + +lemma connected_ivt_component: fixes x::"real^'n" shows + "connected s \ x \ s \ y \ s \ k \ dimset x \ x$k \ a \ a \ y$k \ (\z\s. z$k = a)" + using connected_ivt_hyperplane[of s x y "(basis k)::real^'n" a] by (auto simp add: dot_basis) + +text{* Also more convenient formulations of monotone convergence. *} + +lemma bounded_increasing_convergent: fixes s::"nat \ real^1" + assumes "bounded {s n| n::nat. True}" "\n. dest_vec1(s n) \ dest_vec1(s(Suc n))" + shows "\l. (s ---> l) sequentially" +proof- + obtain a where a:"\n. \dest_vec1 (s n)\ \ a" using assms(1)[unfolded bounded_def abs_dest_vec1] by auto + { fix m::nat + have "\ n. n\m \ dest_vec1 (s m) \ dest_vec1 (s n)" + apply(induct_tac n) apply simp using assms(2) apply(erule_tac x="na" in allE) by(auto simp add: not_less_eq_eq) } + hence "\m n. m \ n \ dest_vec1 (s m) \ dest_vec1 (s n)" by auto + then obtain l where "\e>0. \N. \n\N. 
\dest_vec1 (s n) - l\ < e" using convergent_bounded_monotone[OF a] by auto + thus ?thesis unfolding Lim_sequentially apply(rule_tac x="vec1 l" in exI) + unfolding dist_def unfolding abs_dest_vec1 and dest_vec1_sub by auto +qed + +subsection{* Basic homeomorphism definitions. *} + +definition "homeomorphism s t f g \ + (\x\s. (g(f x) = x)) \ (f ` s = t) \ continuous_on s f \ + (\y\t. (f(g y) = y)) \ (g ` t = s) \ continuous_on t g" + +definition homeomorphic :: "((real^'a) set) \ ((real^'b) set) \ bool" (infixr "homeomorphic" 60) where + homeomorphic_def: "s homeomorphic t \ (\f g. homeomorphism s t f g)" + +lemma homeomorphic_refl: "s homeomorphic s" + unfolding homeomorphic_def + unfolding homeomorphism_def + using continuous_on_id + apply(rule_tac x = "(\x::real^'a.x)" in exI) + apply(rule_tac x = "(\x::real^'b.x)" in exI) + by blast + +lemma homeomorphic_sym: + "s homeomorphic t \ t homeomorphic s" +unfolding homeomorphic_def +unfolding homeomorphism_def +by blast + +lemma homeomorphic_trans: + assumes "s homeomorphic t" "t homeomorphic u" shows "s homeomorphic u" +proof- + obtain f1 g1 where fg1:"\x\s. g1 (f1 x) = x" "f1 ` s = t" "continuous_on s f1" "\y\t. f1 (g1 y) = y" "g1 ` t = s" "continuous_on t g1" + using assms(1) unfolding homeomorphic_def homeomorphism_def by auto + obtain f2 g2 where fg2:"\x\t. g2 (f2 x) = x" "f2 ` t = u" "continuous_on t f2" "\y\u. f2 (g2 y) = y" "g2 ` u = t" "continuous_on u g2" + using assms(2) unfolding homeomorphic_def homeomorphism_def by auto + + { fix x assume "x\s" hence "(g1 \ g2) ((f2 \ f1) x) = x" using fg1(1)[THEN bspec[where x=x]] and fg2(1)[THEN bspec[where x="f1 x"]] and fg1(2) by auto } + moreover have "(f2 \ f1) ` s = u" using fg1(2) fg2(2) by auto + moreover have "continuous_on s (f2 \ f1)" using continuous_on_compose[OF fg1(3)] and fg2(3) unfolding fg1(2) by auto + moreover { fix y assume "y\u" hence "(f2 \ f1) ((g1 \ g2) y) = y" using fg2(4)[THEN bspec[where x=y]] and fg1(4)[THEN bspec[where x="g2 y"]] and fg2(5) by auto } + moreover have "(g1 \ g2) ` u = s" using fg1(5) fg2(5) by auto + moreover have "continuous_on u (g1 \ g2)" using continuous_on_compose[OF fg2(6)] and fg1(6) unfolding fg2(5) by auto + ultimately show ?thesis unfolding homeomorphic_def homeomorphism_def apply(rule_tac x="f2 \ f1" in exI) apply(rule_tac x="g1 \ g2" in exI) by auto +qed + +lemma homeomorphic_minimal: + "s homeomorphic t \ + (\f g. (\x\s. f(x) \ t \ (g(f(x)) = x)) \ + (\y\t. g(y) \ s \ (f(g(y)) = y)) \ + continuous_on s f \ continuous_on t g)" +unfolding homeomorphic_def homeomorphism_def +apply auto apply (rule_tac x=f in exI) apply (rule_tac x=g in exI) +apply auto apply (rule_tac x=f in exI) apply (rule_tac x=g in exI) apply auto +unfolding image_iff +apply(erule_tac x="g x" in ballE) apply(erule_tac x="x" in ballE) +apply auto apply(rule_tac x="g x" in bexI) apply auto +apply(erule_tac x="f x" in ballE) apply(erule_tac x="x" in ballE) +apply auto apply(rule_tac x="f x" in bexI) by auto + +subsection{* Relatively weak hypotheses if a set is compact. *} + +definition "inv_on f s = (\x. SOME y. y\s \ f y = x)" + +lemma assumes "inj_on f s" "x\s" + shows "inv_on f s (f x) = x" + using assms unfolding inj_on_def inv_on_def by auto + +lemma homeomorphism_compact: + assumes "compact s" "continuous_on s f" "f ` s = t" "inj_on f s" + shows "\g. homeomorphism s t f g" +proof- + def g \ "\x. SOME y. y\s \ f y = x" + have g:"\x\s. 
g (f x) = x" using assms(3) assms(4)[unfolded inj_on_def] unfolding g_def by auto + { fix y assume "y\t" + then obtain x where x:"f x = y" "x\s" using assms(3) by auto + hence "g (f x) = x" using g by auto + hence "f (g y) = y" unfolding x(1)[THEN sym] by auto } + hence g':"\x\t. f (g x) = x" by auto + moreover + { fix x + have "x\s \ x \ g ` t" using g[THEN bspec[where x=x]] unfolding image_iff using assms(3) by(auto intro!: bexI[where x="f x"]) + moreover + { assume "x\g ` t" + then obtain y where y:"y\t" "g y = x" by auto + then obtain x' where x':"x'\s" "f x' = y" using assms(3) by auto + hence "x \ s" unfolding g_def using someI2[of "\b. b\s \ f b = y" x' "\x. x\s"] unfolding y(2)[THEN sym] and g_def by auto } + ultimately have "x\s \ x \ g ` t" by auto } + hence "g ` t = s" by auto + ultimately + show ?thesis unfolding homeomorphism_def homeomorphic_def + apply(rule_tac x=g in exI) using g and assms(3) and continuous_on_inverse[OF assms(2,1), of g, unfolded assms(3)] and assms(2) by auto +qed + +lemma homeomorphic_compact: + "compact s \ continuous_on s f \ (f ` s = t) \ inj_on f s + \ s homeomorphic t" + unfolding homeomorphic_def by(metis homeomorphism_compact) + +text{* Preservation of topological properties. *} + +lemma homeomorphic_compactness: + "s homeomorphic t ==> (compact s \ compact t)" +unfolding homeomorphic_def homeomorphism_def +by (metis compact_continuous_image) + +text{* Results on translation, scaling etc. *} + +lemma homeomorphic_scaling: + assumes "c \ 0" shows "s homeomorphic ((\x. c *s x) ` s)" + unfolding homeomorphic_minimal + apply(rule_tac x="\x. c *s x" in exI) + apply(rule_tac x="\x. (1 / c) *s x" in exI) + apply auto unfolding vector_smult_assoc using assms apply auto + using continuous_on_cmul[OF continuous_on_id] by auto + +lemma homeomorphic_translation: + "s homeomorphic ((\x. a + x) ` s)" + unfolding homeomorphic_minimal + apply(rule_tac x="\x. a + x" in exI) + apply(rule_tac x="\x. -a + x" in exI) + using continuous_on_add[OF continuous_on_const continuous_on_id] by auto + +lemma homeomorphic_affinity: + assumes "c \ 0" shows "s homeomorphic ((\x. a + c *s x) ` s)" +proof- + have *:"op + a ` op *s c ` s = (\x. a + c *s x) ` s" by auto + show ?thesis + using homeomorphic_trans + using homeomorphic_scaling[OF assms, of s] + using homeomorphic_translation[of "(\x. c *s x) ` s" a] unfolding * by auto +qed + +lemma homeomorphic_balls: fixes a b ::"real^'a" + assumes "0 < d" "0 < e" + shows "(ball a d) homeomorphic (ball b e)" (is ?th) + "(cball a d) homeomorphic (cball b e)" (is ?cth) +proof- + have *:"\e / d\ > 0" "\d / e\ >0" using assms using divide_pos_pos by auto + show ?th unfolding homeomorphic_minimal + apply(rule_tac x="\x. b + (e/d) *s (x - a)" in exI) + apply(rule_tac x="\x. a + (d/e) *s (x - b)" in exI) + apply (auto simp add: dist_sym) unfolding dist_def and vector_smult_assoc using assms apply auto + unfolding norm_minus_cancel and norm_mul + using continuous_on_add[OF continuous_on_const continuous_on_cmul[OF continuous_on_sub[OF continuous_on_id continuous_on_const]]] + apply (auto simp add: dist_sym) + using pos_less_divide_eq[OF *(1), THEN sym] unfolding real_mult_commute[of _ "\e / d\"] + using pos_less_divide_eq[OF *(2), THEN sym] unfolding real_mult_commute[of _ "\d / e\"] + by (auto simp add: dist_sym) +next + have *:"\e / d\ > 0" "\d / e\ >0" using assms using divide_pos_pos by auto + show ?cth unfolding homeomorphic_minimal + apply(rule_tac x="\x. b + (e/d) *s (x - a)" in exI) + apply(rule_tac x="\x. 
a + (d/e) *s (x - b)" in exI) + apply (auto simp add: dist_sym) unfolding dist_def and vector_smult_assoc using assms apply auto + unfolding norm_minus_cancel and norm_mul + using continuous_on_add[OF continuous_on_const continuous_on_cmul[OF continuous_on_sub[OF continuous_on_id continuous_on_const]]] + apply auto + using pos_le_divide_eq[OF *(1), THEN sym] unfolding real_mult_commute[of _ "\e / d\"] + using pos_le_divide_eq[OF *(2), THEN sym] unfolding real_mult_commute[of _ "\d / e\"] + by auto +qed + +text{* "Isometry" (up to constant bounds) of injective linear map etc. *} + +lemma cauchy_isometric: + assumes e:"0 < e" and s:"subspace s" and f:"linear f" and normf:"\x\s. norm(f x) \ e * norm(x)" and xs:"\n::nat. x n \ s" and cf:"cauchy(f o x)" + shows "cauchy x" +proof- + { fix d::real assume "d>0" + then obtain N where N:"\n\N. norm (f (x n) - f (x N)) < e * d" + using cf[unfolded cauchy o_def dist_def, THEN spec[where x="e*d"]] and e and mult_pos_pos[of e d] by auto + { fix n assume "n\N" + hence "norm (f (x n - x N)) < e * d" using N[THEN spec[where x=n]] unfolding linear_sub[OF f, THEN sym] by auto + moreover have "e * norm (x n - x N) \ norm (f (x n - x N))" + using subspace_sub[OF s, of "x n" "x N"] using xs[THEN spec[where x=N]] and xs[THEN spec[where x=n]] + using normf[THEN bspec[where x="x n - x N"]] by auto + ultimately have "norm (x n - x N) < d" using `e>0` + using mult_left_less_imp_less[of e "norm (x n - x N)" d] by auto } + hence "\N. \n\N. norm (x n - x N) < d" by auto } + thus ?thesis unfolding cauchy and dist_def by auto +qed + +lemma complete_isometric_image: + assumes "0 < e" and s:"subspace s" and f:"linear f" and normf:"\x\s. norm(f x) \ e * norm(x)" and cs:"complete s" + shows "complete(f ` s)" +proof- + { fix g assume as:"\n::nat. g n \ f ` s" and cfg:"cauchy g" + then obtain x where "\n. x n \ s \ g n = f (x n)" unfolding image_iff and Bex_def + using choice[of "\ n xa. xa \ s \ g n = f xa"] by auto + hence x:"\n. x n \ s" "\n. g n = f (x n)" by auto + hence "f \ x = g" unfolding expand_fun_eq by auto + then obtain l where "l\s" and l:"(x ---> l) sequentially" + using cs[unfolded complete_def, THEN spec[where x="x"]] + using cauchy_isometric[OF `0l\f ` s. (g ---> l) sequentially" + using linear_continuous_at[OF f, unfolded continuous_at_sequentially, THEN spec[where x=x], of l] + unfolding `f \ x = g` by auto } + thus ?thesis unfolding complete_def by auto +qed + +lemma dist_0_norm:"dist 0 x = norm x" unfolding dist_def by(auto simp add: norm_minus_cancel) + +lemma injective_imp_isometric: fixes f::"real^'m \ real^'n" + assumes s:"closed s" "subspace s" and f:"linear f" "\x\s. (f x = 0) \ (x = 0)" + shows "\e>0. \x\s. norm (f x) \ e * norm(x)" +proof(cases "s \ {0::real^'m}") + case True + { fix x assume "x \ s" + hence "x = 0" using True by auto + hence "norm x \ norm (f x)" by auto } + thus ?thesis by(auto intro!: exI[where x=1]) +next + case False + then obtain a where a:"a\0" "a\s" by auto + from False have "s \ {}" by auto + let ?S = "{f x| x. (x \ s \ norm x = norm a)}" + let ?S' = "{x::real^'m. x\s \ norm x = norm a}" + let ?S'' = "{x::real^'m. 
norm x = norm a}" + + have "?S'' = frontier(cball 0 (norm a))" unfolding frontier_cball and dist_def by (auto simp add: norm_minus_cancel) + hence "compact ?S''" using compact_frontier[OF compact_cball, of 0 "norm a"] by auto + moreover have "?S' = s \ ?S''" by auto + ultimately have "compact ?S'" using closed_inter_compact[of s ?S''] using s(1) by auto + moreover have *:"f ` ?S' = ?S" by auto + ultimately have "compact ?S" using compact_continuous_image[OF linear_continuous_on[OF f(1)], of ?S'] by auto + hence "closed ?S" using compact_imp_closed by auto + moreover have "?S \ {}" using a by auto + ultimately obtain b' where "b'\?S" "\y\?S. norm b' \ norm y" using distance_attains_inf[of ?S 0] unfolding dist_0_norm by auto + then obtain b where "b\s" and ba:"norm b = norm a" and b:"\x\{x \ s. norm x = norm a}. norm (f b) \ norm (f x)" unfolding *[THEN sym] unfolding image_iff by auto + + let ?e = "norm (f b) / norm b" + have "norm b > 0" using ba and a and norm_ge_zero by auto + moreover have "norm (f b) > 0" using f(2)[THEN bspec[where x=b], OF `b\s`] using `norm b >0` unfolding zero_less_norm_iff by auto + ultimately have "0 < norm (f b) / norm b" by(simp only: divide_pos_pos) + moreover + { fix x assume "x\s" + hence "norm (f b) / norm b * norm x \ norm (f x)" + proof(cases "x=0") + case True thus "norm (f b) / norm b * norm x \ norm (f x)" by auto + next + case False + hence *:"0 < norm a / norm x" using `a\0` unfolding zero_less_norm_iff[THEN sym] by(simp only: divide_pos_pos) + have "\c. \x\s. c *s x \ s" using s[unfolded subspace_def] by auto + hence "(norm a / norm x) *s x \ {x \ s. norm x = norm a}" using `x\s` and `x\0` by auto + thus "norm (f b) / norm b * norm x \ norm (f x)" using b[THEN bspec[where x="(norm a / norm x) *s x"]] + unfolding linear_cmul[OF f(1)] and norm_mul and ba using `x\0` `a\0` + by (auto simp add: real_mult_commute pos_le_divide_eq pos_divide_le_eq) + qed } + ultimately + show ?thesis by auto +qed + +lemma closed_injective_image_subspace: + assumes "subspace s" "linear f" "\x\s. f x = 0 --> x = 0" "closed s" + shows "closed(f ` s)" +proof- + obtain e where "e>0" and e:"\x\s. e * norm x \ norm (f x)" using injective_imp_isometric[OF assms(4,1,2,3)] by auto + show ?thesis using complete_isometric_image[OF `e>0` assms(1,2) e] and assms(4) + unfolding complete_eq_closed[THEN sym] by auto +qed + +subsection{* Some properties of a canonical subspace. *} + +lemma subspace_substandard: + "subspace {x::real^'n. (\i \ dimset x. d < i \ x$i = 0)}" + unfolding subspace_def by(auto simp add: vector_add_component vector_smult_component elim!: ballE) + +lemma closed_substandard: + "closed {x::real^'n. \i \ dimset x. d < i --> x$i = 0}" (is "closed ?A") +proof- + let ?D = "{Suc d..dimindex(UNIV::('n set))}" + let ?Bs = "{{x::real^'n. basis i \ x = 0}| i. i \ ?D}" + { fix x + { assume "x\?A" + hence x:"\i\?D. d < i \ x $ i = 0" by auto + hence "x\ \ ?Bs" by(auto simp add: dot_basis x) } + moreover + { assume x:"x\\?Bs" + { fix i assume i:"i\dimset x" and "d < i" + hence "i \ ?D" by auto + then obtain B where BB:"B \ ?Bs" and B:"B = {x::real^'n. basis i \ x = 0}" by auto + hence "x $ i = 0" unfolding B unfolding dot_basis[OF i] using x by auto } + hence "x\?A" by auto } + ultimately have "x\?A \ x\ \?Bs" by auto } + hence "?A = \ ?Bs" by auto + thus ?thesis by(auto simp add: closed_Inter closed_hyperplane) +qed + +lemma dim_substandard: + assumes "d \ dimindex(UNIV::'n set)" + shows "dim {x::real^'n. \i \ dimset x. 
d < i --> x$i = 0} = d" (is "dim ?A = d") +proof- + let ?D = "{1..dimindex (UNIV::'n set)}" + let ?B = "(basis::nat\real^'n) ` {1..d}" + + let ?bas = "basis::nat \ real^'n" + + have "?B \ ?A" by (auto simp add: basis_component) + + moreover + { fix x::"real^'n" assume "x\?A" + hence "x\ span ?B" + proof(induct d arbitrary: x) + case 0 hence "x=0" unfolding Cart_eq by auto + thus ?case using subspace_0[OF subspace_span[of "{}"]] by auto + next + case (Suc n) + hence *:"\i\?D. Suc n < i \ x $ i = 0" by auto + have **:"{1..n} \ {1..Suc n}" by auto + def y \ "x - x$(Suc n) *s basis (Suc n)" + have y:"x = y + (x$Suc n) *s basis (Suc n)" unfolding y_def by auto + { fix i assume i:"i\?D" and i':"n < i" + hence "y $ i = 0" unfolding y_def unfolding vector_minus_component[OF i] + and vector_smult_component[OF i] and basis_component[OF i] using i' + using *[THEN bspec[where x=i]] by auto } + hence "y \ span (basis ` {1..Suc n})" using Suc(1)[of y] + using span_mono[of "?bas ` {1..n}" "?bas ` {1..Suc n}"] + using image_mono[OF **, of basis] by auto + moreover + have "basis (Suc n) \ span (?bas ` {1..Suc n})" by(rule span_superset, auto) + hence "x$(Suc n) *s basis (Suc n) \ span (?bas ` {1..Suc n})" using span_mul by auto + ultimately + have "y + x$(Suc n) *s basis (Suc n) \ span (?bas ` {1..Suc n})" + using span_add by auto + thus ?case using y by auto + qed + } + hence "?A \ span ?B" by auto + + moreover + { fix x assume "x \ ?B" + hence "x\{(basis i)::real^'n |i. i \ ?D}" using assms by auto } + hence "independent ?B" using independent_mono[OF independent_stdbasis, of ?B] and assms by auto + + moreover + have "{1..d} \ ?D" unfolding subset_eq using assms by auto + hence *:"inj_on (basis::nat\real^'n) {1..d}" using subset_inj_on[OF basis_inj, of "{1..d}"] using assms by auto + have "?B hassize d" unfolding hassize_def and card_image[OF *] by auto + + ultimately show ?thesis using dim_unique[of "basis ` {1..d}" ?A] by auto +qed + +text{* Hence closure and completeness of all subspaces. *} + +lemma closed_subspace: fixes s::"(real^'n) set" + assumes "subspace s" shows "closed s" +proof- + let ?t = "{x::real^'n. \i\{1..dimindex (UNIV :: 'n set)}. dim s x$i = 0}" + have "dim s \ dimindex (UNIV :: 'n set)" using dim_subset_univ by auto + obtain f where f:"linear f" "f ` ?t = s" "inj_on f ?t" + using subspace_isomorphism[OF subspace_substandard[of "dim s"] assms] + using dim_substandard[OF dim_subset_univ[of s]] by auto + have "\x\?t. f x = 0 \ x = 0" using linear_0[OF f(1)] using f(3)[unfolded inj_on_def] + by(erule_tac x=0 in ballE) auto + moreover have "closed ?t" using closed_substandard by auto + moreover have "subspace ?t" using subspace_substandard by auto + ultimately show ?thesis using closed_injective_image_subspace[of ?t f] + unfolding f(2) using f(1) by auto +qed + +lemma complete_subspace: + "subspace s ==> complete s" + using complete_eq_closed closed_subspace + by auto + +lemma dim_closure: + "dim(closure s) = dim s" (is "?dc = ?d") +proof- + have "?dc \ ?d" using closure_minimal[OF span_inc, of s] + using closed_subspace[OF subspace_span, of s] + using dim_subset[of "closure s" "span s"] unfolding dim_span by auto + thus ?thesis using dim_subset[OF closure_subset, of s] by auto +qed + +text{* Affine transformations of intervals. *} + +lemma affinity_inverses: + assumes m0: "m \ (0::'a::field)" + shows "(\x. m *s x + c) o (\x. inverse(m) *s x + (-(inverse(m) *s c))) = id" + "(\x. inverse(m) *s x + (-(inverse(m) *s c))) o (\x. 
m *s x + c) = id" + using m0 +apply (auto simp add: expand_fun_eq vector_add_ldistrib vector_smult_assoc) +by (simp add: vector_smult_lneg[symmetric] vector_smult_assoc vector_sneg_minus1[symmetric]) + +lemma real_affinity_le: + "0 < (m::'a::ordered_field) ==> (m * x + c \ y \ x \ inverse(m) * y + -(c / m))" + by (simp add: field_simps inverse_eq_divide) + +lemma real_le_affinity: + "0 < (m::'a::ordered_field) ==> (y \ m * x + c \ inverse(m) * y + -(c / m) \ x)" + by (simp add: field_simps inverse_eq_divide) + +lemma real_affinity_lt: + "0 < (m::'a::ordered_field) ==> (m * x + c < y \ x < inverse(m) * y + -(c / m))" + by (simp add: field_simps inverse_eq_divide) + +lemma real_lt_affinity: + "0 < (m::'a::ordered_field) ==> (y < m * x + c \ inverse(m) * y + -(c / m) < x)" + by (simp add: field_simps inverse_eq_divide) + +lemma real_affinity_eq: + "(m::'a::ordered_field) \ 0 ==> (m * x + c = y \ x = inverse(m) * y + -(c / m))" + by (simp add: field_simps inverse_eq_divide) + +lemma real_eq_affinity: + "(m::'a::ordered_field) \ 0 ==> (y = m * x + c \ inverse(m) * y + -(c / m) = x)" + by (simp add: field_simps inverse_eq_divide) + +lemma vector_affinity_eq: + assumes m0: "(m::'a::field) \ 0" + shows "m *s x + c = y \ x = inverse m *s y + -(inverse m *s c)" +proof + assume h: "m *s x + c = y" + hence "m *s x = y - c" by (simp add: ring_simps) + hence "inverse m *s (m *s x) = inverse m *s (y - c)" by simp + then show "x = inverse m *s y + - (inverse m *s c)" + using m0 by (simp add: vector_smult_assoc vector_ssub_ldistrib) +next + assume h: "x = inverse m *s y + - (inverse m *s c)" + show "m *s x + c = y" unfolding h diff_minus[symmetric] + using m0 by (simp add: vector_smult_assoc vector_ssub_ldistrib) +qed + +lemma vector_eq_affinity: + "(m::'a::field) \ 0 ==> (y = m *s x + c \ inverse(m) *s y + -(inverse(m) *s c) = x)" + using vector_affinity_eq[where m=m and x=x and y=y and c=c] + by metis + +lemma image_affinity_interval: fixes m::real + shows "(\x. m *s x + c) ` {a .. b} = + (if {a .. b} = {} then {} + else (if 0 \ m then {m *s a + c .. m *s b + c} + else {m *s b + c .. m *s a + c}))" +proof(cases "m=0") + { fix x assume "x \ c" "c \ x" + hence "x=c" unfolding vector_less_eq_def and Cart_eq by(auto elim!: ballE) } + moreover case True + moreover have "c \ {m *s a + c..m *s b + c}" unfolding True by(auto simp add: vector_less_eq_def) + ultimately show ?thesis by auto +next + case False + { fix y assume "a \ y" "y \ b" "m > 0" + hence "m *s a + c \ m *s y + c" "m *s y + c \ m *s b + c" + unfolding vector_less_eq_def by(auto simp add: vector_smult_component vector_add_component) + } moreover + { fix y assume "a \ y" "y \ b" "m < 0" + hence "m *s b + c \ m *s y + c" "m *s y + c \ m *s a + c" + unfolding vector_less_eq_def by(auto simp add: vector_smult_component vector_add_component mult_left_mono_neg elim!:ballE) + } moreover + { fix y assume "m > 0" "m *s a + c \ y" "y \ m *s b + c" + hence "y \ (\x. m *s x + c) ` {a..b}" + unfolding image_iff Bex_def mem_interval vector_less_eq_def + apply(auto simp add: vector_smult_component vector_add_component vector_minus_component vector_smult_assoc pth_3[symmetric] + intro!: exI[where x="(1 / m) *s (y - c)"]) + by(auto elim!: ballE simp add: pos_le_divide_eq pos_divide_le_eq real_mult_commute) + } moreover + { fix y assume "m *s b + c \ y" "y \ m *s a + c" "m < 0" + hence "y \ (\x. 
m *s x + c) ` {a..b}" + unfolding image_iff Bex_def mem_interval vector_less_eq_def + apply(auto simp add: vector_smult_component vector_add_component vector_minus_component vector_smult_assoc pth_3[symmetric] + intro!: exI[where x="(1 / m) *s (y - c)"]) + by(auto elim!: ballE simp add: neg_le_divide_eq neg_divide_le_eq real_mult_commute) + } + ultimately show ?thesis using False by auto +qed + +subsection{* Banach fixed point theorem (not really topological...) *} + +lemma banach_fix: + assumes s:"complete s" "s \ {}" and c:"0 \ c" "c < 1" and f:"(f ` s) \ s" and + lipschitz:"\x\s. \y\s. dist (f x) (f y) \ c * dist x y" + shows "\! x\s. (f x = x)" +proof- + have "1 - c > 0" using c by auto + + from s(2) obtain z0 where "z0 \ s" by auto + def z \ "\ n::nat. fun_pow n f z0" + { fix n::nat + have "z n \ s" unfolding z_def + proof(induct n) case 0 thus ?case using `z0 \s` by auto + next case Suc thus ?case using f by auto qed } + note z_in_s = this + + def d \ "dist (z 0) (z 1)" + + have fzn:"\n. f (z n) = z (Suc n)" unfolding z_def by auto + { fix n::nat + have "dist (z n) (z (Suc n)) \ (c ^ n) * d" + proof(induct n) + case 0 thus ?case unfolding d_def by auto + next + case (Suc m) + hence "c * dist (z m) (z (Suc m)) \ c ^ Suc m * d" + using `0 \ c` using mult_mono1_class.mult_mono1[of "dist (z m) (z (Suc m))" "c ^ m * d" c] by auto + thus ?case using lipschitz[THEN bspec[where x="z m"], OF z_in_s, THEN bspec[where x="z (Suc m)"], OF z_in_s] + unfolding fzn and mult_le_cancel_left by auto + qed + } note cf_z = this + + { fix n m::nat + have "(1 - c) * dist (z m) (z (m+n)) \ (c ^ m) * d * (1 - c ^ n)" + proof(induct n) + case 0 show ?case by auto + next + case (Suc k) + have "(1 - c) * dist (z m) (z (m + Suc k)) \ (1 - c) * (dist (z m) (z (m + k)) + dist (z (m + k)) (z (Suc (m + k))))" + using dist_triangle and c by(auto simp add: dist_triangle) + also have "\ \ (1 - c) * (dist (z m) (z (m + k)) + c ^ (m + k) * d)" + using cf_z[of "m + k"] and c by auto + also have "\ \ c ^ m * d * (1 - c ^ k) + (1 - c) * c ^ (m + k) * d" + using Suc by (auto simp add: ring_simps) + also have "\ = (c ^ m) * (d * (1 - c ^ k) + (1 - c) * c ^ k * d)" + unfolding power_add by (auto simp add: ring_simps) + also have "\ \ (c ^ m) * d * (1 - c ^ Suc k)" + using c by (auto simp add: ring_simps dist_pos_le) + finally show ?case by auto + qed + } note cf_z2 = this + { fix e::real assume "e>0" + hence "\N. \m n. N \ m \ N \ n \ dist (z m) (z n) < e" + proof(cases "d = 0") + case True + hence "\n. 
z n = z0" using cf_z2[of 0] and c unfolding z_def by (auto simp add: pos_prod_le[OF `1 - c > 0`] dist_le_0) + thus ?thesis using `e>0` by auto + next + case False hence "d>0" unfolding d_def using dist_pos_le[of "z 0" "z 1"] + by (metis False d_def real_less_def) + hence "0 < e * (1 - c) / d" using `e>0` and `1-c>0` + using divide_pos_pos[of "e * (1 - c)" d] and mult_pos_pos[of e "1 - c"] by auto + then obtain N where N:"c ^ N < e * (1 - c) / d" using real_arch_pow_inv[of "e * (1 - c) / d" c] and c by auto + { fix m n::nat assume "m>n" and as:"m\N" "n\N" + have *:"c ^ n \ c ^ N" using `n\N` and c using power_decreasing[OF `n\N`, of c] by auto + have "1 - c ^ (m - n) > 0" using c and power_strict_mono[of c 1 "m - n"] using `m>n` by auto + hence **:"d * (1 - c ^ (m - n)) / (1 - c) > 0" + using real_mult_order[OF `d>0`, of "1 - c ^ (m - n)"] + using divide_pos_pos[of "d * (1 - c ^ (m - n))" "1 - c"] + using `0 < 1 - c` by auto + + have "dist (z m) (z n) \ c ^ n * d * (1 - c ^ (m - n)) / (1 - c)" + using cf_z2[of n "m - n"] and `m>n` unfolding pos_le_divide_eq[OF `1-c>0`] + by (auto simp add: real_mult_commute dist_sym) + also have "\ \ c ^ N * d * (1 - c ^ (m - n)) / (1 - c)" + using mult_right_mono[OF * order_less_imp_le[OF **]] + unfolding real_mult_assoc by auto + also have "\ < (e * (1 - c) / d) * d * (1 - c ^ (m - n)) / (1 - c)" + using mult_strict_right_mono[OF N **] unfolding real_mult_assoc by auto + also have "\ = e * (1 - c ^ (m - n))" using c and `d>0` and `1 - c > 0` by auto + also have "\ \ e" using c and `1 - c ^ (m - n) > 0` and `e>0` using mult_right_le_one_le[of e "1 - c ^ (m - n)"] by auto + finally have "dist (z m) (z n) < e" by auto + } note * = this + { fix m n::nat assume as:"N\m" "N\n" + hence "dist (z n) (z m) < e" + proof(cases "n = m") + case True thus ?thesis using `e>0` by auto + next + case False thus ?thesis using as and *[of n m] *[of m n] unfolding nat_neq_iff by (auto simp add: dist_sym) + qed } + thus ?thesis by auto + qed + } + hence "cauchy z" unfolding cauchy_def by auto + then obtain x where "x\s" and x:"(z ---> x) sequentially" using s(1)[unfolded compact_def complete_def, THEN spec[where x=z]] and z_in_s by auto + + def e \ "dist (f x) x" + have "e = 0" proof(rule ccontr) + assume "e \ 0" hence "e>0" unfolding e_def using dist_pos_le[of "f x" x] + by (metis dist_eq_0 dist_nz dist_sym e_def) + then obtain N where N:"\n\N. 
dist (z n) x < e / 2" + using x[unfolded Lim_sequentially, THEN spec[where x="e/2"]] by auto + hence N':"dist (z N) x < e / 2" by auto + + have *:"c * dist (z N) x \ dist (z N) x" unfolding mult_le_cancel_right2 + using dist_pos_le[of "z N" x] and c + by (metis dist_eq_0 dist_nz dist_sym order_less_asym real_less_def) + have "dist (f (z N)) (f x) \ c * dist (z N) x" using lipschitz[THEN bspec[where x="z N"], THEN bspec[where x=x]] + using z_in_s[of N] `x\s` using c by auto + also have "\ < e / 2" using N' and c using * by auto + finally show False unfolding fzn + using N[THEN spec[where x="Suc N"]] and dist_triangle_half_r[of "z (Suc N)" "f x" e x] + unfolding e_def by auto + qed + hence "f x = x" unfolding e_def and dist_eq_0 by auto + moreover + { fix y assume "f y = y" "y\s" + hence "dist x y \ c * dist x y" using lipschitz[THEN bspec[where x=x], THEN bspec[where x=y]] + using `x\s` and `f x = x` by auto + hence "dist x y = 0" unfolding mult_le_cancel_right1 + using c and dist_pos_le[of x y] by auto + hence "y = x" unfolding dist_eq_0 by auto + } + ultimately show ?thesis unfolding Bex1_def using `x\s` by blast+ +qed + +subsection{* Edelstein fixed point theorem. *} + +lemma edelstein_fix: + assumes s:"compact s" "s \ {}" and gs:"(g ` s) \ s" + and dist:"\x\s. \y\s. x \ y \ dist (g x) (g y) < dist x y" + shows "\! x::real^'a\s. g x = x" +proof(cases "\x\s. g x \ x") + obtain x where "x\s" using s(2) by auto + case False hence g:"\x\s. g x = x" by auto + { fix y assume "y\s" + hence "x = y" using `x\s` and dist[THEN bspec[where x=x], THEN bspec[where x=y]] + unfolding g[THEN bspec[where x=x], OF `x\s`] + unfolding g[THEN bspec[where x=y], OF `y\s`] by auto } + thus ?thesis unfolding Bex1_def using `x\s` and g by blast+ +next + case True + then obtain x where [simp]:"x\s" and "g x \ x" by auto + { fix x y assume "x \ s" "y \ s" + hence "dist (g x) (g y) \ dist x y" + using dist[THEN bspec[where x=x], THEN bspec[where x=y]] by auto } note dist' = this + def y \ "g x" + have [simp]:"y\s" unfolding y_def using gs[unfolded image_subset_iff] and `x\s` by blast + def f \ "\ n. fun_pow n g" + have [simp]:"\n z. g (f n z) = f (Suc n) z" unfolding f_def by auto + have [simp]:"\z. f 0 z = z" unfolding f_def by auto + { fix n::nat and z assume "z\s" + have "f n z \ s" unfolding f_def + proof(induct n) + case 0 thus ?case using `z\s` by simp + next + case (Suc n) thus ?case using gs[unfolded image_subset_iff] by auto + qed } note fs = this + { fix m n ::nat assume "m\n" + fix w z assume "w\s" "z\s" + have "dist (f n w) (f n z) \ dist (f m w) (f m z)" using `m\n` + proof(induct n) + case 0 thus ?case by auto + next + case (Suc n) + thus ?case proof(cases "m\n") + case True thus ?thesis using Suc(1) + using dist'[OF fs fs, OF `w\s` `z\s`, of n n] by auto + next + case False hence mn:"m = Suc n" using Suc(2) by simp + show ?thesis unfolding mn by auto + qed + qed } note distf = this + + def h \ "\n. pastecart (f n x) (f n y)" + let ?s2 = "{pastecart x y |x y. x \ s \ y \ s}" + obtain l r where "l\?s2" and r:"\m n. 
m < n \ r m < r n" and lr:"((h \ r) ---> l) sequentially" + using compact_pastecart[OF s(1) s(1), unfolded compact_def, THEN spec[where x=h]] unfolding h_def + using fs[OF `x\s`] and fs[OF `y\s`] by blast + def a \ "fstcart l" def b \ "sndcart l" + have lab:"l = pastecart a b" unfolding a_def b_def and pastecart_fst_snd by simp + have [simp]:"a\s" "b\s" unfolding a_def b_def using `l\?s2` by auto + + have "continuous_on UNIV fstcart" and "continuous_on UNIV sndcart" + using linear_continuous_on using linear_fstcart and linear_sndcart by auto + hence lima:"((fstcart \ (h \ r)) ---> a) sequentially" and limb:"((sndcart \ (h \ r)) ---> b) sequentially" + unfolding atomize_conj unfolding continuous_on_sequentially + apply(erule_tac x="h \ r" in allE) apply(erule_tac x="h \ r" in allE) using lr + unfolding o_def and h_def a_def b_def by auto + + { fix n::nat + have *:"\fx fy x y. dist fx fy \ dist x y \ \ (dist (fx - fy) (a - b) < dist a b - dist x y)" unfolding dist_def by norm + { fix x y ::"real^'a" + have "dist (-x) (-y) = dist x y" unfolding dist_def + using norm_minus_cancel[of "x - y"] by (auto simp add: uminus_add_conv_diff) } note ** = this + + { assume as:"dist a b > dist (f n x) (f n y)" + then obtain Na Nb where "\m\Na. dist (f (r m) x) a < (dist a b - dist (f n x) (f n y)) / 2" + and "\m\Nb. dist (f (r m) y) b < (dist a b - dist (f n x) (f n y)) / 2" + using lima limb unfolding h_def Lim_sequentially by (fastsimp simp del: Arith_Tools.less_divide_eq_number_of1) + hence "dist (f (r (Na + Nb + n)) x - f (r (Na + Nb + n)) y) (a - b) < dist a b - dist (f n x) (f n y)" + apply(erule_tac x="Na+Nb+n" in allE) + apply(erule_tac x="Na+Nb+n" in allE) apply simp + using dist_triangle_add_half[of a "f (r (Na + Nb + n)) x" "dist a b - dist (f n x) (f n y)" + "-b" "- f (r (Na + Nb + n)) y"] + unfolding ** unfolding group_simps(12) by (auto simp add: dist_sym) + moreover + have "dist (f (r (Na + Nb + n)) x - f (r (Na + Nb + n)) y) (a - b) \ dist a b - dist (f n x) (f n y)" + using distf[of n "r (Na+Nb+n)", OF _ `x\s` `y\s`] + using monotone_bigger[OF r, of "Na+Nb+n"] + using *[of "f (r (Na + Nb + n)) x" "f (r (Na + Nb + n)) y" "f n x" "f n y"] by auto + ultimately have False by simp + } + hence "dist a b \ dist (f n x) (f n y)" by(rule ccontr)auto } + note ab_fn = this + + have [simp]:"a = b" proof(rule ccontr) + def e \ "dist a b - dist (g a) (g b)" + assume "a\b" hence "e > 0" unfolding e_def using dist by fastsimp + hence "\n. dist (f n x) a < e/2 \ dist (f n y) b < e/2" + using lima limb unfolding Lim_sequentially + apply (auto elim!: allE[where x="e/2"]) apply(rule_tac x="r (max N Na)" in exI) unfolding h_def by fastsimp + then obtain n where n:"dist (f n x) a < e/2 \ dist (f n y) b < e/2" by auto + have "dist (f (Suc n) x) (g a) \ dist (f n x) a" + using dist[THEN bspec[where x="f n x"], THEN bspec[where x="a"]] and fs by auto + moreover have "dist (f (Suc n) y) (g b) \ dist (f n y) b" + using dist[THEN bspec[where x="f n y"], THEN bspec[where x="b"]] and fs by auto + ultimately have "dist (f (Suc n) x) (g a) + dist (f (Suc n) y) (g b) < e" using n by auto + thus False unfolding e_def using ab_fn[of "Suc n"] by norm + qed + + have [simp]:"\n. 
f (Suc n) x = f n y" unfolding f_def y_def by(induct_tac n)auto + { fix x y assume "x\s" "y\s" moreover + fix e::real assume "e>0" ultimately + have "dist y x < e \ dist (g y) (g x) < e" using dist by fastsimp } + hence "continuous_on s g" unfolding continuous_on_def by auto + + hence "((sndcart \ h \ r) ---> g a) sequentially" unfolding continuous_on_sequentially + apply (rule allE[where x="\n. (fstcart \ h \ r) n"]) apply (erule ballE[where x=a]) + using lima unfolding h_def o_def using fs[OF `x\s`] by (auto simp add: y_def) + hence "g a = a" using Lim_unique[OF trivial_limit_sequentially limb, of "g a"] + unfolding `a=b` and o_assoc by auto + moreover + { fix x assume "x\s" "g x = x" "x\a" + hence "False" using dist[THEN bspec[where x=a], THEN bspec[where x=x]] + using `g a = a` and `a\s` by auto } + ultimately show "\!x\s. g x = x" unfolding Bex1_def using `a\s` by blast +qed + +end \ No newline at end of file diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/List.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Nat.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/NatBin.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Nominal/Nominal.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Nominal/nominal_inductive.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Nominal/nominal_inductive2.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Nominal/nominal_primrec.ML --- a/src/HOL/Nominal/nominal_primrec.ML Thu Mar 05 02:20:06 2009 +0100 +++ b/src/HOL/Nominal/nominal_primrec.ML Thu Mar 05 02:24:36 2009 +0100 @@ -374,7 +374,9 @@ in lthy'' |> LocalTheory.note Thm.theoremK ((qualify (Binding.name "simps"), - [Attrib.internal (K Simplifier.simp_add)]), maps snd simps') + map (Attrib.internal o K) + [Simplifier.simp_add, Nitpick_Const_Simp_Thms.add]), + maps snd simps') |> snd end) [goals] |> diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/NumberTheory/Chinese.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/NumberTheory/IntPrimes.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Option.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Option.thy Thu Mar 05 02:24:36 2009 +0100 @@ -0,0 +1,124 @@ +(* Title: HOL/Option.thy + Author: Folklore +*) + +header {* Datatype option *} + +theory Option +imports Datatype +begin + +datatype 'a option = None | Some 'a + +lemma not_None_eq [iff]: "(x ~= None) = (EX y. x = Some y)" + by (induct x) auto + +lemma not_Some_eq [iff]: "(ALL y. x ~= Some y) = (x = None)" + by (induct x) auto + +text{*Although it may appear that both of these equalities are helpful +only when applied to assumptions, in practice it seems better to give +them the uniform iff attribute. *} + +lemma option_caseE: + assumes c: "(case x of None => P | Some y => Q y)" + obtains + (None) "x = None" and P + | (Some) y where "x = Some y" and "Q y" + using c by (cases x) simp_all + +lemma insert_None_conv_UNIV: "insert None (range Some) = UNIV" + by (rule set_ext, case_tac x) auto + +lemma inj_Some [simp]: "inj_on Some A" + by (rule inj_onI) simp + + +subsubsection {* Operations *} + +primrec the :: "'a option => 'a" where +"the (Some x) = x" + +primrec set :: "'a option => 'a set" where +"set None = {}" | +"set (Some x) = {x}" + +lemma ospec [dest]: "(ALL x:set A. 
P x) ==> A = Some x ==> P x" + by simp + +declaration {* fn _ => + Classical.map_cs (fn cs => cs addSD2 ("ospec", thm "ospec")) +*} + +lemma elem_set [iff]: "(x : set xo) = (xo = Some x)" + by (cases xo) auto + +lemma set_empty_eq [simp]: "(set xo = {}) = (xo = None)" + by (cases xo) auto + +definition + map :: "('a \ 'b) \ 'a option \ 'b option" +where + [code del]: "map = (%f y. case y of None => None | Some x => Some (f x))" + +lemma option_map_None [simp, code]: "map f None = None" + by (simp add: map_def) + +lemma option_map_Some [simp, code]: "map f (Some x) = Some (f x)" + by (simp add: map_def) + +lemma option_map_is_None [iff]: + "(map f opt = None) = (opt = None)" + by (simp add: map_def split add: option.split) + +lemma option_map_eq_Some [iff]: + "(map f xo = Some y) = (EX z. xo = Some z & f z = y)" + by (simp add: map_def split add: option.split) + +lemma option_map_comp: + "map f (map g opt) = map (f o g) opt" + by (simp add: map_def split add: option.split) + +lemma option_map_o_sum_case [simp]: + "map f o sum_case g h = sum_case (map f o g) (map f o h)" + by (rule ext) (simp split: sum.split) + + +hide (open) const set map + +subsubsection {* Code generator setup *} + +definition + is_none :: "'a option \ bool" where + is_none_none [code post, symmetric, code inline]: "is_none x \ x = None" + +lemma is_none_code [code]: + shows "is_none None \ True" + and "is_none (Some x) \ False" + unfolding is_none_none [symmetric] by simp_all + +hide (open) const is_none + +code_type option + (SML "_ option") + (OCaml "_ option") + (Haskell "Maybe _") + +code_const None and Some + (SML "NONE" and "SOME") + (OCaml "None" and "Some _") + (Haskell "Nothing" and "Just") + +code_instance option :: eq + (Haskell -) + +code_const "eq_class.eq \ 'a\eq option \ 'a option \ bool" + (Haskell infixl 4 "==") + +code_reserved SML + option NONE SOME + +code_reserved OCaml + option None Some + +end diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Power.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Presburger.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/RComplete.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Rational.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/RealDef.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/RealVector.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Ring_and_Field.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/SEQ.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/SetInterval.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Statespace/state_space.ML --- a/src/HOL/Statespace/state_space.ML Thu Mar 05 02:20:06 2009 +0100 +++ b/src/HOL/Statespace/state_space.ML Thu Mar 05 02:24:36 2009 +0100 @@ -611,7 +611,7 @@ Syntax.const "StateFun.lookup"$Syntax.free (project_name T)$Syntax.free n$s | NONE => if get_silent (Context.Proof ctxt) - then Syntax.const "StateFun.lookup"$Syntax.const "arbitrary"$Syntax.free n$s + then Syntax.const "StateFun.lookup" $ Syntax.const "undefined" $ Syntax.free n $ s else raise TERM ("StateSpace.gen_lookup_tr: component " ^ n ^ " not defined",[])); fun lookup_tr ctxt [s,Free (n,_)] = gen_lookup_tr ctxt s n; @@ -637,8 +637,8 @@ | NONE => if get_silent (Context.Proof ctxt) then Syntax.const "StateFun.update"$ - Syntax.const "arbitrary"$Syntax.const "arbitrary"$ - Syntax.free n$(Syntax.const KN $ v)$s + Syntax.const "undefined" $ Syntax.const "undefined" $ + Syntax.free n $ (Syntax.const KN $ v) $ s else raise TERM ("StateSpace.gen_update_tr: component " ^ n ^ " not defined",[])) end; diff -r 61811c9224a6 -r dcf30c9861c3 
src/HOL/Tools/Qelim/presburger.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Tools/atp_wrapper.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Tools/datatype_codegen.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Tools/datatype_package.ML --- a/src/HOL/Tools/datatype_package.ML Thu Mar 05 02:20:06 2009 +0100 +++ b/src/HOL/Tools/datatype_package.ML Thu Mar 05 02:24:36 2009 +0100 @@ -629,14 +629,6 @@ (** a datatype antiquotation **) -local - -val sym_datatype = Pretty.command "datatype"; -val sym_binder = Pretty.str "\\ {\\isacharequal}"; (*FIXME use proper symbol*) -val sym_sep = Pretty.str "{\\isacharbar}\\ "; - -in - fun args_datatype (ctxt, args) = let val (tyco, (ctxt', args')) = Args.tyname (ctxt, args); @@ -654,26 +646,19 @@ in if member (op =) s " " then Pretty.enclose "(" ")" [p] else p end; - fun pretty_constr (co, []) = - Syntax.pretty_term ctxt (Const (co, ty)) - | pretty_constr (co, [ty']) = - (Pretty.block o Pretty.breaks) - [Syntax.pretty_term ctxt (Const (co, ty' --> ty)), - pretty_typ_br ty'] - | pretty_constr (co, tys) = - (Pretty.block o Pretty.breaks) - (Syntax.pretty_term ctxt (Const (co, tys ---> ty)) :: - map pretty_typ_br tys); + fun pretty_constr (co, tys) = + (Pretty.block o Pretty.breaks) + (Syntax.pretty_term ctxt (Const (co, tys ---> ty)) :: + map pretty_typ_br tys); in Pretty.block - (sym_datatype :: Pretty.brk 1 :: + (Pretty.command "datatype" :: Pretty.brk 1 :: Syntax.pretty_typ ctxt ty :: - sym_binder :: Pretty.brk 1 :: - flat (separate [Pretty.brk 1, sym_sep] + Pretty.str " =" :: Pretty.brk 1 :: + flat (separate [Pretty.brk 1, Pretty.str "| "] (map (single o pretty_constr) cos))) end -end; (** package setup **) diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Tools/inductive_package.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Tools/inductive_set_package.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Tools/int_factor_simprocs.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Tools/refute.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Tools/res_atp.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Tools/res_clause.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Tools/res_hol_clause.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Tools/sat_solver.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Tools/specification_package.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Transitive_Closure.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOL/Word/Num_Lemmas.thy diff -r 61811c9224a6 -r dcf30c9861c3 src/HOLCF/Tools/domain/domain_axioms.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/HOLCF/Tools/fixrec_package.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Provers/blast.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/General/binding.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/General/name_space.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/IsaMakefile diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Isar/ROOT.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Isar/attrib.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Isar/class.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Isar/code.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Isar/constdefs.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Isar/element.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Isar/expression.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Isar/isar_cmd.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Isar/isar_syn.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Isar/local_defs.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Isar/method.ML diff -r 61811c9224a6 -r dcf30c9861c3 
src/Pure/Isar/obtain.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Isar/proof_context.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Isar/theory_target.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/ML/ml_thms.ML --- a/src/Pure/ML/ml_thms.ML Thu Mar 05 02:20:06 2009 +0100 +++ b/src/Pure/ML/ml_thms.ML Thu Mar 05 02:24:36 2009 +0100 @@ -48,25 +48,30 @@ (* ad-hoc goals *) +val and_ = Args.$$$ "and"; val by = Args.$$$ "by"; -val goal = Scan.unless (Scan.lift by) Args.prop; +val goal = Scan.unless (by || and_) Args.name; val _ = ML_Context.add_antiq "lemma" - (fn pos => Args.context -- Args.mode "open" -- Scan.repeat1 goal -- - Scan.lift (by |-- Method.parse -- Scan.option Method.parse) >> - (fn (((ctxt, is_open), props), methods) => fn {struct_name, background} => + (fn pos => Args.context -- Args.mode "open" -- + Scan.lift (OuterParse.and_list1 (Scan.repeat1 goal) -- + (by |-- Method.parse -- Scan.option Method.parse)) >> + (fn ((ctxt, is_open), (raw_propss, methods)) => fn {struct_name, background} => let + val propss = burrow (map (rpair []) o Syntax.read_props ctxt) raw_propss; val i = serial (); val prep_result = Goal.norm_result #> Thm.default_position pos #> not is_open ? Thm.close_derivation; - fun after_qed [res] goal_ctxt = - put_thms (i, map prep_result (ProofContext.export goal_ctxt ctxt res)) goal_ctxt; + fun after_qed res goal_ctxt = + put_thms (i, map prep_result (ProofContext.export goal_ctxt ctxt (flat res))) goal_ctxt; val ctxt' = ctxt - |> Proof.theorem_i NONE after_qed [map (rpair []) props] + |> Proof.theorem_i NONE after_qed propss |> Proof.global_terminal_proof methods; val (a, background') = background |> ML_Antiquote.variant "lemma" ||> put_thms (i, the_thms ctxt' i); - val ml = (thm_bind (if length props = 1 then "thm" else "thms") a i, struct_name ^ "." ^ a); + val ml = + (thm_bind (if length (flat propss) = 1 then "thm" else "thms") a i, + struct_name ^ "." ^ a); in (K ml, background') end)); end; diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Proof/reconstruct.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/Tools/ROOT.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/axclass.ML --- a/src/Pure/axclass.ML Thu Mar 05 02:20:06 2009 +0100 +++ b/src/Pure/axclass.ML Thu Mar 05 02:24:36 2009 +0100 @@ -234,7 +234,10 @@ val map_inst_params = AxClassData.map o apsnd o apsnd; fun get_inst_param thy (c, tyco) = - (the o Symtab.lookup ((the o Symtab.lookup (fst (get_inst_params thy))) c)) tyco; + case Symtab.lookup ((the_default Symtab.empty o Symtab.lookup (fst (get_inst_params thy))) c) tyco + of SOME c' => c' + | NONE => error ("No instance parameter for constant " ^ quote c + ^ " on type constructor " ^ quote tyco); fun add_inst_param (c, tyco) inst = (map_inst_params o apfst o Symtab.map_default (c, Symtab.empty)) (Symtab.update_new (tyco, inst)) diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/pure_thy.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/sign.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/sorts.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Pure/term.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Tools/auto_solve.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Tools/code/code_funcgr.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Tools/code/code_target.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/Tools/code/code_thingol.ML diff -r 61811c9224a6 -r dcf30c9861c3 src/ZF/Tools/inductive_package.ML
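
A quick usage sketch for the new HOL/Option.thy interface introduced by this changeset (illustration only, not part of the patch; it assumes a theory context that imports the new Option theory, and uses the qualified constant names forced by the `hide (open) const set map` declaration in that hunk):

    lemma "Option.map f (Some x) = Some (f x)"
      by simp

    lemma "y : Option.set xo ==> Option.map f xo = Some (f y)"
      by (cases xo) auto

The first goal is closed directly by the [simp] rule option_map_Some added in the new theory; the second reduces membership in Option.set xo to xo = Some y via elem_set [iff] and then rewrites with option_map_Some.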