# HG changeset patch # User ballarin # Date 1272997822 -7200 # Node ID 8629ac3efb197cac7f9e641eb9b23805b873a415 # Parent aace7a9694103e637d3c077c36eb26f0d19e6214# Parent d65f07abfa7c512674fe898850a48d4d8fa98bcc Merged. diff -r aace7a969410 -r 8629ac3efb19 Admin/Mercurial/isabelle-style.diff --- a/Admin/Mercurial/isabelle-style.diff Tue May 04 19:57:55 2010 +0200 +++ b/Admin/Mercurial/isabelle-style.diff Tue May 04 20:30:22 2010 +0200 @@ -23,7 +23,16 @@ diff -u gitweb/map isabelle/map --- gitweb/map 2010-02-01 16:34:34.000000000 +0100 -+++ isabelle/map 2010-03-03 15:13:25.000000000 +0100 ++++ isabelle/map 2010-04-29 23:43:54.000000000 +0200 +@@ -78,7 +78,7 @@ + + + {author|user}@{rev} ++ title="{node|short}: {desc|escape}">{author|user}@{rev} + +
{linenumber}
+
{line|escape}
@@ -206,9 +206,10 @@ {date|age} @@ -36,3 +45,12 @@ {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag} +@@ -225,6 +226,7 @@ + {desc|strip|firstline|escape|nonempty} + + ++ {author|person} + + file | diff | annotate {rename%filelogrename} + ' +Only in isabelle/: map~ diff -r aace7a969410 -r 8629ac3efb19 Admin/isatest/isatest-stats --- a/Admin/isatest/isatest-stats Tue May 04 19:57:55 2010 +0200 +++ b/Admin/isatest/isatest-stats Tue May 04 20:30:22 2010 +0200 @@ -6,7 +6,7 @@ THIS=$(cd "$(dirname "$0")"; pwd -P) -PLATFORMS="at-poly at-poly-test at64-poly mac-poly-M4 mac-poly64-M4 mac-poly-M8 mac-poly64-M8 at-poly-5.1-para-e at64-poly-5.1-para at-mac-poly-5.1-para afp at-sml-dev sun-poly" +PLATFORMS="at-poly at-poly-test at64-poly cygwin-poly mac-poly-M4 mac-poly64-M4 mac-poly-M8 mac-poly64-M8 at-poly-5.1-para-e at64-poly-5.1-para at-mac-poly-5.1-para afp at-sml-dev sun-poly" ISABELLE_SESSIONS="\ HOL-Plain \ diff -r aace7a969410 -r 8629ac3efb19 CONTRIBUTORS --- a/CONTRIBUTORS Tue May 04 19:57:55 2010 +0200 +++ b/CONTRIBUTORS Tue May 04 20:30:22 2010 +0200 @@ -6,6 +6,14 @@ Contributions to this Isabelle version -------------------------------------- +* April 2010, Florian Haftmann, TUM + Reorganization of abstract algebra type classes. + +* April 2010, Florian Haftmann, TUM + Code generation for data representations involving invariants; + various collections avaiable in theories Fset, Dlist, RBT, + Mapping and AssocList. + Contributions to Isabelle2009-1 ------------------------------- diff -r aace7a969410 -r 8629ac3efb19 NEWS --- a/NEWS Tue May 04 19:57:55 2010 +0200 +++ b/NEWS Tue May 04 20:30:22 2010 +0200 @@ -64,6 +64,11 @@ * Type constructors admit general mixfix syntax, not just infix. +* Concrete syntax may be attached to local entities without a proof +body, too. This works via regular mixfix annotations for 'fix', +'def', 'obtain' etc. or via the explicit 'write' command, which is +similar to the 'notation' command in theory specifications. + * Use of cumulative prems via "!" in some proof methods has been discontinued (legacy feature). @@ -84,6 +89,17 @@ *** Pure *** +* Predicates of locales introduces by classes carry a mandatory "class" +prefix. INCOMPATIBILITY. + +* 'code_reflect' allows to incorporate generated ML code into +runtime environment; replaces immature code_datatype antiquotation. +INCOMPATIBILITY. + +* Empty class specifications observe default sort. INCOMPATIBILITY. + +* Old 'axclass' has been discontinued. Use 'class' instead. INCOMPATIBILITY. + * Code generator: simple concept for abstract datatypes obeying invariants. * Local theory specifications may depend on extra type variables that @@ -103,6 +119,9 @@ datatype constructors have been renamed from InfixName to Infix etc. Minor INCOMPATIBILITY. +* Command 'example_proof' opens an empty proof body. This allows to +experiment with Isar, without producing any persistent result. + * Commands 'type_notation' and 'no_type_notation' declare type syntax within a local theory context, with explicit checking of the constructors involved (in contrast to the raw 'syntax' versions). @@ -111,6 +130,9 @@ context -- without introducing dependencies on parameters or assumptions, which is not possible in Isabelle/Pure. +* Command 'defaultsort' is renamed to 'default_sort', it works within +a local theory context. Minor INCOMPATIBILITY. + * Proof terms: Type substitutions on proof constants now use canonical order of type variables. INCOMPATIBILITY: Tools working with proof terms may need to be adapted. 
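[Illustrative aside, not part of the changeset: the 'example_proof' command announced in the NEWS entries above opens a throw-away Isar body in which the new 'write' command and mixfix annotations on 'fix' can also be tried out. The sketch below uses arbitrary free variables x, A, B, mirrors the intersection step from the Framework examples further down in this changeset, and is closed by a regular 'qed' as the entry describes.]

example_proof
  assume "x \<in> A" and "x \<in> B"
  then have "x \<in> A \<inter> B" ..
qed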
@@ -118,12 +140,8 @@ *** HOL *** -* Abstract algebra: - * class division_by_zero includes division_ring; - * numerous lemmas have been ported from field to division_ring; - * dropped lemma eq_minus_self_iff which is a duplicate for equal_neg_zero. - - INCOMPATIBILITY. +* Theory 'Finite_Set': various folding_* locales facilitate the application +of the various fold combinators on finite sets. * Library theory 'RBT' renamed to 'RBT_Impl'; new library theory 'RBT' provides abstract red-black tree type which is backed by RBT_Impl @@ -153,16 +171,11 @@ INCOMPATIBILITY. * Some generic constants have been put to appropriate theories: - - less_eq, less: Orderings - zero, one, plus, minus, uminus, times, abs, sgn: Groups - inverse, divide: Rings - + * less_eq, less: Orderings + * zero, one, plus, minus, uminus, times, abs, sgn: Groups + * inverse, divide: Rings INCOMPATIBILITY. -* Class division_ring also requires proof of fact divide_inverse. However instantiation -of parameter divide has also been required previously. INCOMPATIBILITY. - * More consistent naming of type classes involving orderings (and lattices): lower_semilattice ~> semilattice_inf @@ -214,33 +227,18 @@ INCOMPATIBILITY. -* HOLogic.strip_psplit: types are returned in syntactic order, similar -to other strip and tuple operations. INCOMPATIBILITY. - -* Various old-style primrec specifications in the HOL theories have been -replaced by new-style primrec, especially in theory List. The corresponding -constants now have authentic syntax. INCOMPATIBILITY. - -* Reorganized theory Multiset: swapped notation of pointwise and multiset order: - * pointwise ordering is instance of class order with standard syntax <= and <; - * multiset ordering has syntax <=# and <#; partial order properties are provided - by means of interpretation with prefix multiset_order. -Less duplication, less historical organization of sections, -conversion from associations lists to multisets, rudimentary code generation. -Use insert_DiffM2 [symmetric] instead of elem_imp_eq_diff_union, if needed. -INCOMPATIBILITY. - -* Reorganized theory Sum_Type; Inl and Inr now have authentic syntax. -INCOMPATIBILITY. - -* Code generation: ML and OCaml code is decorated with signatures. - -* Theory Complete_Lattice: lemmas top_def and bot_def have been -replaced by the more convenient lemmas Inf_empty and Sup_empty. -Dropped lemmas Inf_insert_simp and Sup_insert_simp, which are subsumed -by Inf_insert and Sup_insert. Lemmas Inf_UNIV and Sup_UNIV replace -former Inf_Univ and Sup_Univ. Lemmas inf_top_right and sup_bot_right -subsume inf_top and sup_bot respectively. INCOMPATIBILITY. +* Refined field classes: + * classes division_ring_inverse_zero, field_inverse_zero, linordered_field_inverse_zero + include rule inverse 0 = 0 -- subsumes former division_by_zero class. + * numerous lemmas have been ported from field to division_ring; + INCOMPATIBILITY. + +* Refined algebra theorem collections: + * dropped theorem group group_simps, use algebra_simps instead; + * dropped theorem group ring_simps, use field_simps instead; + * proper theorem collection field_simps subsumes former theorem groups field_eq_simps and field_simps; + * dropped lemma eq_minus_self_iff which is a duplicate for equal_neg_zero. + INCOMPATIBILITY. 
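[Illustrative aside, not part of the changeset: per the entry above, proofs citing the dropped collections group_simps and ring_simps should now cite algebra_simps and field_simps respectively. A minimal sketch of a distributivity step using the retained algebra_simps collection:]

lemma "(a + b) * c = a * c + b * (c::int)"
  by (simp add: algebra_simps)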
* Theory Finite_Set and List: some lemmas have been generalized from sets to lattices: @@ -256,6 +254,27 @@ INTER_fold_inter ~> INFI_fold_inf UNION_fold_union ~> SUPR_fold_sup +* Theory Complete_Lattice: lemmas top_def and bot_def have been +replaced by the more convenient lemmas Inf_empty and Sup_empty. +Dropped lemmas Inf_insert_simp and Sup_insert_simp, which are subsumed +by Inf_insert and Sup_insert. Lemmas Inf_UNIV and Sup_UNIV replace +former Inf_Univ and Sup_Univ. Lemmas inf_top_right and sup_bot_right +subsume inf_top and sup_bot respectively. INCOMPATIBILITY. + +* HOLogic.strip_psplit: types are returned in syntactic order, similar +to other strip and tuple operations. INCOMPATIBILITY. + +* Reorganized theory Multiset: swapped notation of pointwise and multiset order: + * pointwise ordering is instance of class order with standard syntax <= and <; + * multiset ordering has syntax <=# and <#; partial order properties are provided + by means of interpretation with prefix multiset_order; + * less duplication, less historical organization of sections, + conversion from associations lists to multisets, rudimentary code generation; + * use insert_DiffM2 [symmetric] instead of elem_imp_eq_diff_union, if needed. + INCOMPATIBILITY. + +* Code generation: ML and OCaml code is decorated with signatures. + * Theory List: added transpose. * Renamed Library/Quotient.thy to Library/Quotient_Type.thy to avoid @@ -298,6 +317,14 @@ *** ML *** +* Sorts.certify_sort and derived "cert" operations for types and terms +no longer minimize sorts. Thus certification at the boundary of the +inference kernel becomes invariant under addition of class relations, +which is an important monotonicity principle. Sorts are now minimized +in the syntax layer only, at the boundary between the end-user and the +system. Subtle INCOMPATIBILITY, may have to use Sign.minimize_sort +explicitly in rare situations. + * Antiquotations for basic formal entities: @{class NAME} -- type class @@ -341,6 +368,12 @@ * Configuration options now admit dynamic default values, depending on the context or even global references. +* Most operations that refer to a global context are named +accordingly, e.g. Simplifier.global_context or +ProofContext.init_global. There are some situations where a global +context actually works, but under normal circumstances one needs to +pass the proper local context through the code! + *** System *** diff -r aace7a969410 -r 8629ac3efb19 doc-src/IsarImplementation/Thy/Logic.thy --- a/doc-src/IsarImplementation/Thy/Logic.thy Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/IsarImplementation/Thy/Logic.thy Tue May 04 20:30:22 2010 +0200 @@ -128,8 +128,7 @@ @{index_ML Sign.subsort: "theory -> sort * sort -> bool"} \\ @{index_ML Sign.of_sort: "theory -> typ * sort -> bool"} \\ @{index_ML Sign.add_types: "(binding * int * mixfix) list -> theory -> theory"} \\ - @{index_ML Sign.add_tyabbrs_i: " - (binding * string list * typ * mixfix) list -> theory -> theory"} \\ + @{index_ML Sign.add_type_abbrev: "binding * string list * typ -> theory -> theory"} \\ @{index_ML Sign.primitive_class: "binding * class list -> theory -> theory"} \\ @{index_ML Sign.primitive_classrel: "class * class -> theory -> theory"} \\ @{index_ML Sign.primitive_arity: "arity -> theory -> theory"} \\ @@ -168,9 +167,9 @@ type constructors @{text "\"} with @{text "k"} arguments and optional mixfix syntax. 
- \item @{ML Sign.add_tyabbrs_i}~@{text "[(\, \<^vec>\, \, mx), \]"} - defines a new type abbreviation @{text "(\<^vec>\)\ = \"} with - optional mixfix syntax. + \item @{ML Sign.add_type_abbrev}~@{text "(\, \<^vec>\, + \)"} defines a new type abbreviation @{text + "(\<^vec>\)\ = \"}. \item @{ML Sign.primitive_class}~@{text "(c, [c\<^isub>1, \, c\<^isub>n])"} declares a new class @{text "c"}, together with class diff -r aace7a969410 -r 8629ac3efb19 doc-src/IsarImplementation/Thy/Prelim.thy --- a/doc-src/IsarImplementation/Thy/Prelim.thy Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/IsarImplementation/Thy/Prelim.thy Tue May 04 20:30:22 2010 +0200 @@ -243,7 +243,7 @@ text %mlref {* \begin{mldecls} @{index_ML_type Proof.context} \\ - @{index_ML ProofContext.init: "theory -> Proof.context"} \\ + @{index_ML ProofContext.init_global: "theory -> Proof.context"} \\ @{index_ML ProofContext.theory_of: "Proof.context -> theory"} \\ @{index_ML ProofContext.transfer: "theory -> Proof.context -> Proof.context"} \\ \end{mldecls} @@ -254,7 +254,7 @@ of this type are essentially pure values, with a sliding reference to the background theory. - \item @{ML ProofContext.init}~@{text "thy"} produces a proof context + \item @{ML ProofContext.init_global}~@{text "thy"} produces a proof context derived from @{text "thy"}, initializing all data. \item @{ML ProofContext.theory_of}~@{text "ctxt"} selects the @@ -305,7 +305,7 @@ \item @{ML Context.proof_of}~@{text "context"} always produces a proof context from the generic @{text "context"}, using @{ML - "ProofContext.init"} as required (note that this re-initializes the + "ProofContext.init_global"} as required (note that this re-initializes the context data with each invocation). \end{description} diff -r aace7a969410 -r 8629ac3efb19 doc-src/IsarImplementation/Thy/document/Logic.tex --- a/doc-src/IsarImplementation/Thy/document/Logic.tex Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/IsarImplementation/Thy/document/Logic.tex Tue May 04 20:30:22 2010 +0200 @@ -139,8 +139,7 @@ \indexdef{}{ML}{Sign.subsort}\verb|Sign.subsort: theory -> sort * sort -> bool| \\ \indexdef{}{ML}{Sign.of\_sort}\verb|Sign.of_sort: theory -> typ * sort -> bool| \\ \indexdef{}{ML}{Sign.add\_types}\verb|Sign.add_types: (binding * int * mixfix) list -> theory -> theory| \\ - \indexdef{}{ML}{Sign.add\_tyabbrs\_i}\verb|Sign.add_tyabbrs_i: |\isasep\isanewline% -\verb| (binding * string list * typ * mixfix) list -> theory -> theory| \\ + \indexdef{}{ML}{Sign.add\_type\_abbrev}\verb|Sign.add_type_abbrev: binding * string list * typ -> theory -> theory| \\ \indexdef{}{ML}{Sign.primitive\_class}\verb|Sign.primitive_class: binding * class list -> theory -> theory| \\ \indexdef{}{ML}{Sign.primitive\_classrel}\verb|Sign.primitive_classrel: class * class -> theory -> theory| \\ \indexdef{}{ML}{Sign.primitive\_arity}\verb|Sign.primitive_arity: arity -> theory -> theory| \\ @@ -176,9 +175,7 @@ type constructors \isa{{\isasymkappa}} with \isa{k} arguments and optional mixfix syntax. - \item \verb|Sign.add_tyabbrs_i|~\isa{{\isacharbrackleft}{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ \isactrlvec {\isasymalpha}{\isacharcomma}\ {\isasymtau}{\isacharcomma}\ mx{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharbrackright}} - defines a new type abbreviation \isa{{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharparenright}{\isasymkappa}\ {\isacharequal}\ {\isasymtau}} with - optional mixfix syntax. 
+ \item \verb|Sign.add_type_abbrev|~\isa{{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ \isactrlvec {\isasymalpha}{\isacharcomma}\ {\isasymtau}{\isacharparenright}} defines a new type abbreviation \isa{{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharparenright}{\isasymkappa}\ {\isacharequal}\ {\isasymtau}}. \item \verb|Sign.primitive_class|~\isa{{\isacharparenleft}c{\isacharcomma}\ {\isacharbrackleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ c\isactrlisub n{\isacharbrackright}{\isacharparenright}} declares a new class \isa{c}, together with class relations \isa{c\ {\isasymsubseteq}\ c\isactrlisub i}, for \isa{i\ {\isacharequal}\ {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ n}. diff -r aace7a969410 -r 8629ac3efb19 doc-src/IsarImplementation/Thy/document/Prelim.tex --- a/doc-src/IsarImplementation/Thy/document/Prelim.tex Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/IsarImplementation/Thy/document/Prelim.tex Tue May 04 20:30:22 2010 +0200 @@ -282,7 +282,7 @@ \begin{isamarkuptext}% \begin{mldecls} \indexdef{}{ML type}{Proof.context}\verb|type Proof.context| \\ - \indexdef{}{ML}{ProofContext.init}\verb|ProofContext.init: theory -> Proof.context| \\ + \indexdef{}{ML}{ProofContext.init\_global}\verb|ProofContext.init_global: theory -> Proof.context| \\ \indexdef{}{ML}{ProofContext.theory\_of}\verb|ProofContext.theory_of: Proof.context -> theory| \\ \indexdef{}{ML}{ProofContext.transfer}\verb|ProofContext.transfer: theory -> Proof.context -> Proof.context| \\ \end{mldecls} @@ -293,7 +293,7 @@ of this type are essentially pure values, with a sliding reference to the background theory. - \item \verb|ProofContext.init|~\isa{thy} produces a proof context + \item \verb|ProofContext.init_global|~\isa{thy} produces a proof context derived from \isa{thy}, initializing all data. \item \verb|ProofContext.theory_of|~\isa{ctxt} selects the @@ -355,7 +355,7 @@ theory from the generic \isa{context}, using \verb|ProofContext.theory_of| as required. \item \verb|Context.proof_of|~\isa{context} always produces a - proof context from the generic \isa{context}, using \verb|ProofContext.init| as required (note that this re-initializes the + proof context from the generic \isa{context}, using \verb|ProofContext.init_global| as required (note that this re-initializes the context data with each invocation). \end{description}% diff -r aace7a969410 -r 8629ac3efb19 doc-src/IsarRef/Thy/Framework.thy --- a/doc-src/IsarRef/Thy/Framework.thy Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/IsarRef/Thy/Framework.thy Tue May 04 20:30:22 2010 +0200 @@ -79,8 +79,7 @@ text_raw {*\medskip\begin{minipage}{0.6\textwidth}*} (*<*) -lemma True -proof +example_proof (*>*) assume "x \ A" and "x \ B" then have "x \ A \ B" .. @@ -107,8 +106,7 @@ *} (*<*) -lemma True -proof +example_proof (*>*) assume "x \ A" and "x \ B" then have "x \ A \ B" by (rule IntI) @@ -130,8 +128,7 @@ text_raw {*\medskip\begin{minipage}{0.6\textwidth}*} (*<*) -lemma True -proof +example_proof (*>*) have "x \ \\" proof @@ -178,8 +175,7 @@ text_raw {*\medskip\begin{minipage}{0.6\textwidth}*} (*<*) -lemma True -proof +example_proof (*>*) assume "x \ \\" then have C @@ -212,8 +208,7 @@ *} (*<*) -lemma True -proof +example_proof (*>*) assume "x \ \\" then obtain A where "x \ A" and "A \ \" .. 
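[Illustrative aside, not part of the changeset: the Prelim documentation changes above rename ProofContext.init to ProofContext.init_global, with type theory -> Proof.context. A minimal ML sketch inside a theory body:]

ML {*
  (* derive a proof context, with initial data, from the current theory *)
  val ctxt = ProofContext.init_global @{theory};
*}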
@@ -817,8 +812,7 @@ *} text_raw {* \begingroup\footnotesize *} -(*<*)lemma True -proof +(*<*)example_proof (*>*) txt_raw {* \begin{minipage}[t]{0.18\textwidth} *} have "A \ B" @@ -877,8 +871,7 @@ text_raw {*\begin{minipage}{0.5\textwidth}*} (*<*) -lemma True -proof +example_proof (*>*) have "\x y. A x \ B y \ C x y" proof - @@ -987,8 +980,7 @@ *} (*<*) -lemma True -proof +example_proof (*>*) have "a = b" sorry also have "\ = c" sorry diff -r aace7a969410 -r 8629ac3efb19 doc-src/IsarRef/Thy/HOL_Specific.thy --- a/doc-src/IsarRef/Thy/HOL_Specific.thy Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/IsarRef/Thy/HOL_Specific.thy Tue May 04 20:30:22 2010 +0200 @@ -897,98 +897,6 @@ *} -section {* Invoking automated reasoning tools --- The Sledgehammer *} - -text {* - Isabelle/HOL includes a generic \emph{ATP manager} that allows - external automated reasoning tools to crunch a pending goal. - Supported provers include E\footnote{\url{http://www.eprover.org}}, - SPASS\footnote{\url{http://www.spass-prover.org/}}, and Vampire. - There is also a wrapper to invoke provers remotely via the - SystemOnTPTP\footnote{\url{http://www.cs.miami.edu/~tptp/cgi-bin/SystemOnTPTP}} - web service. - - The problem passed to external provers consists of the goal together - with a smart selection of lemmas from the current theory context. - The result of a successful proof search is some source text that - usually reconstructs the proof within Isabelle, without requiring - external provers again. The Metis - prover\footnote{\url{http://www.gilith.com/software/metis/}} that is - integrated into Isabelle/HOL is being used here. - - In this mode of operation, heavy means of automated reasoning are - used as a strong relevance filter, while the main proof checking - works via explicit inferences going through the Isabelle kernel. - Moreover, rechecking Isabelle proof texts with already specified - auxiliary facts is much faster than performing fully automated - search over and over again. - - \begin{matharray}{rcl} - @{command_def (HOL) "sledgehammer"}@{text "\<^sup>*"} & : & @{text "proof \"} \\ - @{command_def (HOL) "print_atps"}@{text "\<^sup>*"} & : & @{text "context \"} \\ - @{command_def (HOL) "atp_info"}@{text "\<^sup>*"} & : & @{text "any \"} \\ - @{command_def (HOL) "atp_kill"}@{text "\<^sup>*"} & : & @{text "any \"} \\ - @{command_def (HOL) "atp_messages"}@{text "\<^sup>*"} & : & @{text "any \"} \\ - @{method_def (HOL) metis} & : & @{text method} \\ - \end{matharray} - - \begin{rail} - 'sledgehammer' ( nameref * ) - ; - 'atp\_messages' ('(' nat ')')? - ; - - 'metis' thmrefs - ; - \end{rail} - - \begin{description} - - \item @{command (HOL) sledgehammer}~@{text "prover\<^sub>1 \ prover\<^sub>n"} - invokes the specified automated theorem provers on the first - subgoal. Provers are run in parallel, the first successful result - is displayed, and the other attempts are terminated. - - Provers are defined in the theory context, see also @{command (HOL) - print_atps}. If no provers are given as arguments to @{command - (HOL) sledgehammer}, the system refers to the default defined as - ``ATP provers'' preference by the user interface. - - There are additional preferences for timeout (default: 60 seconds), - and the maximum number of independent prover processes (default: 5); - excessive provers are automatically terminated. - - \item @{command (HOL) print_atps} prints the list of automated - theorem provers available to the @{command (HOL) sledgehammer} - command. 
- - \item @{command (HOL) atp_info} prints information about presently - running provers, including elapsed runtime, and the remaining time - until timeout. - - \item @{command (HOL) atp_kill} terminates all presently running - provers. - - \item @{command (HOL) atp_messages} displays recent messages issued - by automated theorem provers. This allows to examine results that - might have got lost due to the asynchronous nature of default - @{command (HOL) sledgehammer} output. An optional message limit may - be specified (default 5). - - \item @{method (HOL) metis}~@{text "facts"} invokes the Metis prover - with the given facts. Metis is an automated proof tool of medium - strength, but is fully integrated into Isabelle/HOL, with explicit - inferences going through the kernel. Thus its results are - guaranteed to be ``correct by construction''. - - Note that all facts used with Metis need to be specified as explicit - arguments. There are no rule declarations as for other Isabelle - provers, like @{method blast} or @{method fast}. - - \end{description} -*} - - section {* Unstructured case analysis and induction \label{sec:hol-induct-tac} *} text {* diff -r aace7a969410 -r 8629ac3efb19 doc-src/IsarRef/Thy/Inner_Syntax.thy --- a/doc-src/IsarRef/Thy/Inner_Syntax.thy Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/IsarRef/Thy/Inner_Syntax.thy Tue May 04 20:30:22 2010 +0200 @@ -365,6 +365,7 @@ @{command_def "no_type_notation"} & : & @{text "local_theory \ local_theory"} \\ @{command_def "notation"} & : & @{text "local_theory \ local_theory"} \\ @{command_def "no_notation"} & : & @{text "local_theory \ local_theory"} \\ + @{command_def "write"} & : & @{text "proof(state) \ proof(state)"} \\ \end{matharray} \begin{rail} @@ -372,6 +373,8 @@ ; ('notation' | 'no\_notation') target? mode? \\ (nameref structmixfix + 'and') ; + 'write' mode? (nameref structmixfix + 'and') + ; \end{rail} \begin{description} @@ -392,12 +395,14 @@ but removes the specified syntax annotation from the present context. + \item @{command "write"} is similar to @{command "notation"}, but + works within an Isar proof body. + \end{description} - Compared to the underlying @{command "syntax"} and @{command - "no_syntax"} primitives (\secref{sec:syn-trans}), the above commands - provide explicit checking wrt.\ the logical context, and work within - general local theory targets, not just the global theory. + Note that the more primitive commands @{command "syntax"} and + @{command "no_syntax"} (\secref{sec:syn-trans}) provide raw access + to the syntax tables of a global theory. *} diff -r aace7a969410 -r 8629ac3efb19 doc-src/IsarRef/Thy/Proof.thy --- a/doc-src/IsarRef/Thy/Proof.thy Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/IsarRef/Thy/Proof.thy Tue May 04 20:30:22 2010 +0200 @@ -46,6 +46,28 @@ section {* Proof structure *} +subsection {* Example proofs *} + +text {* + \begin{matharray}{rcl} + @{command_def "example_proof"} & : & @{text "local_theory \ proof(state)"} \\ + \end{matharray} + + \begin{description} + + \item @{command "example_proof"} opens an empty proof body. This + allows to experiment with Isar, without producing any persistent + result. + + Structurally, this is like a vacous @{command "lemma"} statement + followed by ``@{command "proof"}~@{text "-"}'', which means the + example proof may be closed by a regular @{command "qed"}, or + discontinued by @{command "oops"}. 
+ + \end{description} +*} + + subsection {* Blocks *} text {* diff -r aace7a969410 -r 8629ac3efb19 doc-src/IsarRef/Thy/Spec.thy --- a/doc-src/IsarRef/Thy/Spec.thy Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/IsarRef/Thy/Spec.thy Tue May 04 20:30:22 2010 +0200 @@ -902,7 +902,7 @@ \begin{matharray}{rcll} @{command_def "classes"} & : & @{text "theory \ theory"} \\ @{command_def "classrel"} & : & @{text "theory \ theory"} & (axiomatic!) \\ - @{command_def "defaultsort"} & : & @{text "theory \ theory"} \\ + @{command_def "default_sort"} & : & @{text "local_theory \ local_theory"} \\ @{command_def "class_deps"}@{text "\<^sup>*"} & : & @{text "context \"} \\ \end{matharray} @@ -911,7 +911,7 @@ ; 'classrel' (nameref ('<' | subseteq) nameref + 'and') ; - 'defaultsort' sort + 'default\_sort' sort ; \end{rail} @@ -929,7 +929,7 @@ (see \secref{sec:class}) provide a way to introduce proven class relations. - \item @{command "defaultsort"}~@{text s} makes sort @{text s} the + \item @{command "default_sort"}~@{text s} makes sort @{text s} the new default sort for any type variable that is given explicitly in the text, but lacks a sort constraint (wrt.\ the current context). Type variables generated by type inference are not affected. diff -r aace7a969410 -r 8629ac3efb19 doc-src/IsarRef/Thy/document/Framework.tex --- a/doc-src/IsarRef/Thy/document/Framework.tex Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/IsarRef/Thy/document/Framework.tex Tue May 04 20:30:22 2010 +0200 @@ -97,11 +97,11 @@ \medskip\begin{minipage}{0.6\textwidth} % \isadelimproof -% +\ \ \ \ % \endisadelimproof % \isatagproof -\ \ \ \ \isacommand{assume}\isamarkupfalse% +\isacommand{assume}\isamarkupfalse% \ {\isachardoublequoteopen}x\ {\isasymin}\ A{\isachardoublequoteclose}\ \isakeyword{and}\ {\isachardoublequoteopen}x\ {\isasymin}\ B{\isachardoublequoteclose}\isanewline \ \ \ \ \isacommand{then}\isamarkupfalse% \ \isacommand{have}\isamarkupfalse% @@ -135,11 +135,11 @@ \isamarkuptrue% % \isadelimproof -% +\ \ \ \ % \endisadelimproof % \isatagproof -\ \ \ \ \isacommand{assume}\isamarkupfalse% +\isacommand{assume}\isamarkupfalse% \ {\isachardoublequoteopen}x\ {\isasymin}\ A{\isachardoublequoteclose}\ \isakeyword{and}\ {\isachardoublequoteopen}x\ {\isasymin}\ B{\isachardoublequoteclose}\isanewline \ \ \ \ \isacommand{then}\isamarkupfalse% \ \isacommand{have}\isamarkupfalse% @@ -166,11 +166,11 @@ \medskip\begin{minipage}{0.6\textwidth} % \isadelimproof -% +\ \ \ \ % \endisadelimproof % \isatagproof -\ \ \ \ \isacommand{have}\isamarkupfalse% +\isacommand{have}\isamarkupfalse% \ {\isachardoublequoteopen}x\ {\isasymin}\ {\isasymInter}{\isasymA}{\isachardoublequoteclose}\isanewline \ \ \ \ \isacommand{proof}\isamarkupfalse% \isanewline @@ -198,9 +198,9 @@ {\isafoldnoproof}% % \isadelimnoproof -\isanewline % \endisadelimnoproof +\isanewline % \isadelimproof \ \ \ \ % @@ -251,11 +251,11 @@ \medskip\begin{minipage}{0.6\textwidth} % \isadelimproof -% +\ \ \ \ % \endisadelimproof % \isatagproof -\ \ \ \ \isacommand{assume}\isamarkupfalse% +\isacommand{assume}\isamarkupfalse% \ {\isachardoublequoteopen}x\ {\isasymin}\ {\isasymUnion}{\isasymA}{\isachardoublequoteclose}\isanewline \ \ \ \ \isacommand{then}\isamarkupfalse% \ \isacommand{have}\isamarkupfalse% @@ -286,9 +286,9 @@ {\isafoldnoproof}% % \isadelimnoproof -\isanewline % \endisadelimnoproof +\isanewline % \isadelimproof \ \ \ \ % @@ -326,11 +326,11 @@ \isamarkuptrue% % \isadelimproof -% +\ \ \ \ % \endisadelimproof % \isatagproof -\ \ \ \ \isacommand{assume}\isamarkupfalse% 
+\isacommand{assume}\isamarkupfalse% \ {\isachardoublequoteopen}x\ {\isasymin}\ {\isasymUnion}{\isasymA}{\isachardoublequoteclose}\isanewline \ \ \ \ \isacommand{then}\isamarkupfalse% \ \isacommand{obtain}\isamarkupfalse% @@ -1186,9 +1186,9 @@ {\isafoldproof}% % \isadelimproof -\isanewline % \endisadelimproof +\isanewline % \isadelimnoproof \ \ \ \ \ \ % @@ -1201,9 +1201,9 @@ {\isafoldnoproof}% % \isadelimnoproof -\isanewline % \endisadelimnoproof +\isanewline % \isadelimproof \ \ % @@ -1268,11 +1268,11 @@ \begin{minipage}{0.5\textwidth} % \isadelimproof -% +\ \ % \endisadelimproof % \isatagproof -\ \ \isacommand{have}\isamarkupfalse% +\isacommand{have}\isamarkupfalse% \ {\isachardoublequoteopen}{\isasymAnd}x\ y{\isachardot}\ A\ x\ {\isasymLongrightarrow}\ B\ y\ {\isasymLongrightarrow}\ C\ x\ y{\isachardoublequoteclose}\isanewline \ \ \isacommand{proof}\isamarkupfalse% \ {\isacharminus}\isanewline @@ -1300,9 +1300,9 @@ {\isafoldnoproof}% % \isadelimnoproof -\isanewline % \endisadelimnoproof +\isanewline % \isadelimproof \ \ % @@ -1342,9 +1342,9 @@ {\isafoldnoproof}% % \isadelimnoproof -\isanewline % \endisadelimnoproof +\isanewline % \isadelimproof \ \ % @@ -1456,11 +1456,11 @@ \isamarkuptrue% % \isadelimproof -% +\ \ % \endisadelimproof % \isatagproof -\ \ \isacommand{have}\isamarkupfalse% +\isacommand{have}\isamarkupfalse% \ {\isachardoublequoteopen}a\ {\isacharequal}\ b{\isachardoublequoteclose}\ \isacommand{sorry}\isamarkupfalse% \isanewline \ \ \isacommand{also}\isamarkupfalse% diff -r aace7a969410 -r 8629ac3efb19 doc-src/IsarRef/Thy/document/HOL_Specific.tex --- a/doc-src/IsarRef/Thy/document/HOL_Specific.tex Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/IsarRef/Thy/document/HOL_Specific.tex Tue May 04 20:30:22 2010 +0200 @@ -915,98 +915,6 @@ \end{isamarkuptext}% \isamarkuptrue% % -\isamarkupsection{Invoking automated reasoning tools --- The Sledgehammer% -} -\isamarkuptrue% -% -\begin{isamarkuptext}% -Isabelle/HOL includes a generic \emph{ATP manager} that allows - external automated reasoning tools to crunch a pending goal. - Supported provers include E\footnote{\url{http://www.eprover.org}}, - SPASS\footnote{\url{http://www.spass-prover.org/}}, and Vampire. - There is also a wrapper to invoke provers remotely via the - SystemOnTPTP\footnote{\url{http://www.cs.miami.edu/~tptp/cgi-bin/SystemOnTPTP}} - web service. - - The problem passed to external provers consists of the goal together - with a smart selection of lemmas from the current theory context. - The result of a successful proof search is some source text that - usually reconstructs the proof within Isabelle, without requiring - external provers again. The Metis - prover\footnote{\url{http://www.gilith.com/software/metis/}} that is - integrated into Isabelle/HOL is being used here. - - In this mode of operation, heavy means of automated reasoning are - used as a strong relevance filter, while the main proof checking - works via explicit inferences going through the Isabelle kernel. - Moreover, rechecking Isabelle proof texts with already specified - auxiliary facts is much faster than performing fully automated - search over and over again. 
- - \begin{matharray}{rcl} - \indexdef{HOL}{command}{sledgehammer}\hypertarget{command.HOL.sledgehammer}{\hyperlink{command.HOL.sledgehammer}{\mbox{\isa{\isacommand{sledgehammer}}}}}\isa{{\isachardoublequote}\isactrlsup {\isacharasterisk}{\isachardoublequote}} & : & \isa{{\isachardoublequote}proof\ {\isasymrightarrow}{\isachardoublequote}} \\ - \indexdef{HOL}{command}{print\_atps}\hypertarget{command.HOL.print-atps}{\hyperlink{command.HOL.print-atps}{\mbox{\isa{\isacommand{print{\isacharunderscore}atps}}}}}\isa{{\isachardoublequote}\isactrlsup {\isacharasterisk}{\isachardoublequote}} & : & \isa{{\isachardoublequote}context\ {\isasymrightarrow}{\isachardoublequote}} \\ - \indexdef{HOL}{command}{atp\_info}\hypertarget{command.HOL.atp-info}{\hyperlink{command.HOL.atp-info}{\mbox{\isa{\isacommand{atp{\isacharunderscore}info}}}}}\isa{{\isachardoublequote}\isactrlsup {\isacharasterisk}{\isachardoublequote}} & : & \isa{{\isachardoublequote}any\ {\isasymrightarrow}{\isachardoublequote}} \\ - \indexdef{HOL}{command}{atp\_kill}\hypertarget{command.HOL.atp-kill}{\hyperlink{command.HOL.atp-kill}{\mbox{\isa{\isacommand{atp{\isacharunderscore}kill}}}}}\isa{{\isachardoublequote}\isactrlsup {\isacharasterisk}{\isachardoublequote}} & : & \isa{{\isachardoublequote}any\ {\isasymrightarrow}{\isachardoublequote}} \\ - \indexdef{HOL}{command}{atp\_messages}\hypertarget{command.HOL.atp-messages}{\hyperlink{command.HOL.atp-messages}{\mbox{\isa{\isacommand{atp{\isacharunderscore}messages}}}}}\isa{{\isachardoublequote}\isactrlsup {\isacharasterisk}{\isachardoublequote}} & : & \isa{{\isachardoublequote}any\ {\isasymrightarrow}{\isachardoublequote}} \\ - \indexdef{HOL}{method}{metis}\hypertarget{method.HOL.metis}{\hyperlink{method.HOL.metis}{\mbox{\isa{metis}}}} & : & \isa{method} \\ - \end{matharray} - - \begin{rail} - 'sledgehammer' ( nameref * ) - ; - 'atp\_messages' ('(' nat ')')? - ; - - 'metis' thmrefs - ; - \end{rail} - - \begin{description} - - \item \hyperlink{command.HOL.sledgehammer}{\mbox{\isa{\isacommand{sledgehammer}}}}~\isa{{\isachardoublequote}prover\isactrlsub {\isadigit{1}}\ {\isasymdots}\ prover\isactrlsub n{\isachardoublequote}} - invokes the specified automated theorem provers on the first - subgoal. Provers are run in parallel, the first successful result - is displayed, and the other attempts are terminated. - - Provers are defined in the theory context, see also \hyperlink{command.HOL.print-atps}{\mbox{\isa{\isacommand{print{\isacharunderscore}atps}}}}. If no provers are given as arguments to \hyperlink{command.HOL.sledgehammer}{\mbox{\isa{\isacommand{sledgehammer}}}}, the system refers to the default defined as - ``ATP provers'' preference by the user interface. - - There are additional preferences for timeout (default: 60 seconds), - and the maximum number of independent prover processes (default: 5); - excessive provers are automatically terminated. - - \item \hyperlink{command.HOL.print-atps}{\mbox{\isa{\isacommand{print{\isacharunderscore}atps}}}} prints the list of automated - theorem provers available to the \hyperlink{command.HOL.sledgehammer}{\mbox{\isa{\isacommand{sledgehammer}}}} - command. - - \item \hyperlink{command.HOL.atp-info}{\mbox{\isa{\isacommand{atp{\isacharunderscore}info}}}} prints information about presently - running provers, including elapsed runtime, and the remaining time - until timeout. - - \item \hyperlink{command.HOL.atp-kill}{\mbox{\isa{\isacommand{atp{\isacharunderscore}kill}}}} terminates all presently running - provers. 
- - \item \hyperlink{command.HOL.atp-messages}{\mbox{\isa{\isacommand{atp{\isacharunderscore}messages}}}} displays recent messages issued - by automated theorem provers. This allows to examine results that - might have got lost due to the asynchronous nature of default - \hyperlink{command.HOL.sledgehammer}{\mbox{\isa{\isacommand{sledgehammer}}}} output. An optional message limit may - be specified (default 5). - - \item \hyperlink{method.HOL.metis}{\mbox{\isa{metis}}}~\isa{{\isachardoublequote}facts{\isachardoublequote}} invokes the Metis prover - with the given facts. Metis is an automated proof tool of medium - strength, but is fully integrated into Isabelle/HOL, with explicit - inferences going through the kernel. Thus its results are - guaranteed to be ``correct by construction''. - - Note that all facts used with Metis need to be specified as explicit - arguments. There are no rule declarations as for other Isabelle - provers, like \hyperlink{method.blast}{\mbox{\isa{blast}}} or \hyperlink{method.fast}{\mbox{\isa{fast}}}. - - \end{description}% -\end{isamarkuptext}% -\isamarkuptrue% -% \isamarkupsection{Unstructured case analysis and induction \label{sec:hol-induct-tac}% } \isamarkuptrue% diff -r aace7a969410 -r 8629ac3efb19 doc-src/IsarRef/Thy/document/Inner_Syntax.tex --- a/doc-src/IsarRef/Thy/document/Inner_Syntax.tex Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/IsarRef/Thy/document/Inner_Syntax.tex Tue May 04 20:30:22 2010 +0200 @@ -388,6 +388,7 @@ \indexdef{}{command}{no\_type\_notation}\hypertarget{command.no-type-notation}{\hyperlink{command.no-type-notation}{\mbox{\isa{\isacommand{no{\isacharunderscore}type{\isacharunderscore}notation}}}}} & : & \isa{{\isachardoublequote}local{\isacharunderscore}theory\ {\isasymrightarrow}\ local{\isacharunderscore}theory{\isachardoublequote}} \\ \indexdef{}{command}{notation}\hypertarget{command.notation}{\hyperlink{command.notation}{\mbox{\isa{\isacommand{notation}}}}} & : & \isa{{\isachardoublequote}local{\isacharunderscore}theory\ {\isasymrightarrow}\ local{\isacharunderscore}theory{\isachardoublequote}} \\ \indexdef{}{command}{no\_notation}\hypertarget{command.no-notation}{\hyperlink{command.no-notation}{\mbox{\isa{\isacommand{no{\isacharunderscore}notation}}}}} & : & \isa{{\isachardoublequote}local{\isacharunderscore}theory\ {\isasymrightarrow}\ local{\isacharunderscore}theory{\isachardoublequote}} \\ + \indexdef{}{command}{write}\hypertarget{command.write}{\hyperlink{command.write}{\mbox{\isa{\isacommand{write}}}}} & : & \isa{{\isachardoublequote}proof{\isacharparenleft}state{\isacharparenright}\ {\isasymrightarrow}\ proof{\isacharparenleft}state{\isacharparenright}{\isachardoublequote}} \\ \end{matharray} \begin{rail} @@ -395,6 +396,8 @@ ; ('notation' | 'no\_notation') target? mode? \\ (nameref structmixfix + 'and') ; + 'write' mode? (nameref structmixfix + 'and') + ; \end{rail} \begin{description} @@ -414,11 +417,14 @@ but removes the specified syntax annotation from the present context. + \item \hyperlink{command.write}{\mbox{\isa{\isacommand{write}}}} is similar to \hyperlink{command.notation}{\mbox{\isa{\isacommand{notation}}}}, but + works within an Isar proof body. 
+ \end{description} - Compared to the underlying \hyperlink{command.syntax}{\mbox{\isa{\isacommand{syntax}}}} and \hyperlink{command.no-syntax}{\mbox{\isa{\isacommand{no{\isacharunderscore}syntax}}}} primitives (\secref{sec:syn-trans}), the above commands - provide explicit checking wrt.\ the logical context, and work within - general local theory targets, not just the global theory.% + Note that the more primitive commands \hyperlink{command.syntax}{\mbox{\isa{\isacommand{syntax}}}} and + \hyperlink{command.no-syntax}{\mbox{\isa{\isacommand{no{\isacharunderscore}syntax}}}} (\secref{sec:syn-trans}) provide raw access + to the syntax tables of a global theory.% \end{isamarkuptext}% \isamarkuptrue% % diff -r aace7a969410 -r 8629ac3efb19 doc-src/IsarRef/Thy/document/Proof.tex --- a/doc-src/IsarRef/Thy/document/Proof.tex Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/IsarRef/Thy/document/Proof.tex Tue May 04 20:30:22 2010 +0200 @@ -65,6 +65,30 @@ } \isamarkuptrue% % +\isamarkupsubsection{Example proofs% +} +\isamarkuptrue% +% +\begin{isamarkuptext}% +\begin{matharray}{rcl} + \indexdef{}{command}{example\_proof}\hypertarget{command.example-proof}{\hyperlink{command.example-proof}{\mbox{\isa{\isacommand{example{\isacharunderscore}proof}}}}} & : & \isa{{\isachardoublequote}local{\isacharunderscore}theory\ {\isasymrightarrow}\ proof{\isacharparenleft}state{\isacharparenright}{\isachardoublequote}} \\ + \end{matharray} + + \begin{description} + + \item \hyperlink{command.example-proof}{\mbox{\isa{\isacommand{example{\isacharunderscore}proof}}}} opens an empty proof body. This + allows to experiment with Isar, without producing any persistent + result. + + Structurally, this is like a vacous \hyperlink{command.lemma}{\mbox{\isa{\isacommand{lemma}}}} statement + followed by ``\hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}~\isa{{\isachardoublequote}{\isacharminus}{\isachardoublequote}}'', which means the + example proof may be closed by a regular \hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}}, or + discontinued by \hyperlink{command.oops}{\mbox{\isa{\isacommand{oops}}}}. + + \end{description}% +\end{isamarkuptext}% +\isamarkuptrue% +% \isamarkupsubsection{Blocks% } \isamarkuptrue% diff -r aace7a969410 -r 8629ac3efb19 doc-src/IsarRef/Thy/document/Spec.tex --- a/doc-src/IsarRef/Thy/document/Spec.tex Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/IsarRef/Thy/document/Spec.tex Tue May 04 20:30:22 2010 +0200 @@ -937,7 +937,7 @@ \begin{matharray}{rcll} \indexdef{}{command}{classes}\hypertarget{command.classes}{\hyperlink{command.classes}{\mbox{\isa{\isacommand{classes}}}}} & : & \isa{{\isachardoublequote}theory\ {\isasymrightarrow}\ theory{\isachardoublequote}} \\ \indexdef{}{command}{classrel}\hypertarget{command.classrel}{\hyperlink{command.classrel}{\mbox{\isa{\isacommand{classrel}}}}} & : & \isa{{\isachardoublequote}theory\ {\isasymrightarrow}\ theory{\isachardoublequote}} & (axiomatic!) 
\\ - \indexdef{}{command}{defaultsort}\hypertarget{command.defaultsort}{\hyperlink{command.defaultsort}{\mbox{\isa{\isacommand{defaultsort}}}}} & : & \isa{{\isachardoublequote}theory\ {\isasymrightarrow}\ theory{\isachardoublequote}} \\ + \indexdef{}{command}{default\_sort}\hypertarget{command.default-sort}{\hyperlink{command.default-sort}{\mbox{\isa{\isacommand{default{\isacharunderscore}sort}}}}} & : & \isa{{\isachardoublequote}local{\isacharunderscore}theory\ {\isasymrightarrow}\ local{\isacharunderscore}theory{\isachardoublequote}} \\ \indexdef{}{command}{class\_deps}\hypertarget{command.class-deps}{\hyperlink{command.class-deps}{\mbox{\isa{\isacommand{class{\isacharunderscore}deps}}}}}\isa{{\isachardoublequote}\isactrlsup {\isacharasterisk}{\isachardoublequote}} & : & \isa{{\isachardoublequote}context\ {\isasymrightarrow}{\isachardoublequote}} \\ \end{matharray} @@ -946,7 +946,7 @@ ; 'classrel' (nameref ('<' | subseteq) nameref + 'and') ; - 'defaultsort' sort + 'default\_sort' sort ; \end{rail} @@ -964,7 +964,7 @@ (see \secref{sec:class}) provide a way to introduce proven class relations. - \item \hyperlink{command.defaultsort}{\mbox{\isa{\isacommand{defaultsort}}}}~\isa{s} makes sort \isa{s} the + \item \hyperlink{command.default-sort}{\mbox{\isa{\isacommand{default{\isacharunderscore}sort}}}}~\isa{s} makes sort \isa{s} the new default sort for any type variable that is given explicitly in the text, but lacks a sort constraint (wrt.\ the current context). Type variables generated by type inference are not affected. diff -r aace7a969410 -r 8629ac3efb19 doc-src/Nitpick/nitpick.tex --- a/doc-src/Nitpick/nitpick.tex Tue May 04 19:57:55 2010 +0200 +++ b/doc-src/Nitpick/nitpick.tex Tue May 04 20:30:22 2010 +0200 @@ -428,9 +428,6 @@ $\mathit{sym}.y$ are simply the bound variables $x$ and $y$ from \textit{sym}'s definition. -Although skolemization is a useful optimization, you can disable it by invoking -Nitpick with \textit{dont\_skolemize}. See \S\ref{optimizations} for details. - \subsection{Natural Numbers and Integers} \label{natural-numbers-and-integers} @@ -2193,15 +2190,6 @@ {\small See also \textit{overlord} (\S\ref{mode-of-operation}) and \textit{batch\_size} (\S\ref{optimizations}).} -\optrue{show\_skolems}{hide\_skolem} -Specifies whether the values of Skolem constants should be displayed as part of -counterexamples. Skolem constants correspond to bound variables in the original -formula and usually help us to understand why the counterexample falsifies the -formula. - -\nopagebreak -{\small See also \textit{skolemize} (\S\ref{optimizations}).} - \opfalse{show\_datatypes}{hide\_datatypes} Specifies whether the subsets used to approximate (co)in\-duc\-tive datatypes should be displayed as part of counterexamples. Such subsets are sometimes helpful when @@ -2215,8 +2203,8 @@ genuine, but they can clutter the output. \opfalse{show\_all}{dont\_show\_all} -Enabling this option effectively enables \textit{show\_skolems}, -\textit{show\_datatypes}, and \textit{show\_consts}. +Enabling this option effectively enables \textit{show\_datatypes} and +\textit{show\_consts}. \opdefault{max\_potential}{int}{$\mathbf{1}$} Specifies the maximum number of potential counterexamples to display. Setting @@ -2258,9 +2246,6 @@ arguments that are not accounted for are left alone, as if the specification had been $1,\ldots,1,n_1,\ldots,n_k$. -\nopagebreak -{\small See also \textit{uncurry} (\S\ref{optimizations}).} - \opdefault{format}{int\_seq}{$\mathbf{1}$} Specifies the default format to use. 
Irrespective of the default format, the extra arguments to a Skolem constant corresponding to the outer bound variables @@ -2454,15 +2439,6 @@ {\small See also \textit{debug} (\S\ref{output-format}) and \textit{show\_consts} (\S\ref{output-format}).} -\optrue{skolemize}{dont\_skolemize} -Specifies whether the formula should be skolemized. For performance reasons, -(positive) $\forall$-quanti\-fiers that occur in the scope of a higher-order -(positive) $\exists$-quanti\-fier are left unchanged. - -\nopagebreak -{\small See also \textit{debug} (\S\ref{output-format}) and -\textit{show\_skolems} (\S\ref{output-format}).} - \optrue{star\_linear\_preds}{dont\_star\_linear\_preds} Specifies whether Nitpick should use Kodkod's transitive closure operator to encode non-well-founded ``linear inductive predicates,'' i.e., inductive @@ -2474,15 +2450,6 @@ {\small See also \textit{wf} (\S\ref{scope-of-search}), \textit{debug} (\S\ref{output-format}), and \textit{iter} (\S\ref{scope-of-search}).} -\optrue{uncurry}{dont\_uncurry} -Specifies whether Nitpick should uncurry functions. Uncurrying has on its own no -tangible effect on efficiency, but it creates opportunities for the boxing -optimization. - -\nopagebreak -{\small See also \textit{box} (\S\ref{scope-of-search}), \textit{debug} -(\S\ref{output-format}), and \textit{format} (\S\ref{output-format}).} - \optrue{fast\_descrs}{full\_descrs} Specifies whether Nitpick should optimize the definite and indefinite description operators (THE and SOME). The optimized versions usually help @@ -2498,25 +2465,6 @@ Unless you are tracking down a bug in Nitpick or distrust the peephole optimizer, you should leave this option enabled. -\opdefault{sym\_break}{int}{20} -Specifies an upper bound on the number of relations for which Kodkod generates -symmetry breaking predicates. According to the Kodkod documentation -\cite{kodkod-2009-options}, ``in general, the higher this value, the more -symmetries will be broken, and the faster the formula will be solved. But, -setting the value too high may have the opposite effect and slow down the -solving.'' - -\opdefault{sharing\_depth}{int}{3} -Specifies the depth to which Kodkod should check circuits for equivalence during -the translation to SAT. The default of 3 is the same as in Alloy. The minimum -allowed depth is 1. Increasing the sharing may result in a smaller SAT problem, -but can also slow down Kodkod. - -\opfalse{flatten\_props}{dont\_flatten\_props} -Specifies whether Kodkod should try to eliminate intermediate Boolean variables. -Although this might sound like a good idea, in practice it can drastically slow -down Kodkod. - \opdefault{max\_threads}{int}{0} Specifies the maximum number of threads to use in Kodkod. If this option is set to 0, Kodkod will compute an appropriate value based on the number of processor @@ -2569,7 +2517,7 @@ Behind the scenes, Isabelle's built-in packages and theories rely on the following attributes to affect Nitpick's behavior: -\begin{itemize} +\begin{enum} \flushitem{\textit{nitpick\_def}} \nopagebreak @@ -2611,8 +2559,8 @@ must be of the form \qquad $\lbrakk P_1;\> \ldots;\> P_m;\> M~(c\ t_{11}\ \ldots\ t_{1n});\> -\ldots;\> M~(c\ t_{k1}\ \ldots\ t_{kn})\rbrakk \,\Longrightarrow\, c\ u_1\ -\ldots\ u_n$, +\ldots;\> M~(c\ t_{k1}\ \ldots\ t_{kn})\rbrakk$ \\ +\hbox{}\qquad ${\Longrightarrow}\;\, c\ u_1\ \ldots\ u_n$, where the $P_i$'s are side conditions that do not involve $c$ and $M$ is an optional monotonic operator. The order of the assumptions is irrelevant. 
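[Illustrative aside, not taken from this changeset: the rule format displayed above is instantiated by the introduction rules of an ordinary inductive predicate definition. In the sketch below (the name 'ev' is made up for illustration), the step rule has one recursive premise, no side conditions, and no monotonic operator M.]

inductive ev :: "nat \<Rightarrow> bool" where
  zero: "ev 0"
| step: "ev n \<Longrightarrow> ev (Suc (Suc n))"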
@@ -2623,7 +2571,7 @@ This attribute specifies the (free-form) specification of a constant defined using the \hbox{(\textbf{ax\_})}\allowbreak\textbf{specification} command. -\end{itemize} +\end{enum} When faced with a constant, Nitpick proceeds as follows: diff -r aace7a969410 -r 8629ac3efb19 etc/isar-keywords-ZF.el --- a/etc/isar-keywords-ZF.el Tue May 04 19:57:55 2010 +0200 +++ b/etc/isar-keywords-ZF.el Tue May 04 20:30:22 2010 +0200 @@ -57,7 +57,7 @@ "declaration" "declare" "def" - "defaultsort" + "default_sort" "defer" "definition" "defs" @@ -66,6 +66,7 @@ "done" "enable_pr" "end" + "example_proof" "exit" "extract" "extract_type" @@ -209,6 +210,7 @@ "using" "welcome" "with" + "write" "{" "}")) @@ -371,7 +373,7 @@ "datatype" "declaration" "declare" - "defaultsort" + "default_sort" "definition" "defs" "extract" @@ -425,6 +427,7 @@ (defconst isar-keywords-theory-goal '("corollary" + "example_proof" "instance" "interpretation" "lemma" @@ -484,7 +487,8 @@ "txt" "txt_raw" "unfolding" - "using")) + "using" + "write")) (defconst isar-keywords-proof-asm '("assume" diff -r aace7a969410 -r 8629ac3efb19 etc/isar-keywords.el --- a/etc/isar-keywords.el Tue May 04 19:57:55 2010 +0200 +++ b/etc/isar-keywords.el Tue May 04 20:30:22 2010 +0200 @@ -30,10 +30,6 @@ "arities" "assume" "atom_decl" - "atp_info" - "atp_kill" - "atp_messages" - "atp_minimize" "attribute_setup" "automaton" "ax_specification" @@ -65,6 +61,7 @@ "code_modulename" "code_monad" "code_pred" + "code_reflect" "code_reserved" "code_thms" "code_type" @@ -81,7 +78,7 @@ "declaration" "declare" "def" - "defaultsort" + "default_sort" "defer" "defer_recdef" "definition" @@ -94,6 +91,7 @@ "enable_pr" "end" "equivariance" + "example_proof" "exit" "export_code" "extract" @@ -171,7 +169,6 @@ "print_abbrevs" "print_antiquotations" "print_ast_translation" - "print_atps" "print_attributes" "print_binds" "print_cases" @@ -276,6 +273,7 @@ "values" "welcome" "with" + "write" "{" "}")) @@ -292,10 +290,12 @@ "congs" "constrains" "contains" + "datatypes" "defines" "file" "fixes" "for" + "functions" "hide_action" "hints" "identifier" @@ -361,10 +361,6 @@ '("ML_command" "ML_val" "ProofGeneral\\.pr" - "atp_info" - "atp_kill" - "atp_messages" - "atp_minimize" "boogie_status" "cd" "class_deps" @@ -388,7 +384,6 @@ "prf" "print_abbrevs" "print_antiquotations" - "print_atps" "print_attributes" "print_binds" "print_cases" @@ -475,6 +470,7 @@ "code_module" "code_modulename" "code_monad" + "code_reflect" "code_reserved" "code_type" "coinductive" @@ -486,7 +482,7 @@ "datatype" "declaration" "declare" - "defaultsort" + "default_sort" "defer_recdef" "definition" "defs" @@ -562,6 +558,7 @@ "code_pred" "corollary" "cpodef" + "example_proof" "function" "instance" "interpretation" @@ -632,7 +629,8 @@ "txt" "txt_raw" "unfolding" - "using")) + "using" + "write")) (defconst isar-keywords-proof-asm '("assume" diff -r aace7a969410 -r 8629ac3efb19 src/CCL/CCL.thy --- a/src/CCL/CCL.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/CCL/CCL.thy Tue May 04 20:30:22 2010 +0200 @@ -17,7 +17,7 @@ *} classes prog < "term" -defaultsort prog +default_sort prog arities "fun" :: (prog, prog) prog diff -r aace7a969410 -r 8629ac3efb19 src/FOL/IFOL.thy --- a/src/FOL/IFOL.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/FOL/IFOL.thy Tue May 04 20:30:22 2010 +0200 @@ -31,7 +31,7 @@ global classes "term" -defaultsort "term" +default_sort "term" typedecl o diff -r aace7a969410 -r 8629ac3efb19 src/FOL/simpdata.ML --- a/src/FOL/simpdata.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/FOL/simpdata.ML Tue May 04 
20:30:22 2010 +0200 @@ -21,13 +21,13 @@ | _ => th RS @{thm iff_reflection_T}; (*Replace premises x=y, X<->Y by X==Y*) -val mk_meta_prems = - rule_by_tactic +fun mk_meta_prems ctxt = + rule_by_tactic ctxt (REPEAT_FIRST (resolve_tac [@{thm meta_eq_to_obj_eq}, @{thm def_imp_iff}])); (*Congruence rules for = or <-> (instead of ==)*) -fun mk_meta_cong rl = - Drule.export_without_context (mk_meta_eq (mk_meta_prems rl)) +fun mk_meta_cong ss rl = + Drule.export_without_context (mk_meta_eq (mk_meta_prems (Simplifier.the_context ss) rl)) handle THM _ => error("Premises and conclusion of congruence rules must use =-equality or <->"); @@ -35,10 +35,6 @@ [("op -->", [@{thm mp}]), ("op &", [@{thm conjunct1}, @{thm conjunct2}]), ("All", [@{thm spec}]), ("True", []), ("False", [])]; -(* ###FIXME: move to simplifier.ML -val mk_atomize: (string * thm list) list -> thm -> thm list -*) -(* ###FIXME: move to simplifier.ML *) fun mk_atomize pairs = let fun atoms th = (case concl_of th of @@ -52,7 +48,7 @@ | _ => [th]) in atoms end; -fun mksimps pairs = (map mk_eq o mk_atomize pairs o gen_all); +fun mksimps pairs (_: simpset) = map mk_eq o mk_atomize pairs o gen_all; (** make simplification procedures for quantifier elimination **) diff -r aace7a969410 -r 8629ac3efb19 src/FOLP/IFOLP.thy --- a/src/FOLP/IFOLP.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/FOLP/IFOLP.thy Tue May 04 20:30:22 2010 +0200 @@ -15,7 +15,7 @@ global classes "term" -defaultsort "term" +default_sort "term" typedecl p typedecl o diff -r aace7a969410 -r 8629ac3efb19 src/FOLP/simp.ML --- a/src/FOLP/simp.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/FOLP/simp.ML Tue May 04 20:30:22 2010 +0200 @@ -222,7 +222,7 @@ fun normed_rews congs = let val add_norms = add_norm_tags congs in fn thm => Variable.tradeT - (K (map (add_norms o mk_trans) o maps mk_rew_rules)) (Variable.thm_context thm) [thm] + (K (map (add_norms o mk_trans) o maps mk_rew_rules)) (Variable.global_thm_context thm) [thm] end; fun NORM norm_lhs_tac = EVERY'[rtac red2 , norm_lhs_tac, refl_tac]; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Bali/DeclConcepts.thy --- a/src/HOL/Bali/DeclConcepts.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Bali/DeclConcepts.thy Tue May 04 20:30:22 2010 +0200 @@ -1390,7 +1390,7 @@ "accimethds G pack I \ if G\Iface I accessible_in pack then imethds G I - else \ k. {}" + else (\ k. {})" text {* only returns imethds if the interface is accessible *} definition methd :: "prog \ qtname \ (sig,qtname \ methd) table" where diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Bali/TypeSafe.thy --- a/src/HOL/Bali/TypeSafe.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Bali/TypeSafe.thy Tue May 04 20:30:22 2010 +0200 @@ -9,8 +9,6 @@ section "error free" -hide_const field - lemma error_free_halloc: assumes halloc: "G\s0 \halloc oi\a\ s1" and error_free_s0: "error_free s0" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Bali/WellType.thy --- a/src/HOL/Bali/WellType.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Bali/WellType.thy Tue May 04 20:30:22 2010 +0200 @@ -94,7 +94,7 @@ "accObjectmheads G S T \ if G\RefT T accessible_in (pid S) then Objectmheads G S - else \sig. {}" + else (\sig. {})" primrec "mheads G S NullT = (\sig. {})" "mheads G S (IfaceT I) = (\sig. 
(\(I,h).(IfaceT I,h)) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Big_Operators.thy --- a/src/HOL/Big_Operators.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Big_Operators.thy Tue May 04 20:30:22 2010 +0200 @@ -33,7 +33,7 @@ text {* for ad-hoc proofs for @{const fold_image} *} lemma (in comm_monoid_add) comm_monoid_mult: - "comm_monoid_mult (op +) 0" + "class.comm_monoid_mult (op +) 0" proof qed (auto intro: add_assoc add_commute) notation times (infixl "*" 70) @@ -554,6 +554,26 @@ case False thus ?thesis by (simp add: setsum_def) qed +lemma setsum_nonneg_leq_bound: + fixes f :: "'a \ 'b::{ordered_ab_group_add}" + assumes "finite s" "\i. i \ s \ f i \ 0" "(\i \ s. f i) = B" "i \ s" + shows "f i \ B" +proof - + have "0 \ (\ i \ s - {i}. f i)" and "0 \ f i" + using assms by (auto intro!: setsum_nonneg) + moreover + have "(\ i \ s - {i}. f i) + f i = B" + using assms by (simp add: setsum_diff1) + ultimately show ?thesis by auto +qed + +lemma setsum_nonneg_0: + fixes f :: "'a \ 'b::{ordered_ab_group_add}" + assumes "finite s" and pos: "\ i. i \ s \ f i \ 0" + and "(\ i \ s. f i) = 0" and i: "i \ s" + shows "f i = 0" + using setsum_nonneg_leq_bound[OF assms] pos[OF i] by auto + lemma setsum_mono2: fixes f :: "'a \ 'b :: ordered_comm_monoid_add" assumes fin: "finite B" and sub: "A \ B" and nn: "\b. b \ B-A \ 0 \ f b" @@ -1033,12 +1053,12 @@ by (erule finite_induct) (auto simp add: insert_Diff_if) lemma setprod_inversef: - fixes f :: "'b \ 'a::{field,division_by_zero}" + fixes f :: "'b \ 'a::field_inverse_zero" shows "finite A ==> setprod (inverse \ f) A = inverse (setprod f A)" by (erule finite_induct) auto lemma setprod_dividef: - fixes f :: "'b \ 'a::{field,division_by_zero}" + fixes f :: "'b \ 'a::field_inverse_zero" shows "finite A ==> setprod (%x. 
f x / g x) A = setprod f A / setprod g A" apply (subgoal_tac @@ -1140,7 +1160,7 @@ using setprod_Un_disjoint[OF fAB dj, of ?f, unfolded eq[symmetric]] by simp then have ?thesis using a cA - by (simp add: fA1 ring_simps cong add: setprod_cong cong del: if_weak_cong)} + by (simp add: fA1 field_simps cong add: setprod_cong cong del: if_weak_cong)} ultimately show ?thesis by blast qed @@ -1180,7 +1200,8 @@ context semilattice_inf begin -lemma ab_semigroup_idem_mult_inf: "ab_semigroup_idem_mult inf" +lemma ab_semigroup_idem_mult_inf: + "class.ab_semigroup_idem_mult inf" proof qed (rule inf_assoc inf_commute inf_idem)+ lemma fold_inf_insert[simp]: "finite A \ fold inf b (insert a A) = inf a (fold inf b A)" @@ -1250,7 +1271,7 @@ context semilattice_sup begin -lemma ab_semigroup_idem_mult_sup: "ab_semigroup_idem_mult sup" +lemma ab_semigroup_idem_mult_sup: "class.ab_semigroup_idem_mult sup" by (rule semilattice_inf.ab_semigroup_idem_mult_inf)(rule dual_semilattice) lemma fold_sup_insert[simp]: "finite A \ fold sup b (insert a A) = sup a (fold sup b A)" @@ -1470,15 +1491,15 @@ using assms by (rule Max.hom_commute) lemma ab_semigroup_idem_mult_min: - "ab_semigroup_idem_mult min" + "class.ab_semigroup_idem_mult min" proof qed (auto simp add: min_def) lemma ab_semigroup_idem_mult_max: - "ab_semigroup_idem_mult max" + "class.ab_semigroup_idem_mult max" proof qed (auto simp add: max_def) lemma max_lattice: - "semilattice_inf (op \) (op >) max" + "class.semilattice_inf (op \) (op >) max" by (fact min_max.dual_semilattice) lemma dual_max: diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Boogie/Tools/boogie_commands.ML --- a/src/HOL/Boogie/Tools/boogie_commands.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Boogie/Tools/boogie_commands.ML Tue May 04 20:30:22 2010 +0200 @@ -94,12 +94,12 @@ fun after_qed [thms] = ProofContext.theory (discharge (vcs ~~ thms)) | after_qed _ = I in - ProofContext.init thy + ProofContext.init_global thy |> fold Variable.auto_fixes ts |> (fn ctxt1 => ctxt1 |> prepare |-> (fn us => fn ctxt2 => ctxt2 - |> Proof.theorem_i NONE (fn thmss => fn ctxt => + |> Proof.theorem NONE (fn thmss => fn ctxt => let val export = map (finish ctxt1) o ProofContext.export ctxt ctxt2 in after_qed (map export thmss) ctxt end) [map (rpair []) us])) end @@ -187,8 +187,8 @@ end fun prove thy meth vc = - ProofContext.init thy - |> Proof.theorem_i NONE (K I) [[(Boogie_VCs.prop_of_vc vc, [])]] + ProofContext.init_global thy + |> Proof.theorem NONE (K I) [[(Boogie_VCs.prop_of_vc vc, [])]] |> Proof.apply meth |> Seq.hd |> Proof.global_done_proof diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Boogie/Tools/boogie_loader.ML --- a/src/HOL/Boogie/Tools/boogie_loader.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Boogie/Tools/boogie_loader.ML Tue May 04 20:30:22 2010 +0200 @@ -232,7 +232,7 @@ in apsnd sort_fst_str (fold split axs ([], [])) end fun mark_axioms thy axs = - Boogie_Axioms.get (ProofContext.init thy) + Boogie_Axioms.get (ProofContext.init_global thy) |> Termtab.make o map (fn thm => (Thm.prop_of thm, Unused thm)) |> fold mark axs |> split_list_kind thy o Termtab.dest diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Complete_Lattice.thy --- a/src/HOL/Complete_Lattice.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Complete_Lattice.thy Tue May 04 20:30:22 2010 +0200 @@ -33,8 +33,8 @@ begin lemma dual_complete_lattice: - "complete_lattice Sup Inf (op \) (op >) (op \) (op \) \ \" - by (auto intro!: complete_lattice.intro dual_bounded_lattice) + "class.complete_lattice Sup Inf (op \) (op >) (op \) (op \) \ \" + by 
(auto intro!: class.complete_lattice.intro dual_bounded_lattice) (unfold_locales, (fact bot_least top_greatest Sup_upper Sup_least Inf_lower Inf_greatest)+) @@ -98,9 +98,9 @@ syntax "_SUP1" :: "pttrns => 'b => 'b" ("(3SUP _./ _)" [0, 10] 10) - "_SUP" :: "pttrn => 'a set => 'b => 'b" ("(3SUP _:_./ _)" [0, 10] 10) + "_SUP" :: "pttrn => 'a set => 'b => 'b" ("(3SUP _:_./ _)" [0, 0, 10] 10) "_INF1" :: "pttrns => 'b => 'b" ("(3INF _./ _)" [0, 10] 10) - "_INF" :: "pttrn => 'a set => 'b => 'b" ("(3INF _:_./ _)" [0, 10] 10) + "_INF" :: "pttrn => 'a set => 'b => 'b" ("(3INF _:_./ _)" [0, 0, 10] 10) translations "SUP x y. B" == "SUP x. SUP y. B" @@ -295,15 +295,15 @@ syntax "_UNION1" :: "pttrns => 'b set => 'b set" ("(3UN _./ _)" [0, 10] 10) - "_UNION" :: "pttrn => 'a set => 'b set => 'b set" ("(3UN _:_./ _)" [0, 10] 10) + "_UNION" :: "pttrn => 'a set => 'b set => 'b set" ("(3UN _:_./ _)" [0, 0, 10] 10) syntax (xsymbols) "_UNION1" :: "pttrns => 'b set => 'b set" ("(3\_./ _)" [0, 10] 10) - "_UNION" :: "pttrn => 'a set => 'b set => 'b set" ("(3\_\_./ _)" [0, 10] 10) + "_UNION" :: "pttrn => 'a set => 'b set => 'b set" ("(3\_\_./ _)" [0, 0, 10] 10) syntax (latex output) "_UNION1" :: "pttrns => 'b set => 'b set" ("(3\(00\<^bsub>_\<^esub>)/ _)" [0, 10] 10) - "_UNION" :: "pttrn => 'a set => 'b set => 'b set" ("(3\(00\<^bsub>_\_\<^esub>)/ _)" [0, 10] 10) + "_UNION" :: "pttrn => 'a set => 'b set => 'b set" ("(3\(00\<^bsub>_\_\<^esub>)/ _)" [0, 0, 10] 10) translations "UN x y. B" == "UN x. UN y. B" @@ -531,15 +531,15 @@ syntax "_INTER1" :: "pttrns => 'b set => 'b set" ("(3INT _./ _)" [0, 10] 10) - "_INTER" :: "pttrn => 'a set => 'b set => 'b set" ("(3INT _:_./ _)" [0, 10] 10) + "_INTER" :: "pttrn => 'a set => 'b set => 'b set" ("(3INT _:_./ _)" [0, 0, 10] 10) syntax (xsymbols) "_INTER1" :: "pttrns => 'b set => 'b set" ("(3\_./ _)" [0, 10] 10) - "_INTER" :: "pttrn => 'a set => 'b set => 'b set" ("(3\_\_./ _)" [0, 10] 10) + "_INTER" :: "pttrn => 'a set => 'b set => 'b set" ("(3\_\_./ _)" [0, 0, 10] 10) syntax (latex output) "_INTER1" :: "pttrns => 'b set => 'b set" ("(3\(00\<^bsub>_\<^esub>)/ _)" [0, 10] 10) - "_INTER" :: "pttrn => 'a set => 'b set => 'b set" ("(3\(00\<^bsub>_\_\<^esub>)/ _)" [0, 10] 10) + "_INTER" :: "pttrn => 'a set => 'b set => 'b set" ("(3\(00\<^bsub>_\_\<^esub>)/ _)" [0, 0, 10] 10) translations "INT x y. B" == "INT x. INT y. 
B" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Complex.thy --- a/src/HOL/Complex.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Complex.thy Tue May 04 20:30:22 2010 +0200 @@ -99,7 +99,7 @@ subsection {* Multiplication and Division *} -instantiation complex :: "{field, division_by_zero}" +instantiation complex :: field_inverse_zero begin definition diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Decision_Procs/Approximation.thy --- a/src/HOL/Decision_Procs/Approximation.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Decision_Procs/Approximation.thy Tue May 04 20:30:22 2010 +0200 @@ -3209,47 +3209,12 @@ interpret_floatarith_divide interpret_floatarith_diff interpret_floatarith_tan interpret_floatarith_powr interpret_floatarith_log interpret_floatarith_sin -ML {* -structure Float_Arith = -struct - -@{code_datatype float = Float} -@{code_datatype floatarith = Add | Minus | Mult | Inverse | Cos | Arctan - | Abs | Max | Min | Pi | Sqrt | Exp | Ln | Power | Var | Num } -@{code_datatype form = Bound | Assign | Less | LessEqual | AtLeastAtMost} - -val approx_form = @{code approx_form} -val approx_tse_form = @{code approx_tse_form} -val approx' = @{code approx'} -val approx_form_eval = @{code approx_form_eval} - -end -*} - -code_reserved Eval Float_Arith - -code_type float (Eval "Float'_Arith.float") -code_const Float (Eval "Float'_Arith.Float/ (_,/ _)") - -code_type floatarith (Eval "Float'_Arith.floatarith") -code_const Add and Minus and Mult and Inverse and Cos and Arctan and Abs and Max and Min and - Pi and Sqrt and Exp and Ln and Power and Var and Num - (Eval "Float'_Arith.Add/ (_,/ _)" and "Float'_Arith.Minus" and "Float'_Arith.Mult/ (_,/ _)" and - "Float'_Arith.Inverse" and "Float'_Arith.Cos" and - "Float'_Arith.Arctan" and "Float'_Arith.Abs" and "Float'_Arith.Max/ (_,/ _)" and - "Float'_Arith.Min/ (_,/ _)" and "Float'_Arith.Pi" and "Float'_Arith.Sqrt" and - "Float'_Arith.Exp" and "Float'_Arith.Ln" and "Float'_Arith.Power/ (_,/ _)" and - "Float'_Arith.Var" and "Float'_Arith.Num") - -code_type form (Eval "Float'_Arith.form") -code_const Bound and Assign and Less and LessEqual and AtLeastAtMost - (Eval "Float'_Arith.Bound/ (_,/ _,/ _,/ _)" and "Float'_Arith.Assign/ (_,/ _,/ _)" and - "Float'_Arith.Less/ (_,/ _)" and "Float'_Arith.LessEqual/ (_,/ _)" and - "Float'_Arith.AtLeastAtMost/ (_,/ _,/ _)") - -code_const approx_form (Eval "Float'_Arith.approx'_form") -code_const approx_tse_form (Eval "Float'_Arith.approx'_tse'_form") -code_const approx' (Eval "Float'_Arith.approx'") +code_reflect Float_Arith + datatypes float = Float + and floatarith = Add | Minus | Mult | Inverse | Cos | Arctan + | Abs | Max | Min | Pi | Sqrt | Exp | Ln | Power | Var | Num + and form = Bound | Assign | Less | LessEqual | AtLeastAtMost + functions approx_form approx_tse_form approx' approx_form_eval ML {* fun reorder_bounds_tac prems i = diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Decision_Procs/Cooper.thy --- a/src/HOL/Decision_Procs/Cooper.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Decision_Procs/Cooper.thy Tue May 04 20:30:22 2010 +0200 @@ -1910,8 +1910,9 @@ ML {* @{code cooper_test} () *} (* -code_reserved SML oo -export_code pa in SML module_name GeneratedCooper file "~~/src/HOL/Tools/Qelim/raw_generated_cooper.ML" +code_reflect Generated_Cooper + functions pa + file "~~/src/HOL/Tools/Qelim/generated_cooper.ML" *) oracle linzqe_oracle = {* diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Decision_Procs/Decision_Procs.thy --- a/src/HOL/Decision_Procs/Decision_Procs.thy Tue May 04 19:57:55 2010 +0200 +++ 
b/src/HOL/Decision_Procs/Decision_Procs.thy Tue May 04 20:30:22 2010 +0200 @@ -8,4 +8,4 @@ "ex/Commutative_Ring_Ex" "ex/Approximation_Ex" "ex/Dense_Linear_Order_Ex" begin -end \ No newline at end of file +end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Decision_Procs/Dense_Linear_Order.thy --- a/src/HOL/Decision_Procs/Dense_Linear_Order.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Decision_Procs/Dense_Linear_Order.thy Tue May 04 20:30:22 2010 +0200 @@ -265,7 +265,7 @@ lemmas dlo_simps[no_atp] = order_refl less_irrefl not_less not_le exists_neq le_less neq_iff linear less_not_permute -lemma axiom[no_atp]: "dense_linorder (op \) (op <)" by (rule dense_linorder_axioms) +lemma axiom[no_atp]: "class.dense_linorder (op \) (op <)" by (rule dense_linorder_axioms) lemma atoms[no_atp]: shows "TERM (less :: 'a \ _)" and "TERM (less_eq :: 'a \ _)" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Decision_Procs/MIR.thy --- a/src/HOL/Decision_Procs/MIR.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Decision_Procs/MIR.thy Tue May 04 20:30:22 2010 +0200 @@ -5791,8 +5791,9 @@ ML {* @{code test4} () *} ML {* @{code test5} () *} -(*export_code mircfrqe mirlfrqe - in SML module_name Mir file "raw_mir.ML"*) +(*code_reflect Mir + functions mircfrqe mirlfrqe + file "mir.ML"*) oracle mirfr_oracle = {* fn (proofs, ct) => let diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Decision_Procs/Parametric_Ferrante_Rackoff.thy --- a/src/HOL/Decision_Procs/Parametric_Ferrante_Rackoff.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Decision_Procs/Parametric_Ferrante_Rackoff.thy Tue May 04 20:30:22 2010 +0200 @@ -27,7 +27,7 @@ "tmsize (CNP n c a) = 3 + polysize c + tmsize a " (* Semantics of terms tm *) -consts Itm :: "'a::{ring_char_0,division_by_zero,field} list \ 'a list \ tm \ 'a" +consts Itm :: "'a::{field_char_0, field_inverse_zero} list \ 'a list \ tm \ 'a" primrec "Itm vs bs (CP c) = (Ipoly vs c)" "Itm vs bs (Bound n) = bs!n" @@ -239,7 +239,7 @@ lemma tmadd[simp]: "Itm vs bs (tmadd (t,s)) = Itm vs bs (Add t s)" apply (induct t s rule: tmadd.induct, simp_all add: Let_def) apply (case_tac "c1 +\<^sub>p c2 = 0\<^sub>p",case_tac "n1 \ n2", simp_all) -apply (case_tac "n1 = n2", simp_all add: ring_simps) +apply (case_tac "n1 = n2", simp_all add: field_simps) apply (simp only: right_distrib[symmetric]) by (auto simp del: polyadd simp add: polyadd[symmetric]) @@ -259,7 +259,7 @@ "tmmul t = (\ i. 
Mul i t)" lemma tmmul[simp]: "Itm vs bs (tmmul t i) = Itm vs bs (Mul i t)" -by (induct t arbitrary: i rule: tmmul.induct, simp_all add: ring_simps) +by (induct t arbitrary: i rule: tmmul.induct, simp_all add: field_simps) lemma tmmul_nb0[simp]: "tmbound0 t \ tmbound0 (tmmul t i)" by (induct t arbitrary: i rule: tmmul.induct, auto ) @@ -270,7 +270,7 @@ by (induct t arbitrary: i rule: tmmul.induct, auto simp add: Let_def) lemma tmmul_allpolys_npoly[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero, field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "allpolys isnpoly t \ isnpoly c \ allpolys isnpoly (tmmul t c)" by (induct t rule: tmmul.induct, simp_all add: Let_def polymul_norm) definition tmneg :: "tm \ tm" where @@ -296,7 +296,7 @@ using tmneg_def by simp lemma [simp]: "isnpoly (C (-1,1))" unfolding isnpoly_def by simp lemma tmneg_allpolys_npoly[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero, field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "allpolys isnpoly t \ allpolys isnpoly (tmneg t)" unfolding tmneg_def by auto @@ -310,7 +310,7 @@ lemma tmsub_blt[simp]: "\tmboundslt n t ; tmboundslt n s\ \ tmboundslt n (tmsub t s )" using tmsub_def by simp lemma tmsub_allpolys_npoly[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero, field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "allpolys isnpoly t \ allpolys isnpoly s \ allpolys isnpoly (tmsub t s)" unfolding tmsub_def by (simp add: isnpoly_def) @@ -324,8 +324,8 @@ "simptm (CNP n c t) = (let c' = polynate c in if c' = 0\<^sub>p then simptm t else tmadd (CNP n c' (CP 0\<^sub>p ), simptm t))" lemma polynate_stupid: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero, field})" - shows "polynate t = 0\<^sub>p \ Ipoly bs t = (0::'a::{ring_char_0,division_by_zero, field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" + shows "polynate t = 0\<^sub>p \ Ipoly bs t = (0::'a::{field_char_0, field_inverse_zero})" apply (subst polynate[symmetric]) apply simp done @@ -345,7 +345,7 @@ lemma [simp]: "isnpoly 0\<^sub>p" and [simp]: "isnpoly (C(1,1))" by (simp_all add: isnpoly_def) lemma simptm_allpolys_npoly[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero, field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "allpolys isnpoly (simptm p)" by (induct p rule: simptm.induct, auto simp add: Let_def) @@ -369,14 +369,14 @@ "tmbound 0 (snd (split0 t)) \ (Itm vs bs (CNP 0 (fst (split0 t)) (snd (split0 t))) = Itm vs bs t)" apply (induct t rule: split0.induct) apply simp - apply (simp add: Let_def split_def ring_simps) - apply (simp add: Let_def split_def ring_simps) - apply (simp add: Let_def split_def ring_simps) - apply (simp add: Let_def split_def ring_simps) - apply (simp add: Let_def split_def ring_simps) + apply (simp add: Let_def split_def field_simps) + apply (simp add: Let_def split_def field_simps) + apply (simp add: Let_def split_def field_simps) + apply (simp add: Let_def split_def field_simps) + apply (simp add: Let_def split_def field_simps) apply (simp add: Let_def split_def mult_assoc right_distrib[symmetric]) - apply (simp add: Let_def split_def ring_simps) - apply (simp add: Let_def split_def ring_simps) + apply (simp add: Let_def split_def field_simps) + apply (simp add: Let_def split_def field_simps) done lemma split0_ci: "split0 t = (c',t') \ Itm vs bs t = Itm vs bs (CNP 0 c' t')" @@ -387,7 +387,7 @@ qed lemma 
split0_nb0: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero, field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "split0 t = (c',t') \ tmbound 0 t'" proof- fix c' t' @@ -395,7 +395,7 @@ with conjunct1[OF split0[where t="t"]] show "tmbound 0 t'" by simp qed -lemma split0_nb0'[simp]: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero, field})" +lemma split0_nb0'[simp]: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "tmbound0 (snd (split0 t))" using split0_nb0[of t "fst (split0 t)" "snd (split0 t)"] by (simp add: tmbound0_tmbound_iff) @@ -418,7 +418,7 @@ lemma allpolys_split0: "allpolys isnpoly p \ allpolys isnpoly (snd (split0 p))" by (induct p rule: split0.induct, auto simp add: isnpoly_def Let_def split_def split0_stupid) -lemma isnpoly_fst_split0: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero, field})" +lemma isnpoly_fst_split0: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "allpolys isnpoly p \ isnpoly (fst (split0 p))" by (induct p rule: split0.induct, @@ -447,7 +447,7 @@ by (induct p rule: fmsize.induct) simp_all (* Semantics of formulae (fm) *) -consts Ifm ::"'a::{division_by_zero,linordered_field} list \ 'a list \ fm \ bool" +consts Ifm ::"'a::{linordered_field_inverse_zero} list \ 'a list \ fm \ bool" primrec "Ifm vs bs T = True" "Ifm vs bs F = False" @@ -969,24 +969,24 @@ definition "simpeq t = (let (c,s) = split0 (simptm t) in if c= 0\<^sub>p then eq s else Eq (CNP 0 c s))" definition "simpneq t = (let (c,s) = split0 (simptm t) in if c= 0\<^sub>p then neq s else NEq (CNP 0 c s))" -lemma simplt_islin[simp]: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" +lemma simplt_islin[simp]: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "islin (simplt t)" unfolding simplt_def using split0_nb0' by (auto simp add: lt_lin Let_def split_def isnpoly_fst_split0[OF simptm_allpolys_npoly] islin_stupid allpolys_split0[OF simptm_allpolys_npoly]) -lemma simple_islin[simp]: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" +lemma simple_islin[simp]: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "islin (simple t)" unfolding simple_def using split0_nb0' by (auto simp add: Let_def split_def isnpoly_fst_split0[OF simptm_allpolys_npoly] islin_stupid allpolys_split0[OF simptm_allpolys_npoly] le_lin) -lemma simpeq_islin[simp]: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" +lemma simpeq_islin[simp]: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "islin (simpeq t)" unfolding simpeq_def using split0_nb0' by (auto simp add: Let_def split_def isnpoly_fst_split0[OF simptm_allpolys_npoly] islin_stupid allpolys_split0[OF simptm_allpolys_npoly] eq_lin) -lemma simpneq_islin[simp]: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" +lemma simpneq_islin[simp]: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "islin (simpneq t)" unfolding simpneq_def using split0_nb0' @@ -994,7 +994,7 @@ lemma really_stupid: "\ (\c1 s'. 
(c1, s') \ split0 s)" by (cases "split0 s", auto) -lemma split0_npoly: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" +lemma split0_npoly: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" and n: "allpolys isnpoly t" shows "isnpoly (fst (split0 t))" and "allpolys isnpoly (snd (split0 t))" using n @@ -1083,7 +1083,7 @@ apply (case_tac poly, auto) done -lemma simplt_nb[simp]: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" +lemma simplt_nb[simp]: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "tmbound0 t \ bound0 (simplt t)" using split0 [of "simptm t" vs bs] proof(simp add: simplt_def Let_def split_def) @@ -1100,7 +1100,7 @@ fst (split0 (simptm t)) = 0\<^sub>p" by (simp add: simplt_def Let_def split_def lt_nb) qed -lemma simple_nb[simp]: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" +lemma simple_nb[simp]: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "tmbound0 t \ bound0 (simple t)" using split0 [of "simptm t" vs bs] proof(simp add: simple_def Let_def split_def) @@ -1117,7 +1117,7 @@ fst (split0 (simptm t)) = 0\<^sub>p" by (simp add: simplt_def Let_def split_def le_nb) qed -lemma simpeq_nb[simp]: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" +lemma simpeq_nb[simp]: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "tmbound0 t \ bound0 (simpeq t)" using split0 [of "simptm t" vs bs] proof(simp add: simpeq_def Let_def split_def) @@ -1134,7 +1134,7 @@ fst (split0 (simptm t)) = 0\<^sub>p" by (simp add: simpeq_def Let_def split_def eq_nb) qed -lemma simpneq_nb[simp]: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" +lemma simpneq_nb[simp]: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "tmbound0 t \ bound0 (simpneq t)" using split0 [of "simptm t" vs bs] proof(simp add: simpneq_def Let_def split_def) @@ -1267,7 +1267,7 @@ lemma simpfm[simp]: "Ifm vs bs (simpfm p) = Ifm vs bs p" by(induct p arbitrary: bs rule: simpfm.induct, auto) -lemma simpfm_bound0: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" +lemma simpfm_bound0: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "bound0 p \ bound0 (simpfm p)" by (induct p rule: simpfm.induct, auto) @@ -1296,7 +1296,7 @@ lemma disj_lin: "islin p \ islin q \ islin (disj p q)" by (simp add: disj_def) lemma conj_lin: "islin p \ islin q \ islin (conj p q)" by (simp add: conj_def) -lemma assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" +lemma assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "qfree p \ islin (simpfm p)" apply (induct p rule: simpfm.induct) apply (simp_all add: conj_lin disj_lin) @@ -1698,11 +1698,11 @@ {assume c: "?N c > 0" from px pos_less_divide_eq[OF c, where a="x" and b="-?Nt x s"] have px': "x < - ?Nt x s / ?N c" - by (auto simp add: not_less ring_simps) + by (auto simp add: not_less field_simps) {assume y: "y < - ?Nt x s / ?N c" hence "y * ?N c < - ?Nt x s" by (simp add: pos_less_divide_eq[OF c, where a="y" and b="-?Nt x s", symmetric]) - hence "?N c * y + ?Nt x s < 0" by (simp add: ring_simps) + hence "?N c * y + ?Nt x s < 0" by (simp add: field_simps) hence ?case using tmbound0_I[OF lin(3), where bs="bs" and b="x" and b'="y"] by simp} moreover {assume y: "y > -?Nt x s / ?N c" @@ -1715,11 +1715,11 @@ {assume c: "?N c < 0" from px neg_divide_less_eq[OF c, where a="x" and b="-?Nt x s"] have px': "x > - ?Nt x s / ?N c" - by (auto simp add: 
not_less ring_simps) + by (auto simp add: not_less field_simps) {assume y: "y > - ?Nt x s / ?N c" hence "y * ?N c < - ?Nt x s" by (simp add: neg_divide_less_eq[OF c, where a="y" and b="-?Nt x s", symmetric]) - hence "?N c * y + ?Nt x s < 0" by (simp add: ring_simps) + hence "?N c * y + ?Nt x s < 0" by (simp add: field_simps) hence ?case using tmbound0_I[OF lin(3), where bs="bs" and b="x" and b'="y"] by simp} moreover {assume y: "y < -?Nt x s / ?N c" @@ -1743,11 +1743,11 @@ moreover {assume c: "?N c > 0" from px pos_le_divide_eq[OF c, where a="x" and b="-?Nt x s"] - have px': "x <= - ?Nt x s / ?N c" by (simp add: not_less ring_simps) + have px': "x <= - ?Nt x s / ?N c" by (simp add: not_less field_simps) {assume y: "y < - ?Nt x s / ?N c" hence "y * ?N c < - ?Nt x s" by (simp add: pos_less_divide_eq[OF c, where a="y" and b="-?Nt x s", symmetric]) - hence "?N c * y + ?Nt x s < 0" by (simp add: ring_simps) + hence "?N c * y + ?Nt x s < 0" by (simp add: field_simps) hence ?case using tmbound0_I[OF lin(3), where bs="bs" and b="x" and b'="y"] by simp} moreover {assume y: "y > -?Nt x s / ?N c" @@ -1759,11 +1759,11 @@ moreover {assume c: "?N c < 0" from px neg_divide_le_eq[OF c, where a="x" and b="-?Nt x s"] - have px': "x >= - ?Nt x s / ?N c" by (simp add: ring_simps) + have px': "x >= - ?Nt x s / ?N c" by (simp add: field_simps) {assume y: "y > - ?Nt x s / ?N c" hence "y * ?N c < - ?Nt x s" by (simp add: neg_divide_less_eq[OF c, where a="y" and b="-?Nt x s", symmetric]) - hence "?N c * y + ?Nt x s < 0" by (simp add: ring_simps) + hence "?N c * y + ?Nt x s < 0" by (simp add: field_simps) hence ?case using tmbound0_I[OF lin(3), where bs="bs" and b="x" and b'="y"] by simp} moreover {assume y: "y < -?Nt x s / ?N c" @@ -1787,7 +1787,7 @@ moreover {assume c: "?N c > 0" hence cnz: "?N c \ 0" by simp from px eq_divide_eq[of "x" "-?Nt x s" "?N c"] cnz - have px': "x = - ?Nt x s / ?N c" by (simp add: ring_simps) + have px': "x = - ?Nt x s / ?N c" by (simp add: field_simps) {assume y: "y < -?Nt x s / ?N c" with ly have eu: "l < - ?Nt x s / ?N c" by auto with noS ly yu have th: "- ?Nt x s / ?N c \ u" by (cases "- ?Nt x s / ?N c < u", auto) @@ -1802,7 +1802,7 @@ moreover {assume c: "?N c < 0" hence cnz: "?N c \ 0" by simp from px eq_divide_eq[of "x" "-?Nt x s" "?N c"] cnz - have px': "x = - ?Nt x s / ?N c" by (simp add: ring_simps) + have px': "x = - ?Nt x s / ?N c" by (simp add: field_simps) {assume y: "y < -?Nt x s / ?N c" with ly have eu: "l < - ?Nt x s / ?N c" by auto with noS ly yu have th: "- ?Nt x s / ?N c \ u" by (cases "- ?Nt x s / ?N c < u", auto) @@ -1829,7 +1829,7 @@ moreover {assume c: "?N c \ 0" from yne c eq_divide_eq[of "y" "- ?Nt x s" "?N c"] have ?case - by (simp add: ring_simps tmbound0_I[OF lin(3), of vs x bs y] sum_eq[symmetric]) } + by (simp add: field_simps tmbound0_I[OF lin(3), of vs x bs y] sum_eq[symmetric]) } ultimately show ?case by blast qed (auto simp add: nth_pos2 tmbound0_I[where vs=vs and bs="bs" and b="y" and b'="x"] bound0_I[where vs=vs and bs="bs" and b="y" and b'="x"]) @@ -1844,7 +1844,7 @@ lemma half_sum_eq: "(u + u) / (1+1) = (u::'a::{linordered_field})" proof- - have "(u + u) = (1 + 1) * u" by (simp add: ring_simps) + have "(u + u) = (1 + 1) * u" by (simp add: field_simps) hence "(u + u) / (1+1) = (1 + 1)*u / (1 + 1)" by simp with nonzero_mult_divide_cancel_left[OF one_plus_one_nonzero, of u] show ?thesis by simp qed @@ -1987,7 +1987,7 @@ also have "\ \ (1 + 1)*?d * (?a * (-?s / ((1 + 1)*?d)) + ?r) = 0" using d mult_cancel_left[of "(1 + 1)*?d" "(?a * (-?s / ((1 + 
1)*?d)) + ?r)" 0] by simp also have "\ \ (- ?a * ?s) * ((1 + 1)*?d / ((1 + 1)*?d)) + (1 + 1)*?d*?r= 0" - by (simp add: ring_simps right_distrib[of "(1 + 1)*?d"] del: right_distrib) + by (simp add: field_simps right_distrib[of "(1 + 1)*?d"] del: right_distrib) also have "\ \ - (?a * ?s) + (1 + 1)*?d*?r = 0" using d by simp finally have ?thesis using c d @@ -2003,7 +2003,7 @@ also have "\ \ (1 + 1)*?c * (?a * (-?t / ((1 + 1)*?c)) + ?r) = 0" using c mult_cancel_left[of "(1 + 1)*?c" "(?a * (-?t / ((1 + 1)*?c)) + ?r)" 0] by simp also have "\ \ (?a * -?t)* ((1 + 1)*?c) / ((1 + 1)*?c) + (1 + 1)*?c*?r= 0" - by (simp add: ring_simps right_distrib[of "(1 + 1)*?c"] del: right_distrib) + by (simp add: field_simps right_distrib[of "(1 + 1)*?c"] del: right_distrib) also have "\ \ - (?a * ?t) + (1 + 1)*?c*?r = 0" using c by simp finally have ?thesis using c d apply (simp add: r[of "- (?t/ ((1 + 1)*?c))"] msubsteq_def Let_def evaldjf_ex del: one_add_one_is_two) @@ -2014,19 +2014,19 @@ {assume c: "?c \ 0" and d: "?d\0" hence dc: "?c * ?d *(1 + 1) \ 0" by simp from add_frac_eq[OF c d, of "- ?t" "- ?s"] have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d)" - by (simp add: ring_simps) + by (simp add: field_simps) have "?rhs \ Ifm vs (- (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d) # bs) (Eq (CNP 0 a r))" by (simp only: th) also have "\ \ ?a * (- (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d)) + ?r = 0" by (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"]) also have "\ \ ((1 + 1) * ?c * ?d) * (?a * (- (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d)) + ?r) =0 " using c d mult_cancel_left[of "(1 + 1) * ?c * ?d" "?a * (- (?d * ?t + ?c* ?s)/ ((1 + 1)*?c*?d)) + ?r" 0] by simp also have "\ \ ?a * (- (?d * ?t + ?c* ?s )) + (1 + 1)*?c*?d*?r =0" - using nonzero_mult_divide_cancel_left[OF dc] c d - by (simp add: ring_simps diff_divide_distrib del: left_distrib) + using nonzero_mult_divide_cancel_left [OF dc] c d + by (simp add: algebra_simps diff_divide_distrib del: left_distrib) finally have ?thesis using c d - apply (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"] msubsteq_def Let_def evaldjf_ex ring_simps) + apply (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"] msubsteq_def Let_def evaldjf_ex field_simps) apply (simp only: one_add_one_is_two[symmetric] of_int_add) - apply (simp add: ring_simps) + apply (simp add: field_simps) done } ultimately show ?thesis by blast qed @@ -2075,7 +2075,7 @@ also have "\ \ (1 + 1)*?d * (?a * (-?s / ((1 + 1)*?d)) + ?r) \ 0" using d mult_cancel_left[of "(1 + 1)*?d" "(?a * (-?s / ((1 + 1)*?d)) + ?r)" 0] by simp also have "\ \ (- ?a * ?s) * ((1 + 1)*?d / ((1 + 1)*?d)) + (1 + 1)*?d*?r\ 0" - by (simp add: ring_simps right_distrib[of "(1 + 1)*?d"] del: right_distrib) + by (simp add: field_simps right_distrib[of "(1 + 1)*?d"] del: right_distrib) also have "\ \ - (?a * ?s) + (1 + 1)*?d*?r \ 0" using d by simp finally have ?thesis using c d @@ -2091,7 +2091,7 @@ also have "\ \ (1 + 1)*?c * (?a * (-?t / ((1 + 1)*?c)) + ?r) \ 0" using c mult_cancel_left[of "(1 + 1)*?c" "(?a * (-?t / ((1 + 1)*?c)) + ?r)" 0] by simp also have "\ \ (?a * -?t)* ((1 + 1)*?c) / ((1 + 1)*?c) + (1 + 1)*?c*?r \ 0" - by (simp add: ring_simps right_distrib[of "(1 + 1)*?c"] del: right_distrib) + by (simp add: field_simps right_distrib[of "(1 + 1)*?c"] del: right_distrib) also have "\ \ - (?a * ?t) + (1 + 1)*?c*?r \ 0" using c by simp finally have ?thesis using c d apply (simp add: r[of "- (?t/ ((1 + 1)*?c))"] msubstneq_def Let_def evaldjf_ex del: one_add_one_is_two) @@ 
-2102,7 +2102,7 @@ {assume c: "?c \ 0" and d: "?d\0" hence dc: "?c * ?d *(1 + 1) \ 0" by simp from add_frac_eq[OF c d, of "- ?t" "- ?s"] have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d)" - by (simp add: ring_simps) + by (simp add: field_simps) have "?rhs \ Ifm vs (- (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d) # bs) (NEq (CNP 0 a r))" by (simp only: th) also have "\ \ ?a * (- (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d)) + ?r \ 0" by (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"]) @@ -2110,11 +2110,11 @@ using c d mult_cancel_left[of "(1 + 1) * ?c * ?d" "?a * (- (?d * ?t + ?c* ?s)/ ((1 + 1)*?c*?d)) + ?r" 0] by simp also have "\ \ ?a * (- (?d * ?t + ?c* ?s )) + (1 + 1)*?c*?d*?r \ 0" using nonzero_mult_divide_cancel_left[OF dc] c d - by (simp add: ring_simps diff_divide_distrib del: left_distrib) + by (simp add: algebra_simps diff_divide_distrib del: left_distrib) finally have ?thesis using c d - apply (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"] msubstneq_def Let_def evaldjf_ex ring_simps) + apply (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"] msubstneq_def Let_def evaldjf_ex field_simps) apply (simp only: one_add_one_is_two[symmetric] of_int_add) - apply (simp add: ring_simps) + apply (simp add: field_simps) done } ultimately show ?thesis by blast qed @@ -2169,7 +2169,7 @@ from dc' have dc'': "\ (1 + 1)*?c *?d < 0" by simp from add_frac_eq[OF c d, of "- ?t" "- ?s"] have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d)" - by (simp add: ring_simps) + by (simp add: field_simps) have "?rhs \ Ifm vs (- (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d) # bs) (Lt (CNP 0 a r))" by (simp only: th) also have "\ \ ?a * (- (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d)) + ?r < 0" by (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"]) @@ -2178,11 +2178,11 @@ using dc' dc'' mult_less_cancel_left_disj[of "(1 + 1) * ?c * ?d" "?a * (- (?d * ?t + ?c* ?s)/ ((1 + 1)*?c*?d)) + ?r" 0] by simp also have "\ \ ?a * (- (?d * ?t + ?c* ?s )) + (1 + 1)*?c*?d*?r < 0" using nonzero_mult_divide_cancel_left[of "(1 + 1)*?c*?d"] c d - by (simp add: ring_simps diff_divide_distrib del: left_distrib) + by (simp add: algebra_simps diff_divide_distrib del: left_distrib) finally have ?thesis using dc c d nc nd dc' - apply (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"] msubstlt_def Let_def evaldjf_ex ring_simps lt polyneg_norm polymul_norm) + apply (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"] msubstlt_def Let_def evaldjf_ex field_simps lt polyneg_norm polymul_norm) apply (simp only: one_add_one_is_two[symmetric] of_int_add) - by (simp add: ring_simps order_less_not_sym[OF dc])} + by (simp add: field_simps order_less_not_sym[OF dc])} moreover {assume dc: "?c*?d < 0" @@ -2191,7 +2191,7 @@ hence c:"?c \ 0" and d: "?d\ 0" by auto from add_frac_eq[OF c d, of "- ?t" "- ?s"] have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d)" - by (simp add: ring_simps) + by (simp add: field_simps) have "?rhs \ Ifm vs (- (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d) # bs) (Lt (CNP 0 a r))" by (simp only: th) also have "\ \ ?a * (- (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d)) + ?r < 0" by (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"]) @@ -2201,78 +2201,78 @@ using dc' order_less_not_sym[OF dc'] mult_less_cancel_left_disj[of "(1 + 1) * ?c * ?d" 0 "?a * (- (?d * ?t + ?c* ?s)/ ((1 + 1)*?c*?d)) + ?r"] by simp also have "\ \ ?a * ((?d * ?t + ?c* ?s )) - (1 + 1)*?c*?d*?r < 0" 
using nonzero_mult_divide_cancel_left[of "(1 + 1)*?c*?d"] c d - by (simp add: ring_simps diff_divide_distrib del: left_distrib) + by (simp add: algebra_simps diff_divide_distrib del: left_distrib) finally have ?thesis using dc c d nc nd - apply (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"] msubstlt_def Let_def evaldjf_ex ring_simps lt polyneg_norm polymul_norm) + apply (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"] msubstlt_def Let_def evaldjf_ex field_simps lt polyneg_norm polymul_norm) apply (simp only: one_add_one_is_two[symmetric] of_int_add) - by (simp add: ring_simps order_less_not_sym[OF dc]) } + by (simp add: field_simps order_less_not_sym[OF dc]) } moreover {assume c: "?c > 0" and d: "?d=0" from c have c'': "(1 + 1)*?c > 0" by (simp add: zero_less_mult_iff) from c have c': "(1 + 1)*?c \ 0" by simp - from d have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?t / ((1 + 1)*?c)" by (simp add: ring_simps) + from d have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?t / ((1 + 1)*?c)" by (simp add: field_simps) have "?rhs \ Ifm vs (- ?t / ((1 + 1)*?c) # bs) (Lt (CNP 0 a r))" by (simp only: th) also have "\ \ ?a* (- ?t / ((1 + 1)*?c))+ ?r < 0" by (simp add: r[of "- (?t / ((1 + 1)*?c))"]) also have "\ \ (1 + 1)*?c * (?a* (- ?t / ((1 + 1)*?c))+ ?r) < 0" using c mult_less_cancel_left_disj[of "(1 + 1) * ?c" "?a* (- ?t / ((1 + 1)*?c))+ ?r" 0] c' c'' order_less_not_sym[OF c''] by simp also have "\ \ - ?a*?t+ (1 + 1)*?c *?r < 0" using nonzero_mult_divide_cancel_left[OF c'] c - by (simp add: ring_simps diff_divide_distrib less_le del: left_distrib) + by (simp add: algebra_simps diff_divide_distrib less_le del: left_distrib) finally have ?thesis using c d nc nd - apply(simp add: r[of "- (?t / ((1 + 1)*?c))"] msubstlt_def Let_def evaldjf_ex ring_simps lt polyneg_norm polymul_norm) + apply(simp add: r[of "- (?t / ((1 + 1)*?c))"] msubstlt_def Let_def evaldjf_ex field_simps lt polyneg_norm polymul_norm) apply (simp only: one_add_one_is_two[symmetric] of_int_add) using c order_less_not_sym[OF c] less_imp_neq[OF c] - by (simp add: ring_simps ) } + by (simp add: field_simps ) } moreover {assume c: "?c < 0" and d: "?d=0" hence c': "(1 + 1)*?c \ 0" by simp from c have c'': "(1 + 1)*?c < 0" by (simp add: mult_less_0_iff) - from d have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?t / ((1 + 1)*?c)" by (simp add: ring_simps) + from d have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?t / ((1 + 1)*?c)" by (simp add: field_simps) have "?rhs \ Ifm vs (- ?t / ((1 + 1)*?c) # bs) (Lt (CNP 0 a r))" by (simp only: th) also have "\ \ ?a* (- ?t / ((1 + 1)*?c))+ ?r < 0" by (simp add: r[of "- (?t / ((1 + 1)*?c))"]) also have "\ \ (1 + 1)*?c * (?a* (- ?t / ((1 + 1)*?c))+ ?r) > 0" using c order_less_not_sym[OF c''] less_imp_neq[OF c''] c'' mult_less_cancel_left_disj[of "(1 + 1) * ?c" 0 "?a* (- ?t / ((1 + 1)*?c))+ ?r"] by simp also have "\ \ ?a*?t - (1 + 1)*?c *?r < 0" using nonzero_mult_divide_cancel_left[OF c'] c order_less_not_sym[OF c''] less_imp_neq[OF c''] c'' - by (simp add: ring_simps diff_divide_distrib del: left_distrib) + by (simp add: algebra_simps diff_divide_distrib del: left_distrib) finally have ?thesis using c d nc nd - apply(simp add: r[of "- (?t / ((1 + 1)*?c))"] msubstlt_def Let_def evaldjf_ex ring_simps lt polyneg_norm polymul_norm) + apply(simp add: r[of "- (?t / ((1 + 1)*?c))"] msubstlt_def Let_def evaldjf_ex field_simps lt polyneg_norm polymul_norm) apply (simp only: one_add_one_is_two[symmetric] of_int_add) using c order_less_not_sym[OF c] less_imp_neq[OF c] - by (simp add: 
ring_simps ) } + by (simp add: field_simps ) } moreover moreover {assume c: "?c = 0" and d: "?d>0" from d have d'': "(1 + 1)*?d > 0" by (simp add: zero_less_mult_iff) from d have d': "(1 + 1)*?d \ 0" by simp - from c have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?s / ((1 + 1)*?d)" by (simp add: ring_simps) + from c have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?s / ((1 + 1)*?d)" by (simp add: field_simps) have "?rhs \ Ifm vs (- ?s / ((1 + 1)*?d) # bs) (Lt (CNP 0 a r))" by (simp only: th) also have "\ \ ?a* (- ?s / ((1 + 1)*?d))+ ?r < 0" by (simp add: r[of "- (?s / ((1 + 1)*?d))"]) also have "\ \ (1 + 1)*?d * (?a* (- ?s / ((1 + 1)*?d))+ ?r) < 0" using d mult_less_cancel_left_disj[of "(1 + 1) * ?d" "?a* (- ?s / ((1 + 1)*?d))+ ?r" 0] d' d'' order_less_not_sym[OF d''] by simp also have "\ \ - ?a*?s+ (1 + 1)*?d *?r < 0" using nonzero_mult_divide_cancel_left[OF d'] d - by (simp add: ring_simps diff_divide_distrib less_le del: left_distrib) + by (simp add: algebra_simps diff_divide_distrib less_le del: left_distrib) finally have ?thesis using c d nc nd - apply(simp add: r[of "- (?s / ((1 + 1)*?d))"] msubstlt_def Let_def evaldjf_ex ring_simps lt polyneg_norm polymul_norm) + apply(simp add: r[of "- (?s / ((1 + 1)*?d))"] msubstlt_def Let_def evaldjf_ex field_simps lt polyneg_norm polymul_norm) apply (simp only: one_add_one_is_two[symmetric] of_int_add) using d order_less_not_sym[OF d] less_imp_neq[OF d] - by (simp add: ring_simps ) } + by (simp add: field_simps) } moreover {assume c: "?c = 0" and d: "?d<0" hence d': "(1 + 1)*?d \ 0" by simp from d have d'': "(1 + 1)*?d < 0" by (simp add: mult_less_0_iff) - from c have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?s / ((1 + 1)*?d)" by (simp add: ring_simps) + from c have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?s / ((1 + 1)*?d)" by (simp add: field_simps) have "?rhs \ Ifm vs (- ?s / ((1 + 1)*?d) # bs) (Lt (CNP 0 a r))" by (simp only: th) also have "\ \ ?a* (- ?s / ((1 + 1)*?d))+ ?r < 0" by (simp add: r[of "- (?s / ((1 + 1)*?d))"]) also have "\ \ (1 + 1)*?d * (?a* (- ?s / ((1 + 1)*?d))+ ?r) > 0" using d order_less_not_sym[OF d''] less_imp_neq[OF d''] d'' mult_less_cancel_left_disj[of "(1 + 1) * ?d" 0 "?a* (- ?s / ((1 + 1)*?d))+ ?r"] by simp also have "\ \ ?a*?s - (1 + 1)*?d *?r < 0" using nonzero_mult_divide_cancel_left[OF d'] d order_less_not_sym[OF d''] less_imp_neq[OF d''] d'' - by (simp add: ring_simps diff_divide_distrib del: left_distrib) + by (simp add: algebra_simps diff_divide_distrib del: left_distrib) finally have ?thesis using c d nc nd - apply(simp add: r[of "- (?s / ((1 + 1)*?d))"] msubstlt_def Let_def evaldjf_ex ring_simps lt polyneg_norm polymul_norm) + apply(simp add: r[of "- (?s / ((1 + 1)*?d))"] msubstlt_def Let_def evaldjf_ex field_simps lt polyneg_norm polymul_norm) apply (simp only: one_add_one_is_two[symmetric] of_int_add) using d order_less_not_sym[OF d] less_imp_neq[OF d] - by (simp add: ring_simps ) } + by (simp add: field_simps ) } ultimately show ?thesis by blast qed @@ -2325,7 +2325,7 @@ from dc' have dc'': "\ (1 + 1)*?c *?d < 0" by simp from add_frac_eq[OF c d, of "- ?t" "- ?s"] have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d)" - by (simp add: ring_simps) + by (simp add: field_simps) have "?rhs \ Ifm vs (- (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d) # bs) (Le (CNP 0 a r))" by (simp only: th) also have "\ \ ?a * (- (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d)) + ?r <= 0" by (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"]) @@ -2334,11 +2334,11 @@ using dc' dc'' mult_le_cancel_left[of "(1 
+ 1) * ?c * ?d" "?a * (- (?d * ?t + ?c* ?s)/ ((1 + 1)*?c*?d)) + ?r" 0] by simp also have "\ \ ?a * (- (?d * ?t + ?c* ?s )) + (1 + 1)*?c*?d*?r <= 0" using nonzero_mult_divide_cancel_left[of "(1 + 1)*?c*?d"] c d - by (simp add: ring_simps diff_divide_distrib del: left_distrib) + by (simp add: algebra_simps diff_divide_distrib del: left_distrib) finally have ?thesis using dc c d nc nd dc' - apply (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"] msubstle_def Let_def evaldjf_ex ring_simps lt polyneg_norm polymul_norm) + apply (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"] msubstle_def Let_def evaldjf_ex field_simps lt polyneg_norm polymul_norm) apply (simp only: one_add_one_is_two[symmetric] of_int_add) - by (simp add: ring_simps order_less_not_sym[OF dc])} + by (simp add: field_simps order_less_not_sym[OF dc])} moreover {assume dc: "?c*?d < 0" @@ -2347,7 +2347,7 @@ hence c:"?c \ 0" and d: "?d\ 0" by auto from add_frac_eq[OF c d, of "- ?t" "- ?s"] have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d)" - by (simp add: ring_simps) + by (simp add: field_simps) have "?rhs \ Ifm vs (- (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d) # bs) (Le (CNP 0 a r))" by (simp only: th) also have "\ \ ?a * (- (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d)) + ?r <= 0" by (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"]) @@ -2357,78 +2357,78 @@ using dc' order_less_not_sym[OF dc'] mult_le_cancel_left[of "(1 + 1) * ?c * ?d" 0 "?a * (- (?d * ?t + ?c* ?s)/ ((1 + 1)*?c*?d)) + ?r"] by simp also have "\ \ ?a * ((?d * ?t + ?c* ?s )) - (1 + 1)*?c*?d*?r <= 0" using nonzero_mult_divide_cancel_left[of "(1 + 1)*?c*?d"] c d - by (simp add: ring_simps diff_divide_distrib del: left_distrib) + by (simp add: algebra_simps diff_divide_distrib del: left_distrib) finally have ?thesis using dc c d nc nd - apply (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"] msubstle_def Let_def evaldjf_ex ring_simps lt polyneg_norm polymul_norm) + apply (simp add: r[of "(- (?d * ?t) + - (?c *?s)) / ((1 + 1) * ?c * ?d)"] msubstle_def Let_def evaldjf_ex field_simps lt polyneg_norm polymul_norm) apply (simp only: one_add_one_is_two[symmetric] of_int_add) - by (simp add: ring_simps order_less_not_sym[OF dc]) } + by (simp add: field_simps order_less_not_sym[OF dc]) } moreover {assume c: "?c > 0" and d: "?d=0" from c have c'': "(1 + 1)*?c > 0" by (simp add: zero_less_mult_iff) from c have c': "(1 + 1)*?c \ 0" by simp - from d have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?t / ((1 + 1)*?c)" by (simp add: ring_simps) + from d have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?t / ((1 + 1)*?c)" by (simp add: field_simps) have "?rhs \ Ifm vs (- ?t / ((1 + 1)*?c) # bs) (Le (CNP 0 a r))" by (simp only: th) also have "\ \ ?a* (- ?t / ((1 + 1)*?c))+ ?r <= 0" by (simp add: r[of "- (?t / ((1 + 1)*?c))"]) also have "\ \ (1 + 1)*?c * (?a* (- ?t / ((1 + 1)*?c))+ ?r) <= 0" using c mult_le_cancel_left[of "(1 + 1) * ?c" "?a* (- ?t / ((1 + 1)*?c))+ ?r" 0] c' c'' order_less_not_sym[OF c''] by simp also have "\ \ - ?a*?t+ (1 + 1)*?c *?r <= 0" using nonzero_mult_divide_cancel_left[OF c'] c - by (simp add: ring_simps diff_divide_distrib less_le del: left_distrib) + by (simp add: algebra_simps diff_divide_distrib less_le del: left_distrib) finally have ?thesis using c d nc nd - apply(simp add: r[of "- (?t / ((1 + 1)*?c))"] msubstle_def Let_def evaldjf_ex ring_simps lt polyneg_norm polymul_norm) + apply(simp add: r[of "- (?t / ((1 + 1)*?c))"] msubstle_def Let_def evaldjf_ex field_simps lt 
polyneg_norm polymul_norm) apply (simp only: one_add_one_is_two[symmetric] of_int_add) using c order_less_not_sym[OF c] less_imp_neq[OF c] - by (simp add: ring_simps ) } + by (simp add: field_simps ) } moreover {assume c: "?c < 0" and d: "?d=0" hence c': "(1 + 1)*?c \ 0" by simp from c have c'': "(1 + 1)*?c < 0" by (simp add: mult_less_0_iff) - from d have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?t / ((1 + 1)*?c)" by (simp add: ring_simps) + from d have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?t / ((1 + 1)*?c)" by (simp add: field_simps) have "?rhs \ Ifm vs (- ?t / ((1 + 1)*?c) # bs) (Le (CNP 0 a r))" by (simp only: th) also have "\ \ ?a* (- ?t / ((1 + 1)*?c))+ ?r <= 0" by (simp add: r[of "- (?t / ((1 + 1)*?c))"]) also have "\ \ (1 + 1)*?c * (?a* (- ?t / ((1 + 1)*?c))+ ?r) >= 0" using c order_less_not_sym[OF c''] less_imp_neq[OF c''] c'' mult_le_cancel_left[of "(1 + 1) * ?c" 0 "?a* (- ?t / ((1 + 1)*?c))+ ?r"] by simp also have "\ \ ?a*?t - (1 + 1)*?c *?r <= 0" using nonzero_mult_divide_cancel_left[OF c'] c order_less_not_sym[OF c''] less_imp_neq[OF c''] c'' - by (simp add: ring_simps diff_divide_distrib del: left_distrib) + by (simp add: algebra_simps diff_divide_distrib del: left_distrib) finally have ?thesis using c d nc nd - apply(simp add: r[of "- (?t / ((1 + 1)*?c))"] msubstle_def Let_def evaldjf_ex ring_simps lt polyneg_norm polymul_norm) + apply(simp add: r[of "- (?t / ((1 + 1)*?c))"] msubstle_def Let_def evaldjf_ex field_simps lt polyneg_norm polymul_norm) apply (simp only: one_add_one_is_two[symmetric] of_int_add) using c order_less_not_sym[OF c] less_imp_neq[OF c] - by (simp add: ring_simps ) } + by (simp add: field_simps ) } moreover moreover {assume c: "?c = 0" and d: "?d>0" from d have d'': "(1 + 1)*?d > 0" by (simp add: zero_less_mult_iff) from d have d': "(1 + 1)*?d \ 0" by simp - from c have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?s / ((1 + 1)*?d)" by (simp add: ring_simps) + from c have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?s / ((1 + 1)*?d)" by (simp add: field_simps) have "?rhs \ Ifm vs (- ?s / ((1 + 1)*?d) # bs) (Le (CNP 0 a r))" by (simp only: th) also have "\ \ ?a* (- ?s / ((1 + 1)*?d))+ ?r <= 0" by (simp add: r[of "- (?s / ((1 + 1)*?d))"]) also have "\ \ (1 + 1)*?d * (?a* (- ?s / ((1 + 1)*?d))+ ?r) <= 0" using d mult_le_cancel_left[of "(1 + 1) * ?d" "?a* (- ?s / ((1 + 1)*?d))+ ?r" 0] d' d'' order_less_not_sym[OF d''] by simp also have "\ \ - ?a*?s+ (1 + 1)*?d *?r <= 0" using nonzero_mult_divide_cancel_left[OF d'] d - by (simp add: ring_simps diff_divide_distrib less_le del: left_distrib) + by (simp add: algebra_simps diff_divide_distrib less_le del: left_distrib) finally have ?thesis using c d nc nd - apply(simp add: r[of "- (?s / ((1 + 1)*?d))"] msubstle_def Let_def evaldjf_ex ring_simps lt polyneg_norm polymul_norm) + apply(simp add: r[of "- (?s / ((1 + 1)*?d))"] msubstle_def Let_def evaldjf_ex field_simps lt polyneg_norm polymul_norm) apply (simp only: one_add_one_is_two[symmetric] of_int_add) using d order_less_not_sym[OF d] less_imp_neq[OF d] - by (simp add: ring_simps ) } + by (simp add: field_simps ) } moreover {assume c: "?c = 0" and d: "?d<0" hence d': "(1 + 1)*?d \ 0" by simp from d have d'': "(1 + 1)*?d < 0" by (simp add: mult_less_0_iff) - from c have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?s / ((1 + 1)*?d)" by (simp add: ring_simps) + from c have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - ?s / ((1 + 1)*?d)" by (simp add: field_simps) have "?rhs \ Ifm vs (- ?s / ((1 + 1)*?d) # bs) (Le (CNP 0 a r))" by (simp only: th) also have "\ \ ?a* (- ?s / ((1 
+ 1)*?d))+ ?r <= 0" by (simp add: r[of "- (?s / ((1 + 1)*?d))"]) also have "\ \ (1 + 1)*?d * (?a* (- ?s / ((1 + 1)*?d))+ ?r) >= 0" using d order_less_not_sym[OF d''] less_imp_neq[OF d''] d'' mult_le_cancel_left[of "(1 + 1) * ?d" 0 "?a* (- ?s / ((1 + 1)*?d))+ ?r"] by simp also have "\ \ ?a*?s - (1 + 1)*?d *?r <= 0" using nonzero_mult_divide_cancel_left[OF d'] d order_less_not_sym[OF d''] less_imp_neq[OF d''] d'' - by (simp add: ring_simps diff_divide_distrib del: left_distrib) + by (simp add: algebra_simps diff_divide_distrib del: left_distrib) finally have ?thesis using c d nc nd - apply(simp add: r[of "- (?s / ((1 + 1)*?d))"] msubstle_def Let_def evaldjf_ex ring_simps lt polyneg_norm polymul_norm) + apply(simp add: r[of "- (?s / ((1 + 1)*?d))"] msubstle_def Let_def evaldjf_ex field_simps lt polyneg_norm polymul_norm) apply (simp only: one_add_one_is_two[symmetric] of_int_add) using d order_less_not_sym[OF d] less_imp_neq[OF d] - by (simp add: ring_simps ) } + by (simp add: field_simps ) } ultimately show ?thesis by blast qed @@ -2519,7 +2519,7 @@ lemma remdps_set[simp]: "set (remdps xs) = set xs" by (induct xs rule: remdps.induct, auto) -lemma simpfm_lin: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" +lemma simpfm_lin: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "qfree p \ islin (simpfm p)" by (induct p rule: simpfm.induct, auto simp add: conj_lin disj_lin) @@ -2551,7 +2551,7 @@ {fix c t d s assume ctU: "(c,t) \ set ?U" and dsU: "(d,s) \ set ?U" from U_l ctU dsU have norm: "isnpoly c" "isnpoly d" by auto from msubst_I[OF lq norm, of vs x bs t s] msubst_I[OF lq norm(2,1), of vs x bs s t] - have "?I (msubst ?q ((c,t),(d,s))) = ?I (msubst ?q ((d,s),(c,t)))" by (simp add: ring_simps)} + have "?I (msubst ?q ((c,t),(d,s))) = ?I (msubst ?q ((d,s),(c,t)))" by (simp add: field_simps)} hence th0: "\x \ set ?U. \y \ set ?U. ?I (msubst ?q (x, y)) \ ?I (msubst ?q (y, x))" by clarsimp {fix x assume xUp: "x \ set ?Up" then obtain c t d s where ctU: "(c,t) \ set ?U" and dsU: "(d,s) \ set ?U" @@ -2616,7 +2616,7 @@ let ?s = "Itm vs (x # bs) s" let ?t = "Itm vs (x # bs) t" have eq2: "\(x::'a). 
x + x = (1 + 1) * x" - by (simp add: ring_simps) + by (simp add: field_simps) {assume "?c = 0 \ ?d = 0" with ct have ?D by simp} moreover @@ -2747,12 +2747,12 @@ using lp tnb by (induct p c t rule: msubstpos.induct, auto simp add: msubsteq2_nb msubstltpos_nb msubstlepos_nb) -lemma msubstneg_nb: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" and lp: "islin p" and tnb: "tmbound0 t" +lemma msubstneg_nb: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" and lp: "islin p" and tnb: "tmbound0 t" shows "bound0 (msubstneg p c t)" using lp tnb by (induct p c t rule: msubstneg.induct, auto simp add: msubsteq2_nb msubstltneg_nb msubstleneg_nb) -lemma msubst2_nb: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" and lp: "islin p" and tnb: "tmbound0 t" +lemma msubst2_nb: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" and lp: "islin p" and tnb: "tmbound0 t" shows "bound0 (msubst2 p c t)" using lp tnb by (simp add: msubst2_def msubstneg_nb msubstpos_nb conj_nb disj_nb lt_nb simpfm_bound0) @@ -2899,14 +2899,14 @@ by (auto simp add: msubst2_def lt[OF stupid(3)] lt[OF stupid(1)] mult_less_0_iff zero_less_mult_iff) from msubst2[OF lq norm2(1) z(1), of x bs] msubst2[OF lq norm2(2) z(2), of x bs] H - show ?rhs by (simp add: ring_simps) + show ?rhs by (simp add: field_simps) next assume H: ?rhs hence z: "\C (-2, 1) *\<^sub>p b *\<^sub>p d\\<^sub>p\<^bsup>vs\<^esup> \ 0" "\C (-2, 1) *\<^sub>p d *\<^sub>p b\\<^sub>p\<^bsup>vs\<^esup> \ 0" by (auto simp add: msubst2_def lt[OF stupid(4)] lt[OF stupid(2)] mult_less_0_iff zero_less_mult_iff) from msubst2[OF lq norm2(1) z(1), of x bs] msubst2[OF lq norm2(2) z(2), of x bs] H - show ?lhs by (simp add: ring_simps) + show ?lhs by (simp add: field_simps) qed} hence th0: "\x \ set ?U. \y \ set ?U. ?I (?s (x, y)) \ ?I (?s (y, x))" by clarsimp @@ -3156,54 +3156,54 @@ *} "Parametric QE for linear Arithmetic over fields, Version 2" -lemma "\(x::'a::{division_by_zero,linordered_field,number_ring}). y \ -1 \ (y + 1)*x < 0" - apply (frpar type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "y::'a::{division_by_zero,linordered_field,number_ring}") - apply (simp add: ring_simps) +lemma "\(x::'a::{linordered_field_inverse_zero, number_ring}). y \ -1 \ (y + 1)*x < 0" + apply (frpar type: "'a::{linordered_field_inverse_zero, number_ring}" pars: "y::'a::{linordered_field_inverse_zero, number_ring}") + apply (simp add: field_simps) apply (rule spec[where x=y]) - apply (frpar type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "z::'a::{division_by_zero,linordered_field,number_ring}") + apply (frpar type: "'a::{linordered_field_inverse_zero, number_ring}" pars: "z::'a::{linordered_field_inverse_zero, number_ring}") by simp text{* Collins/Jones Problem *} (* -lemma "\(r::'a::{division_by_zero,linordered_field,number_ring}). 0 < r \ r < 1 \ 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \ (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0" +lemma "\(r::'a::{linordered_field_inverse_zero, number_ring}). 0 < r \ r < 1 \ 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \ (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0" proof- - have "(\(r::'a::{division_by_zero,linordered_field,number_ring}). 0 < r \ r < 1 \ 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \ (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0) \ (\(r::'a::{division_by_zero,linordered_field,number_ring}). 
0 < r \ r < 1 \ 0 < 2 *(a^2 + b^2) - (3*(a^2 + b^2)) * r + (2*a)*r \ 2*(a^2 + b^2) - (3*(a^2 + b^2) - 4*a + 1)*r - 2*a < 0)" (is "?lhs \ ?rhs") -by (simp add: ring_simps) + have "(\(r::'a::{linordered_field_inverse_zero, number_ring}). 0 < r \ r < 1 \ 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \ (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0) \ (\(r::'a::{linordered_field_inverse_zero, number_ring}). 0 < r \ r < 1 \ 0 < 2 *(a^2 + b^2) - (3*(a^2 + b^2)) * r + (2*a)*r \ 2*(a^2 + b^2) - (3*(a^2 + b^2) - 4*a + 1)*r - 2*a < 0)" (is "?lhs \ ?rhs") +by (simp add: field_simps) have "?rhs" - apply (frpar type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "a::'a::{division_by_zero,linordered_field,number_ring}" "b::'a::{division_by_zero,linordered_field,number_ring}") - apply (simp add: ring_simps) + apply (frpar type: "'a::{linordered_field_inverse_zero, number_ring}" pars: "a::'a::{linordered_field_inverse_zero, number_ring}" "b::'a::{linordered_field_inverse_zero, number_ring}") + apply (simp add: field_simps) oops *) (* -lemma "ALL (x::'a::{division_by_zero,linordered_field,number_ring}) y. (1 - t)*x \ (1+t)*y \ (1 - t)*y \ (1+t)*x --> 0 \ y" -apply (frpar type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "t::'a::{division_by_zero,linordered_field,number_ring}") +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y. (1 - t)*x \ (1+t)*y \ (1 - t)*y \ (1+t)*x --> 0 \ y" +apply (frpar type: "'a::{linordered_field_inverse_zero, number_ring}" pars: "t::'a::{linordered_field_inverse_zero, number_ring}") oops *) -lemma "\(x::'a::{division_by_zero,linordered_field,number_ring}). y \ -1 \ (y + 1)*x < 0" - apply (frpar2 type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "y::'a::{division_by_zero,linordered_field,number_ring}") - apply (simp add: ring_simps) +lemma "\(x::'a::{linordered_field_inverse_zero, number_ring}). y \ -1 \ (y + 1)*x < 0" + apply (frpar2 type: "'a::{linordered_field_inverse_zero, number_ring}" pars: "y::'a::{linordered_field_inverse_zero, number_ring}") + apply (simp add: field_simps) apply (rule spec[where x=y]) - apply (frpar2 type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "z::'a::{division_by_zero,linordered_field,number_ring}") + apply (frpar2 type: "'a::{linordered_field_inverse_zero, number_ring}" pars: "z::'a::{linordered_field_inverse_zero, number_ring}") by simp text{* Collins/Jones Problem *} (* -lemma "\(r::'a::{division_by_zero,linordered_field,number_ring}). 0 < r \ r < 1 \ 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \ (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0" +lemma "\(r::'a::{linordered_field_inverse_zero, number_ring}). 0 < r \ r < 1 \ 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \ (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0" proof- - have "(\(r::'a::{division_by_zero,linordered_field,number_ring}). 0 < r \ r < 1 \ 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \ (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0) \ (\(r::'a::{division_by_zero,linordered_field,number_ring}). 0 < r \ r < 1 \ 0 < 2 *(a^2 + b^2) - (3*(a^2 + b^2)) * r + (2*a)*r \ 2*(a^2 + b^2) - (3*(a^2 + b^2) - 4*a + 1)*r - 2*a < 0)" (is "?lhs \ ?rhs") -by (simp add: ring_simps) + have "(\(r::'a::{linordered_field_inverse_zero, number_ring}). 0 < r \ r < 1 \ 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \ (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0) \ (\(r::'a::{linordered_field_inverse_zero, number_ring}). 
0 < r \ r < 1 \ 0 < 2 *(a^2 + b^2) - (3*(a^2 + b^2)) * r + (2*a)*r \ 2*(a^2 + b^2) - (3*(a^2 + b^2) - 4*a + 1)*r - 2*a < 0)" (is "?lhs \ ?rhs") +by (simp add: field_simps) have "?rhs" - apply (frpar2 type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "a::'a::{division_by_zero,linordered_field,number_ring}" "b::'a::{division_by_zero,linordered_field,number_ring}") + apply (frpar2 type: "'a::{linordered_field_inverse_zero, number_ring}" pars: "a::'a::{linordered_field_inverse_zero, number_ring}" "b::'a::{linordered_field_inverse_zero, number_ring}") apply simp oops *) (* -lemma "ALL (x::'a::{division_by_zero,linordered_field,number_ring}) y. (1 - t)*x \ (1+t)*y \ (1 - t)*y \ (1+t)*x --> 0 \ y" -apply (frpar2 type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "t::'a::{division_by_zero,linordered_field,number_ring}") +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y. (1 - t)*x \ (1+t)*y \ (1 - t)*y \ (1+t)*x --> 0 \ y" +apply (frpar2 type: "'a::{linordered_field_inverse_zero, number_ring}" pars: "t::'a::{linordered_field_inverse_zero, number_ring}") apply (simp add: field_simps linorder_neq_iff[symmetric]) apply ferrack oops diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Decision_Procs/Polynomial_List.thy --- a/src/HOL/Decision_Procs/Polynomial_List.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Decision_Procs/Polynomial_List.thy Tue May 04 20:30:22 2010 +0200 @@ -283,11 +283,11 @@ apply (drule_tac x = "%m. if m = Suc n then a else i m" in spec, safe) apply (drule poly_mult_eq_zero_disj [THEN iffD1], safe) apply (drule_tac x = "Suc (length q)" in spec) -apply (auto simp add: ring_simps) +apply (auto simp add: field_simps) apply (drule_tac x = xa in spec) -apply (clarsimp simp add: ring_simps) +apply (clarsimp simp add: field_simps) apply (drule_tac x = m in spec) -apply (auto simp add:ring_simps) +apply (auto simp add:field_simps) done lemmas poly_roots_index_lemma1 = conjI [THEN poly_roots_index_lemma0, standard] @@ -327,7 +327,7 @@ apply (drule_tac x = "a#i" in spec) apply (auto simp only: poly_mult List.list.size) apply (drule_tac x = xa in spec) -apply (clarsimp simp add: ring_simps) +apply (clarsimp simp add: field_simps) done lemmas poly_roots_index_lemma2 = conjI [THEN poly_roots_index_lemma, standard] @@ -413,7 +413,7 @@ by (auto intro!: ext) lemma poly_add_minus_zero_iff: "(poly (p +++ -- q) = poly []) = (poly p = poly q)" -by (auto simp add: ring_simps poly_add poly_minus_def fun_eq poly_cmult) +by (auto simp add: field_simps poly_add poly_minus_def fun_eq poly_cmult) lemma poly_add_minus_mult_eq: "poly (p *** q +++ --(p *** r)) = poly (p *** (q +++ -- r))" by (auto simp add: poly_add poly_minus_def fun_eq poly_mult poly_cmult right_distrib) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Decision_Procs/Reflected_Multivariate_Polynomial.thy --- a/src/HOL/Decision_Procs/Reflected_Multivariate_Polynomial.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Decision_Procs/Reflected_Multivariate_Polynomial.thy Tue May 04 20:30:22 2010 +0200 @@ -230,7 +230,7 @@ subsection{* Semantics of the polynomial representation *} -consts Ipoly :: "'a list \ poly \ 'a::{ring_char_0,power,division_by_zero,field}" +consts Ipoly :: "'a list \ poly \ 'a::{field_char_0, field_inverse_zero, power}" primrec "Ipoly bs (C c) = INum c" "Ipoly bs (Bound n) = bs!n" @@ -241,7 +241,7 @@ "Ipoly bs (Pw t n) = (Ipoly bs t) ^ n" "Ipoly bs (CN c n p) = (Ipoly bs c) + (bs!n)*(Ipoly bs p)" abbreviation - Ipoly_syntax :: "poly \ 'a list 
\'a::{ring_char_0,power,division_by_zero,field}" ("\_\\<^sub>p\<^bsup>_\<^esup>") + Ipoly_syntax :: "poly \ 'a list \'a::{field_char_0, field_inverse_zero, power}" ("\_\\<^sub>p\<^bsup>_\<^esup>") where "\p\\<^sub>p\<^bsup>bs\<^esup> \ Ipoly bs p" lemma Ipoly_CInt: "Ipoly bs (C (i,1)) = of_int i" @@ -322,7 +322,7 @@ qed auto lemma polyadd[simp]: "Ipoly bs (polyadd (p,q)) = (Ipoly bs p) + (Ipoly bs q)" -by (induct p q rule: polyadd.induct, auto simp add: Let_def ring_simps right_distrib[symmetric] simp del: right_distrib) +by (induct p q rule: polyadd.induct, auto simp add: Let_def field_simps right_distrib[symmetric] simp del: right_distrib) lemma polyadd_norm: "\ isnpoly p ; isnpoly q\ \ isnpoly (polyadd(p,q))" using polyadd_normh[of "p" "0" "q" "0"] isnpoly_def by simp @@ -394,7 +394,7 @@ qed simp_all lemma polymul_properties: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" and np: "isnpolyh p n0" and nq: "isnpolyh q n1" and m: "m \ min n0 n1" shows "isnpolyh (p *\<^sub>p q) (min n0 n1)" and "(p *\<^sub>p q = 0\<^sub>p) = (p = 0\<^sub>p \ q = 0\<^sub>p)" @@ -565,22 +565,22 @@ qed auto lemma polymul[simp]: "Ipoly bs (p *\<^sub>p q) = (Ipoly bs p) * (Ipoly bs q)" -by(induct p q rule: polymul.induct, auto simp add: ring_simps) +by(induct p q rule: polymul.induct, auto simp add: field_simps) lemma polymul_normh: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "\isnpolyh p n0 ; isnpolyh q n1\ \ isnpolyh (p *\<^sub>p q) (min n0 n1)" using polymul_properties(1) by blast lemma polymul_eq0_iff: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "\ isnpolyh p n0 ; isnpolyh q n1\ \ (p *\<^sub>p q = 0\<^sub>p) = (p = 0\<^sub>p \ q = 0\<^sub>p) " using polymul_properties(2) by blast lemma polymul_degreen: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "\ isnpolyh p n0 ; isnpolyh q n1 ; m \ min n0 n1\ \ degreen (p *\<^sub>p q) m = (if (p = 0\<^sub>p \ q = 0\<^sub>p) then 0 else degreen p m + degreen q m)" using polymul_properties(3) by blast lemma polymul_norm: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "\ isnpoly p; isnpoly q\ \ isnpoly (polymul (p,q))" using polymul_normh[of "p" "0" "q" "0"] isnpoly_def by simp @@ -591,7 +591,7 @@ by (induct p arbitrary: n0, auto) lemma monic_eqI: assumes np: "isnpolyh p n0" - shows "INum (headconst p) * Ipoly bs (fst (monic p)) = (Ipoly bs p ::'a::{ring_char_0,power,division_by_zero,field})" + shows "INum (headconst p) * Ipoly bs (fst (monic p)) = (Ipoly bs p ::'a::{field_char_0, field_inverse_zero, power})" unfolding monic_def Let_def proof(cases "headconst p = 0\<^sub>N", simp_all add: headconst_zero[OF np]) let ?h = "headconst p" @@ -629,13 +629,13 @@ lemma polysub_norm: "\ isnpoly p; isnpoly q\ \ isnpoly (polysub(p,q))" using polyadd_norm polyneg_norm by (simp add: polysub_def) -lemma polysub_same_0[simp]: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" +lemma polysub_same_0[simp]: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "isnpolyh p n0 \ polysub (p, p) = 0\<^sub>p" unfolding polysub_def split_def fst_conv snd_conv by (induct p arbitrary: 
n0,auto simp add: Let_def Nsub0[simplified Nsub_def]) lemma polysub_0: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "\ isnpolyh p n0 ; isnpolyh q n1\ \ (p -\<^sub>p q = 0\<^sub>p) = (p = q)" unfolding polysub_def split_def fst_conv snd_conv apply (induct p q arbitrary: n0 n1 rule:polyadd.induct, simp_all add: Nsub0[simplified Nsub_def]) @@ -657,7 +657,7 @@ done text{* polypow is a power function and preserves normal forms *} -lemma polypow[simp]: "Ipoly bs (polypow n p) = ((Ipoly bs p :: 'a::{ring_char_0,division_by_zero,field})) ^ n" +lemma polypow[simp]: "Ipoly bs (polypow n p) = ((Ipoly bs p :: 'a::{field_char_0, field_inverse_zero})) ^ n" proof(induct n rule: polypow.induct) case 1 thus ?case by simp next @@ -688,7 +688,7 @@ qed lemma polypow_normh: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "isnpolyh p n \ isnpolyh (polypow k p) n" proof (induct k arbitrary: n rule: polypow.induct) case (2 k n) @@ -701,17 +701,17 @@ qed auto lemma polypow_norm: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "isnpoly p \ isnpoly (polypow k p)" by (simp add: polypow_normh isnpoly_def) text{* Finally the whole normalization*} -lemma polynate[simp]: "Ipoly bs (polynate p) = (Ipoly bs p :: 'a ::{ring_char_0,division_by_zero,field})" +lemma polynate[simp]: "Ipoly bs (polynate p) = (Ipoly bs p :: 'a ::{field_char_0, field_inverse_zero})" by (induct p rule:polynate.induct, auto) lemma polynate_norm[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "isnpoly (polynate p)" by (induct p rule: polynate.induct, simp_all add: polyadd_norm polymul_norm polysub_norm polyneg_norm polypow_norm) (simp_all add: isnpoly_def) @@ -736,29 +736,29 @@ shows "isnpolyh (funpow k f p) n" using f np by (induct k arbitrary: p, auto) -lemma funpow_shift1: "(Ipoly bs (funpow n shift1 p) :: 'a :: {ring_char_0,division_by_zero,field}) = Ipoly bs (Mul (Pw (Bound 0) n) p)" +lemma funpow_shift1: "(Ipoly bs (funpow n shift1 p) :: 'a :: {field_char_0, field_inverse_zero}) = Ipoly bs (Mul (Pw (Bound 0) n) p)" by (induct n arbitrary: p, simp_all add: shift1_isnpoly shift1 power_Suc ) lemma shift1_isnpolyh: "isnpolyh p n0 \ p\ 0\<^sub>p \ isnpolyh (shift1 p) 0" using isnpolyh_mono[where n="n0" and n'="0" and p="p"] by (simp add: shift1_def) lemma funpow_shift1_1: - "(Ipoly bs (funpow n shift1 p) :: 'a :: {ring_char_0,division_by_zero,field}) = Ipoly bs (funpow n shift1 1\<^sub>p *\<^sub>p p)" + "(Ipoly bs (funpow n shift1 p) :: 'a :: {field_char_0, field_inverse_zero}) = Ipoly bs (funpow n shift1 1\<^sub>p *\<^sub>p p)" by (simp add: funpow_shift1) lemma poly_cmul[simp]: "Ipoly bs (poly_cmul c p) = Ipoly bs (Mul (C c) p)" -by (induct p arbitrary: n0 rule: poly_cmul.induct, auto simp add: ring_simps) +by (induct p arbitrary: n0 rule: poly_cmul.induct, auto simp add: field_simps) lemma behead: assumes np: "isnpolyh p n" - shows "Ipoly bs (Add (Mul (head p) (Pw (Bound 0) (degree p))) (behead p)) = (Ipoly bs p :: 'a :: {ring_char_0,division_by_zero,field})" + shows "Ipoly bs (Add (Mul (head p) (Pw (Bound 0) (degree p))) (behead p)) = (Ipoly bs p :: 'a :: {field_char_0, field_inverse_zero})" using np proof (induct p arbitrary: n rule: behead.induct) case (1 c 
p n) hence pn: "isnpolyh p n" by simp from prems(2)[OF pn] have th:"Ipoly bs (Add (Mul (head p) (Pw (Bound 0) (degree p))) (behead p)) = Ipoly bs p" . then show ?case using "1.hyps" apply (simp add: Let_def,cases "behead p = 0\<^sub>p") - by (simp_all add: th[symmetric] ring_simps power_Suc) + by (simp_all add: th[symmetric] field_simps power_Suc) qed (auto simp add: Let_def) lemma behead_isnpolyh: @@ -981,7 +981,7 @@ by (simp add: head_eq_headn0) lemma isnpolyh_zero_iff: - assumes nq: "isnpolyh p n0" and eq :"\bs. wf_bs bs p \ \p\\<^sub>p\<^bsup>bs\<^esup> = (0::'a::{ring_char_0,power,division_by_zero,field})" + assumes nq: "isnpolyh p n0" and eq :"\bs. wf_bs bs p \ \p\\<^sub>p\<^bsup>bs\<^esup> = (0::'a::{field_char_0, field_inverse_zero, power})" shows "p = 0\<^sub>p" using nq eq proof (induct "maxindex p" arbitrary: p n0 rule: less_induct) @@ -1033,7 +1033,7 @@ lemma isnpolyh_unique: assumes np:"isnpolyh p n0" and nq: "isnpolyh q n1" - shows "(\bs. \p\\<^sub>p\<^bsup>bs\<^esup> = (\q\\<^sub>p\<^bsup>bs\<^esup> :: 'a::{ring_char_0,power,division_by_zero,field})) \ p = q" + shows "(\bs. \p\\<^sub>p\<^bsup>bs\<^esup> = (\q\\<^sub>p\<^bsup>bs\<^esup> :: 'a::{field_char_0, field_inverse_zero, power})) \ p = q" proof(auto) assume H: "\bs. (\p\\<^sub>p\<^bsup>bs\<^esup> ::'a)= \q\\<^sub>p\<^bsup>bs\<^esup>" hence "\bs.\p -\<^sub>p q\\<^sub>p\<^bsup>bs\<^esup>= (0::'a)" by simp @@ -1046,50 +1046,50 @@ text{* consequenses of unicity on the algorithms for polynomial normalization *} -lemma polyadd_commute: assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" +lemma polyadd_commute: assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" and np: "isnpolyh p n0" and nq: "isnpolyh q n1" shows "p +\<^sub>p q = q +\<^sub>p p" using isnpolyh_unique[OF polyadd_normh[OF np nq] polyadd_normh[OF nq np]] by simp lemma zero_normh: "isnpolyh 0\<^sub>p n" by simp lemma one_normh: "isnpolyh 1\<^sub>p n" by simp lemma polyadd_0[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" and np: "isnpolyh p n0" shows "p +\<^sub>p 0\<^sub>p = p" and "0\<^sub>p +\<^sub>p p = p" using isnpolyh_unique[OF polyadd_normh[OF np zero_normh] np] isnpolyh_unique[OF polyadd_normh[OF zero_normh np] np] by simp_all lemma polymul_1[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" and np: "isnpolyh p n0" shows "p *\<^sub>p 1\<^sub>p = p" and "1\<^sub>p *\<^sub>p p = p" using isnpolyh_unique[OF polymul_normh[OF np one_normh] np] isnpolyh_unique[OF polymul_normh[OF one_normh np] np] by simp_all lemma polymul_0[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" and np: "isnpolyh p n0" shows "p *\<^sub>p 0\<^sub>p = 0\<^sub>p" and "0\<^sub>p *\<^sub>p p = 0\<^sub>p" using isnpolyh_unique[OF polymul_normh[OF np zero_normh] zero_normh] isnpolyh_unique[OF polymul_normh[OF zero_normh np] zero_normh] by simp_all lemma polymul_commute: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" and np:"isnpolyh p n0" and nq: "isnpolyh q n1" shows "p *\<^sub>p q = q *\<^sub>p p" -using isnpolyh_unique[OF polymul_normh[OF np nq] polymul_normh[OF nq np], where ?'a = "'a\{ring_char_0,power,division_by_zero,field}"] by simp +using isnpolyh_unique[OF polymul_normh[OF np 
nq] polymul_normh[OF nq np], where ?'a = "'a\{field_char_0, field_inverse_zero, power}"] by simp declare polyneg_polyneg[simp] lemma isnpolyh_polynate_id[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" and np:"isnpolyh p n0" shows "polynate p = p" - using isnpolyh_unique[where ?'a= "'a::{ring_char_0,division_by_zero,field}", OF polynate_norm[of p, unfolded isnpoly_def] np] polynate[where ?'a = "'a::{ring_char_0,division_by_zero,field}"] by simp + using isnpolyh_unique[where ?'a= "'a::{field_char_0, field_inverse_zero}", OF polynate_norm[of p, unfolded isnpoly_def] np] polynate[where ?'a = "'a::{field_char_0, field_inverse_zero}"] by simp lemma polynate_idempotent[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "polynate (polynate p) = polynate p" using isnpolyh_polynate_id[OF polynate_norm[of p, unfolded isnpoly_def]] . lemma poly_nate_polypoly': "poly_nate bs p = polypoly' bs (polynate p)" unfolding poly_nate_def polypoly'_def .. -lemma poly_nate_poly: shows "poly (poly_nate bs p) = (\x:: 'a ::{ring_char_0,division_by_zero,field}. \p\\<^sub>p\<^bsup>x # bs\<^esup>)" +lemma poly_nate_poly: shows "poly (poly_nate bs p) = (\x:: 'a ::{field_char_0, field_inverse_zero}. \p\\<^sub>p\<^bsup>x # bs\<^esup>)" using polypoly'_poly[OF polynate_norm[unfolded isnpoly_def], symmetric, of bs p] unfolding poly_nate_polypoly' by (auto intro: ext) @@ -1116,7 +1116,7 @@ qed lemma degree_polysub_samehead: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" and np: "isnpolyh p n0" and nq: "isnpolyh q n1" and h: "head p = head q" and d: "degree p = degree q" shows "degree (p -\<^sub>p q) < degree p \ (p -\<^sub>p q = 0\<^sub>p)" @@ -1226,7 +1226,7 @@ done lemma polymul_head_polyeq: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "\isnpolyh p n0; isnpolyh q n1 ; p \ 0\<^sub>p ; q \ 0\<^sub>p \ \ head (p *\<^sub>p q) = head p *\<^sub>p head q" proof (induct p q arbitrary: n0 n1 rule: polymul.induct) case (2 a b c' n' p' n0 n1) @@ -1300,7 +1300,7 @@ by (induct p arbitrary: n0 rule: polyneg.induct, auto) lemma degree_polymul: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" and np: "isnpolyh p n0" and nq: "isnpolyh q n1" shows "degree (p *\<^sub>p q) \ degree p + degree q" using polymul_degreen[OF np nq, where m="0"] degree_eq_degreen0 by simp @@ -1344,7 +1344,7 @@ qed lemma polydivide_aux_properties: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" and np: "isnpolyh p n0" and ns: "isnpolyh s n1" and ap: "head p = a" and ndp: "degree p = n" and pnz: "p \ 0\<^sub>p" shows "polydivide_aux_dom (a,n,p,k,s) \ @@ -1415,19 +1415,19 @@ from polyadd_normh[OF polymul_normh[OF np polyadd_normh[OF polymul_normh[OF nakk' nxdn] nq]] nr'] have nqr': "isnpolyh (p *\<^sub>p (?akk' *\<^sub>p ?xdn +\<^sub>p q) +\<^sub>p r) 0" by simp - from asp have "\ (bs:: 'a::{ring_char_0,division_by_zero,field} list). Ipoly bs (a^\<^sub>p (k' - k) *\<^sub>p (s -\<^sub>p ?p')) = + from asp have "\ (bs:: 'a::{field_char_0, field_inverse_zero} list). 
Ipoly bs (a^\<^sub>p (k' - k) *\<^sub>p (s -\<^sub>p ?p')) = Ipoly bs (p *\<^sub>p q +\<^sub>p r)" by simp - hence " \(bs:: 'a::{ring_char_0,division_by_zero,field} list). Ipoly bs (a^\<^sub>p (k' - k)*\<^sub>p s) = + hence " \(bs:: 'a::{field_char_0, field_inverse_zero} list). Ipoly bs (a^\<^sub>p (k' - k)*\<^sub>p s) = Ipoly bs (a^\<^sub>p (k' - k)) * Ipoly bs ?p' + Ipoly bs p * Ipoly bs q + Ipoly bs r" - by (simp add: ring_simps) - hence " \(bs:: 'a::{ring_char_0,division_by_zero,field} list). Ipoly bs (a ^\<^sub>p (k' - k) *\<^sub>p s) = + by (simp add: field_simps) + hence " \(bs:: 'a::{field_char_0, field_inverse_zero} list). Ipoly bs (a ^\<^sub>p (k' - k) *\<^sub>p s) = Ipoly bs (a^\<^sub>p (k' - k)) * Ipoly bs (funpow (degree s - n) shift1 1\<^sub>p *\<^sub>p p) + Ipoly bs p * Ipoly bs q + Ipoly bs r" by (auto simp only: funpow_shift1_1) - hence "\(bs:: 'a::{ring_char_0,division_by_zero,field} list). Ipoly bs (a ^\<^sub>p (k' - k) *\<^sub>p s) = + hence "\(bs:: 'a::{field_char_0, field_inverse_zero} list). Ipoly bs (a ^\<^sub>p (k' - k) *\<^sub>p s) = Ipoly bs p * (Ipoly bs (a^\<^sub>p (k' - k)) * Ipoly bs (funpow (degree s - n) shift1 1\<^sub>p) - + Ipoly bs q) + Ipoly bs r" by (simp add: ring_simps) - hence "\(bs:: 'a::{ring_char_0,division_by_zero,field} list). Ipoly bs (a ^\<^sub>p (k' - k) *\<^sub>p s) = + + Ipoly bs q) + Ipoly bs r" by (simp add: field_simps) + hence "\(bs:: 'a::{field_char_0, field_inverse_zero} list). Ipoly bs (a ^\<^sub>p (k' - k) *\<^sub>p s) = Ipoly bs (p *\<^sub>p ((a^\<^sub>p (k' - k)) *\<^sub>p (funpow (degree s - n) shift1 1\<^sub>p) +\<^sub>p q) +\<^sub>p r)" by simp with isnpolyh_unique[OF nakks' nqr'] have "a ^\<^sub>p (k' - k) *\<^sub>p s = @@ -1444,9 +1444,9 @@ apply (simp) by (rule polydivide_aux_real_domintros, simp_all) have dom: ?dths apply (rule polydivide_aux_real_domintros) using ba dn' domsp by simp_all - from spz isnpolyh_unique[OF polysub_normh[OF ns np'], where q="0\<^sub>p", symmetric, where ?'a = "'a::{ring_char_0,division_by_zero,field}"] - have " \(bs:: 'a::{ring_char_0,division_by_zero,field} list). Ipoly bs s = Ipoly bs ?p'" by simp - hence "\(bs:: 'a::{ring_char_0,division_by_zero,field} list). Ipoly bs s = Ipoly bs (?xdn *\<^sub>p p)" using np nxdn apply simp + from spz isnpolyh_unique[OF polysub_normh[OF ns np'], where q="0\<^sub>p", symmetric, where ?'a = "'a::{field_char_0, field_inverse_zero}"] + have " \(bs:: 'a::{field_char_0, field_inverse_zero} list). Ipoly bs s = Ipoly bs ?p'" by simp + hence "\(bs:: 'a::{field_char_0, field_inverse_zero} list). 
Ipoly bs s = Ipoly bs (?xdn *\<^sub>p p)" using np nxdn apply simp by (simp only: funpow_shift1_1) simp hence sp': "s = ?xdn *\<^sub>p p" using isnpolyh_unique[OF ns polymul_normh[OF nxdn np]] by blast {assume h1: "polydivide_aux (a,n,p,k,s) = (k',r)" @@ -1501,17 +1501,17 @@ and dr: "degree r = 0 \ degree r < degree p" and qr: "a ^\<^sub>p (k' - Suc k) *\<^sub>p ((a *\<^sub>p s) -\<^sub>p (?b *\<^sub>p ?p')) = p *\<^sub>p q +\<^sub>p r" by auto from kk' have kk'':"Suc (k' - Suc k) = k' - k" by arith - {fix bs:: "'a::{ring_char_0,division_by_zero,field} list" + {fix bs:: "'a::{field_char_0, field_inverse_zero} list" from qr isnpolyh_unique[OF polypow_normh[OF head_isnpolyh[OF np], where k="k' - Suc k", simplified ap] nasbp', symmetric] have "Ipoly bs (a ^\<^sub>p (k' - Suc k) *\<^sub>p ((a *\<^sub>p s) -\<^sub>p (?b *\<^sub>p ?p'))) = Ipoly bs (p *\<^sub>p q +\<^sub>p r)" by simp hence "Ipoly bs a ^ (Suc (k' - Suc k)) * Ipoly bs s = Ipoly bs p * Ipoly bs q + Ipoly bs a ^ (k' - Suc k) * Ipoly bs ?b * Ipoly bs ?p' + Ipoly bs r" - by (simp add: ring_simps power_Suc) + by (simp add: field_simps power_Suc) hence "Ipoly bs a ^ (k' - k) * Ipoly bs s = Ipoly bs p * Ipoly bs q + Ipoly bs a ^ (k' - Suc k) * Ipoly bs ?b * Ipoly bs ?xdn * Ipoly bs p + Ipoly bs r" by (simp add:kk'' funpow_shift1_1[where n="degree s - n" and p="p"]) hence "Ipoly bs (a ^\<^sub>p (k' - k) *\<^sub>p s) = Ipoly bs p * (Ipoly bs q + Ipoly bs a ^ (k' - Suc k) * Ipoly bs ?b * Ipoly bs ?xdn) + Ipoly bs r" - by (simp add: ring_simps)} - hence ieq:"\(bs :: 'a::{ring_char_0,division_by_zero,field} list). Ipoly bs (a ^\<^sub>p (k' - k) *\<^sub>p s) = + by (simp add: field_simps)} + hence ieq:"\(bs :: 'a::{field_char_0, field_inverse_zero} list). Ipoly bs (a ^\<^sub>p (k' - k) *\<^sub>p s) = Ipoly bs (p *\<^sub>p (q +\<^sub>p (a ^\<^sub>p (k' - Suc k) *\<^sub>p ?b *\<^sub>p ?xdn)) +\<^sub>p r)" by auto let ?q = "q +\<^sub>p (a ^\<^sub>p (k' - Suc k) *\<^sub>p ?b *\<^sub>p ?xdn)" from polyadd_normh[OF nq polymul_normh[OF polymul_normh[OF polypow_normh[OF head_isnpolyh[OF np], where k="k' - Suc k"] head_isnpolyh[OF ns], simplified ap ] nxdn]] @@ -1532,17 +1532,17 @@ apply (simp) by (rule polydivide_aux_real_domintros, simp_all) have dom: ?dths using sz ba dn' domsp by - (rule polydivide_aux_real_domintros, simp_all) - {fix bs :: "'a::{ring_char_0,division_by_zero,field} list" + {fix bs :: "'a::{field_char_0, field_inverse_zero} list" from isnpolyh_unique[OF nth, where ?'a="'a" and q="0\<^sub>p",simplified,symmetric] spz have "Ipoly bs (a*\<^sub>p s) = Ipoly bs ?b * Ipoly bs ?p'" by simp hence "Ipoly bs (a*\<^sub>p s) = Ipoly bs (?b *\<^sub>p ?xdn) * Ipoly bs p" by (simp add: funpow_shift1_1[where n="degree s - n" and p="p"]) hence "Ipoly bs (a*\<^sub>p s) = Ipoly bs (p *\<^sub>p (?b *\<^sub>p ?xdn))" by simp } - hence hth: "\ (bs:: 'a::{ring_char_0,division_by_zero,field} list). Ipoly bs (a*\<^sub>p s) = Ipoly bs (p *\<^sub>p (?b *\<^sub>p ?xdn))" .. + hence hth: "\ (bs:: 'a::{field_char_0, field_inverse_zero} list). Ipoly bs (a*\<^sub>p s) = Ipoly bs (p *\<^sub>p (?b *\<^sub>p ?xdn))" .. 
from hth have asq: "a *\<^sub>p s = p *\<^sub>p (?b *\<^sub>p ?xdn)" - using isnpolyh_unique[where ?'a = "'a::{ring_char_0,division_by_zero,field}", OF polymul_normh[OF head_isnpolyh[OF np] ns] + using isnpolyh_unique[where ?'a = "'a::{field_char_0, field_inverse_zero}", OF polymul_normh[OF head_isnpolyh[OF np] ns] polymul_normh[OF np polymul_normh[OF head_isnpolyh[OF ns] nxdn]], simplified ap] by simp {assume h1: "polydivide_aux (a,n,p,k,s) = (k', r)" @@ -1566,7 +1566,7 @@ qed lemma polydivide_properties: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" and np: "isnpolyh p n0" and ns: "isnpolyh s n1" and pnz: "p \ 0\<^sub>p" shows "(\ k r. polydivide s p = (k,r) \ (\nr. isnpolyh r nr) \ (degree r = 0 \ degree r < degree p) \ (\q n1. isnpolyh q n1 \ ((polypow k (head p)) *\<^sub>p s = p *\<^sub>p q +\<^sub>p r)))" @@ -1698,11 +1698,11 @@ definition "swapnorm n m t = polynate (swap n m t)" lemma swapnorm: assumes nbs: "n < length bs" and mbs: "m < length bs" - shows "((Ipoly bs (swapnorm n m t) :: 'a\{ring_char_0,division_by_zero,field})) = Ipoly ((bs[n:= bs!m])[m:= bs!n]) t" + shows "((Ipoly bs (swapnorm n m t) :: 'a\{field_char_0, field_inverse_zero})) = Ipoly ((bs[n:= bs!m])[m:= bs!n]) t" using swap[OF prems] swapnorm_def by simp lemma swapnorm_isnpoly[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "isnpoly (swapnorm n m p)" unfolding swapnorm_def by simp diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Decision_Procs/ex/Dense_Linear_Order_Ex.thy --- a/src/HOL/Decision_Procs/ex/Dense_Linear_Order_Ex.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Decision_Procs/ex/Dense_Linear_Order_Ex.thy Tue May 04 20:30:22 2010 +0200 @@ -7,147 +7,147 @@ begin lemma - "\(y::'a::{linordered_field,number_ring, division_by_zero}) <2. x + 3* y < 0 \ x - y >0" + "\(y::'a::{linordered_field_inverse_zero, number_ring}) <2. x + 3* y < 0 \ x - y >0" by ferrack -lemma "~ (ALL x (y::'a::{linordered_field,number_ring, division_by_zero}). x < y --> 10*x < 11*y)" +lemma "~ (ALL x (y::'a::{linordered_field_inverse_zero, number_ring}). x < y --> 10*x < 11*y)" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. x < y --> (10*(x + 5*y + -1) < 60*y)" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y. x < y --> (10*(x + 5*y + -1) < 60*y)" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y. x ~= y --> x < y" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}) y. x ~= y --> x < y" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y. (x ~= y & 10*x ~= 9*y & 10*x < y) --> x < y" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}) y. (x ~= y & 10*x ~= 9*y & 10*x < y) --> x < y" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. (x ~= y & 5*x <= y) --> 500*x <= 100*y" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y. (x ~= y & 5*x <= y) --> 500*x <= 100*y" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX (y::'a::{linordered_field,number_ring, division_by_zero}). 4*x + 3*y <= 0 & 4*x + 3*y >= -1)" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}). (EX (y::'a::{linordered_field_inverse_zero, number_ring}). 
4*x + 3*y <= 0 & 4*x + 3*y >= -1)" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) < 0. (EX (y::'a::{linordered_field,number_ring, division_by_zero}) > 0. 7*x + y > 0 & x - y <= 9)" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) < 0. (EX (y::'a::{linordered_field_inverse_zero, number_ring}) > 0. 7*x + y > 0 & x - y <= 9)" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}). (0 < x & x < 1) --> (ALL y > 1. x + y ~= 1)" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}). (0 < x & x < 1) --> (ALL y > 1. x + y ~= 1)" by ferrack -lemma "EX x. (ALL (y::'a::{linordered_field,number_ring, division_by_zero}). y < 2 --> 2*(y - x) \ 0 )" +lemma "EX x. (ALL (y::'a::{linordered_field_inverse_zero, number_ring}). y < 2 --> 2*(y - x) \ 0 )" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). x < 10 | x > 20 | (EX y. y>= 0 & y <= 10 & x+y = 20)" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}). x < 10 | x > 20 | (EX y. y>= 0 & y <= 10 & x+y = 20)" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y z. x + y < z --> y >= z --> x < 0" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y z. x + y < z --> y >= z --> x < 0" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y z. x + 7*y < 5* z & 5*y >= 7*z & x < 0" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}) y z. x + 7*y < 5* z & 5*y >= 7*z & x < 0" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y z. abs (x + y) <= z --> (abs z = z)" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y z. abs (x + y) <= z --> (abs z = z)" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y z. x + 7*y - 5* z < 0 & 5*y + 7*z + 3*x < 0" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}) y z. x + 7*y - 5* z < 0 & 5*y + 7*z + 3*x < 0" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y z. (abs (5*x+3*y+z) <= 5*x+3*y+z & abs (5*x+3*y+z) >= - (5*x+3*y+z)) | (abs (5*x+3*y+z) >= 5*x+3*y+z & abs (5*x+3*y+z) <= - (5*x+3*y+z))" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y z. (abs (5*x+3*y+z) <= 5*x+3*y+z & abs (5*x+3*y+z) >= - (5*x+3*y+z)) | (abs (5*x+3*y+z) >= 5*x+3*y+z & abs (5*x+3*y+z) <= - (5*x+3*y+z))" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. x < y --> (EX z>0. x+z = y)" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y. x < y --> (EX z>0. x+z = y)" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. x < y --> (EX z>0. x+z = y)" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y. x < y --> (EX z>0. x+z = y)" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. (EX z>0. abs (x - y) <= z )" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y. (EX z>0. abs (x - y) <= z )" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y. (ALL z<0. (z < x --> z <= y) & (z > y --> z >= x))" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}) y. (ALL z<0. (z < x --> z <= y) & (z > y --> z >= x))" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y. (ALL z>=0. abs (3*x+7*y) <= 2*z + 1)" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}) y. (ALL z>=0. 
abs (3*x+7*y) <= 2*z + 1)" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y. (ALL z<0. (z < x --> z <= y) & (z > y --> z >= x))" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}) y. (ALL z<0. (z < x --> z <= y) & (z > y --> z >= x))" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero})>0. (ALL y. (EX z. 13* abs z \ abs (12*y - x) & 5*x - 3*(abs y) <= 7*z))" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring})>0. (ALL y. (EX z. 13* abs z \ abs (12*y - x) & 5*x - 3*(abs y) <= 7*z))" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}). abs (4*x + 17) < 4 & (ALL y . abs (x*34 - 34*y - 9) \ 0 \ (EX z. 5*x - 3*abs y <= 7*z))" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}). abs (4*x + 17) < 4 & (ALL y . abs (x*34 - 34*y - 9) \ 0 \ (EX z. 5*x - 3*abs y <= 7*z))" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX y > abs (23*x - 9). (ALL z > abs (3*y - 19* abs x). x+z > 2*y))" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}). (EX y > abs (23*x - 9). (ALL z > abs (3*y - 19* abs x). x+z > 2*y))" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX y< abs (3*x - 1). (ALL z >= (3*abs x - 1). abs (12*x - 13*y + 19*z) > abs (23*x) ))" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}). (EX y< abs (3*x - 1). (ALL z >= (3*abs x - 1). abs (12*x - 13*y + 19*z) > abs (23*x) ))" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}). abs x < 100 & (ALL y > x. (EX z<2*y - x. 5*x - 3*y <= 7*z))" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}). abs x < 100 & (ALL y > x. (EX z<2*y - x. 5*x - 3*y <= 7*z))" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y z w. 7*x<3*y --> 5*y < 7*z --> z < 2*w --> 7*(2*w-x) > 2*y" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y z w. 7*x<3*y --> 5*y < 7*z --> z < 2*w --> 7*(2*w-x) > 2*y" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y z w. 5*x + 3*z - 17*w + abs (y - 8*x + z) <= 89" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}) y z w. 5*x + 3*z - 17*w + abs (y - 8*x + z) <= 89" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y z w. 5*x + 3*z - 17*w + 7* (y - 8*x + z) <= max y (7*z - x + w)" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}) y z w. 5*x + 3*z - 17*w + 7* (y - 8*x + z) <= max y (7*z - x + w)" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w)" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}) y z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w)" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y z. (EX w >= (x+y+z). w <= abs x + abs y + abs z)" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y z. (EX w >= (x+y+z). w <= abs x + abs y + abs z)" by ferrack -lemma "~(ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX y z w. 3* x + z*4 = 3*y & x + y < z & x> w & 3*x < w + y))" +lemma "~(ALL (x::'a::{linordered_field_inverse_zero, number_ring}). (EX y z w. 3* x + z*4 = 3*y & x + y < z & x> w & 3*x < w + y))" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. (EX z w. 
abs (x-y) = (z-w) & z*1234 < 233*x & w ~= y)" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y. (EX z w. abs (x-y) = (z-w) & z*1234 < 233*x & w ~= y)" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX y z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w))" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}). (EX y z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w))" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y z. (ALL w >= abs (x+y+z). w >= abs x + abs y + abs z)" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}) y z. (ALL w >= abs (x+y+z). w >= abs x + abs y + abs z)" by ferrack -lemma "EX z. (ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. (EX w >= (x+y+z). w <= abs x + abs y + abs z))" +lemma "EX z. (ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y. (EX w >= (x+y+z). w <= abs x + abs y + abs z))" by ferrack -lemma "EX z. (ALL (x::'a::{linordered_field,number_ring, division_by_zero}) < abs z. (EX y w. x< y & x < z & x> w & 3*x < w + y))" +lemma "EX z. (ALL (x::'a::{linordered_field_inverse_zero, number_ring}) < abs z. (EX y w. x< y & x < z & x> w & 3*x < w + y))" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. (EX z. (ALL w. abs (x-y) = abs (z-w) --> z < x & w ~= y))" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}) y. (EX z. (ALL w. abs (x-y) = abs (z-w) --> z < x & w ~= y))" by ferrack -lemma "EX y. (ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w)))" +lemma "EX y. (ALL (x::'a::{linordered_field_inverse_zero, number_ring}). (EX z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w)))" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) z. (ALL w >= 13*x - 4*z. (EX y. w >= abs x + abs y + z))" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}) z. (ALL w >= 13*x - 4*z. (EX y. w >= abs x + abs y + z))" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}). (ALL y < x. (EX z > (x+y). +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}). (ALL y < x. (EX z > (x+y). (ALL w. 5*w + 10*x - z >= y --> w + 7*x + 3*z >= 2*y)))" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}). (ALL y. (EX z > y. +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}). (ALL y. (EX z > y. (ALL w . w < 13 --> w + 10*x - z >= y --> 5*w + 7*x + 13*z >= 2*y)))" by ferrack -lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w)" +lemma "EX (x::'a::{linordered_field_inverse_zero, number_ring}) y z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w)" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX y. (ALL z>19. y <= x + z & (EX w. abs (y - x) < w)))" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}). (EX y. (ALL z>19. y <= x + z & (EX w. abs (y - x) < w)))" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX y. (ALL z>19. y <= x + z & (EX w. abs (x + z) < w - y)))" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}). (EX y. (ALL z>19. y <= x + z & (EX w. 
abs (x + z) < w - y)))" by ferrack -lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX y. abs y ~= abs x & (ALL z> max x y. (EX w. w ~= y & w ~= z & 3*w - z >= x + y)))" +lemma "ALL (x::'a::{linordered_field_inverse_zero, number_ring}). (EX y. abs y ~= abs x & (ALL z> max x y. (EX w. w ~= y & w ~= z & 3*w - z >= x + y)))" by ferrack end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Divides.thy --- a/src/HOL/Divides.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Divides.thy Tue May 04 20:30:22 2010 +0200 @@ -379,6 +379,8 @@ class ring_div = semiring_div + comm_ring_1 begin +subclass ring_1_no_zero_divisors .. + text {* Negation respects modular equivalence. *} lemma mod_minus_eq: "(- a) mod b = (- (a mod b)) mod b" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Fields.thy --- a/src/HOL/Fields.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Fields.thy Tue May 04 20:30:22 2010 +0200 @@ -96,61 +96,53 @@ "\b \ 0; c \ 0\ \ (a * c) / (c * b) = a / b" using nonzero_mult_divide_mult_cancel_right [of b c a] by (simp add: mult_ac) -lemma add_divide_eq_iff: +lemma add_divide_eq_iff [field_simps]: "z \ 0 \ x + y / z = (z * x + y) / z" by (simp add: add_divide_distrib) -lemma divide_add_eq_iff: +lemma divide_add_eq_iff [field_simps]: "z \ 0 \ x / z + y = (x + z * y) / z" by (simp add: add_divide_distrib) -lemma diff_divide_eq_iff: +lemma diff_divide_eq_iff [field_simps]: "z \ 0 \ x - y / z = (z * x - y) / z" by (simp add: diff_divide_distrib) -lemma divide_diff_eq_iff: +lemma divide_diff_eq_iff [field_simps]: "z \ 0 \ x / z - y = (x - z * y) / z" by (simp add: diff_divide_distrib) -lemmas field_eq_simps[no_atp] = algebra_simps - (* pull / out*) - add_divide_eq_iff divide_add_eq_iff - diff_divide_eq_iff divide_diff_eq_iff - (* multiply eqn *) - nonzero_eq_divide_eq nonzero_divide_eq_eq - times_divide_eq_left times_divide_eq_right - -text{*An example:*} -lemma "\a\b; c\d; e\f\ \ ((a-b)*(c-d)*(e-f))/((c-d)*(e-f)*(a-b)) = 1" -apply(subgoal_tac "(c-d)*(e-f)*(a-b) \ 0") - apply(simp add:field_eq_simps) -apply(simp) -done - lemma diff_frac_eq: "y \ 0 \ z \ 0 \ x / y - w / z = (x * z - w * y) / (y * z)" - by (simp add: field_eq_simps times_divide_eq) + by (simp add: field_simps) lemma frac_eq_eq: "y \ 0 \ z \ 0 \ (x / y = w / z) = (x * z = w * y)" - by (simp add: field_eq_simps times_divide_eq) + by (simp add: field_simps) end +class field_inverse_zero = field + + assumes field_inverse_zero: "inverse 0 = 0" +begin + +subclass division_ring_inverse_zero proof +qed (fact field_inverse_zero) + text{*This version builds in division by zero while also re-orienting the right-hand side.*} lemma inverse_mult_distrib [simp]: - "inverse(a*b) = inverse(a) * inverse(b::'a::{field,division_by_zero})" - proof cases - assume "a \ 0 & b \ 0" - thus ?thesis by (simp add: nonzero_inverse_mult_distrib mult_ac) - next - assume "~ (a \ 0 & b \ 0)" - thus ?thesis by force - qed + "inverse (a * b) = inverse a * inverse b" +proof cases + assume "a \ 0 & b \ 0" + thus ?thesis by (simp add: nonzero_inverse_mult_distrib mult_ac) +next + assume "~ (a \ 0 & b \ 0)" + thus ?thesis by force +qed lemma inverse_divide [simp]: - "inverse (a/b) = b / (a::'a::{field,division_by_zero})" + "inverse (a / b) = b / a" by (simp add: divide_inverse mult_commute) @@ -161,86 +153,88 @@ because the latter are covered by a simproc. 
*} lemma mult_divide_mult_cancel_left: - "c\0 ==> (c*a) / (c*b) = a / (b::'a::{field,division_by_zero})" + "c \ 0 \ (c * a) / (c * b) = a / b" apply (cases "b = 0") apply simp_all done lemma mult_divide_mult_cancel_right: - "c\0 ==> (a*c) / (b*c) = a / (b::'a::{field,division_by_zero})" + "c \ 0 \ (a * c) / (b * c) = a / b" apply (cases "b = 0") apply simp_all done -lemma divide_divide_eq_right [simp,no_atp]: - "a / (b/c) = (a*c) / (b::'a::{field,division_by_zero})" -by (simp add: divide_inverse mult_ac) +lemma divide_divide_eq_right [simp, no_atp]: + "a / (b / c) = (a * c) / b" + by (simp add: divide_inverse mult_ac) -lemma divide_divide_eq_left [simp,no_atp]: - "(a / b) / (c::'a::{field,division_by_zero}) = a / (b*c)" -by (simp add: divide_inverse mult_assoc) +lemma divide_divide_eq_left [simp, no_atp]: + "(a / b) / c = a / (b * c)" + by (simp add: divide_inverse mult_assoc) text {*Special Cancellation Simprules for Division*} -lemma mult_divide_mult_cancel_left_if[simp,no_atp]: -fixes c :: "'a :: {field,division_by_zero}" -shows "(c*a) / (c*b) = (if c=0 then 0 else a/b)" -by (simp add: mult_divide_mult_cancel_left) +lemma mult_divide_mult_cancel_left_if [simp,no_atp]: + shows "(c * a) / (c * b) = (if c = 0 then 0 else a / b)" + by (simp add: mult_divide_mult_cancel_left) text {* Division and Unary Minus *} -lemma minus_divide_right: "- (a/b) = a / -(b::'a::{field,division_by_zero})" -by (simp add: divide_inverse) +lemma minus_divide_right: + "- (a / b) = a / - b" + by (simp add: divide_inverse) lemma divide_minus_right [simp, no_atp]: - "a / -(b::'a::{field,division_by_zero}) = -(a / b)" -by (simp add: divide_inverse) + "a / - b = - (a / b)" + by (simp add: divide_inverse) lemma minus_divide_divide: - "(-a)/(-b) = a / (b::'a::{field,division_by_zero})" + "(- a) / (- b) = a / b" apply (cases "b=0", simp) apply (simp add: nonzero_minus_divide_divide) done lemma eq_divide_eq: - "((a::'a::{field,division_by_zero}) = b/c) = (if c\0 then a*c = b else a=0)" -by (simp add: nonzero_eq_divide_eq) + "a = b / c \ (if c \ 0 then a * c = b else a = 0)" + by (simp add: nonzero_eq_divide_eq) lemma divide_eq_eq: - "(b/c = (a::'a::{field,division_by_zero})) = (if c\0 then b = a*c else a=0)" -by (force simp add: nonzero_divide_eq_eq) + "b / c = a \ (if c \ 0 then b = a * c else a = 0)" + by (force simp add: nonzero_divide_eq_eq) lemma inverse_eq_1_iff [simp]: - "(inverse x = 1) = (x = (1::'a::{field,division_by_zero}))" -by (insert inverse_eq_iff_eq [of x 1], simp) + "inverse x = 1 \ x = 1" + by (insert inverse_eq_iff_eq [of x 1], simp) -lemma divide_eq_0_iff [simp,no_atp]: - "(a/b = 0) = (a=0 | b=(0::'a::{field,division_by_zero}))" -by (simp add: divide_inverse) +lemma divide_eq_0_iff [simp, no_atp]: + "a / b = 0 \ a = 0 \ b = 0" + by (simp add: divide_inverse) -lemma divide_cancel_right [simp,no_atp]: - "(a/c = b/c) = (c = 0 | a = (b::'a::{field,division_by_zero}))" -apply (cases "c=0", simp) -apply (simp add: divide_inverse) -done +lemma divide_cancel_right [simp, no_atp]: + "a / c = b / c \ c = 0 \ a = b" + apply (cases "c=0", simp) + apply (simp add: divide_inverse) + done -lemma divide_cancel_left [simp,no_atp]: - "(c/a = c/b) = (c = 0 | a = (b::'a::{field,division_by_zero}))" -apply (cases "c=0", simp) -apply (simp add: divide_inverse) -done +lemma divide_cancel_left [simp, no_atp]: + "c / a = c / b \ c = 0 \ a = b" + apply (cases "c=0", simp) + apply (simp add: divide_inverse) + done -lemma divide_eq_1_iff [simp,no_atp]: - "(a/b = 1) = (b \ 0 & a = (b::'a::{field,division_by_zero}))" -apply (cases 
"b=0", simp) -apply (simp add: right_inverse_eq) -done +lemma divide_eq_1_iff [simp, no_atp]: + "a / b = 1 \ b \ 0 \ a = b" + apply (cases "b=0", simp) + apply (simp add: right_inverse_eq) + done -lemma one_eq_divide_iff [simp,no_atp]: - "(1 = a/b) = (b \ 0 & a = (b::'a::{field,division_by_zero}))" -by (simp add: eq_commute [of 1]) +lemma one_eq_divide_iff [simp, no_atp]: + "1 = a / b \ b \ 0 \ a = b" + by (simp add: eq_commute [of 1]) + +end text {* Ordered Fields *} @@ -391,7 +385,7 @@ "a < 0 \ b < 0 \ inverse a \ inverse b \ b \ a" by (blast intro: le_imp_inverse_le_neg dest: inverse_le_imp_le_neg) -lemma pos_le_divide_eq: "0 < c ==> (a \ b/c) = (a*c \ b)" +lemma pos_le_divide_eq [field_simps]: "0 < c ==> (a \ b/c) = (a*c \ b)" proof - assume less: "0 b/c) = (a*c \ (b/c)*c)" @@ -401,7 +395,7 @@ finally show ?thesis . qed -lemma neg_le_divide_eq: "c < 0 ==> (a \ b/c) = (b \ a*c)" +lemma neg_le_divide_eq [field_simps]: "c < 0 ==> (a \ b/c) = (b \ a*c)" proof - assume less: "c<0" hence "(a \ b/c) = ((b/c)*c \ a*c)" @@ -411,7 +405,7 @@ finally show ?thesis . qed -lemma pos_less_divide_eq: +lemma pos_less_divide_eq [field_simps]: "0 < c ==> (a < b/c) = (a*c < b)" proof - assume less: "0 (a < b/c) = (b < a*c)" proof - assume less: "c<0" @@ -433,7 +427,7 @@ finally show ?thesis . qed -lemma pos_divide_less_eq: +lemma pos_divide_less_eq [field_simps]: "0 < c ==> (b/c < a) = (b < a*c)" proof - assume less: "0 (b/c < a) = (a*c < b)" proof - assume less: "c<0" @@ -455,7 +449,7 @@ finally show ?thesis . qed -lemma pos_divide_le_eq: "0 < c ==> (b/c \ a) = (b \ a*c)" +lemma pos_divide_le_eq [field_simps]: "0 < c ==> (b/c \ a) = (b \ a*c)" proof - assume less: "0 a) = ((b/c)*c \ a*c)" @@ -465,7 +459,7 @@ finally show ?thesis . qed -lemma neg_divide_le_eq: "c < 0 ==> (b/c \ a) = (a*c \ b)" +lemma neg_divide_le_eq [field_simps]: "c < 0 ==> (b/c \ a) = (a*c \ b)" proof - assume less: "c<0" hence "(b/c \ a) = (a*c \ (b/c)*c)" @@ -475,24 +469,15 @@ finally show ?thesis . qed -text{* Lemmas @{text field_simps} multiply with denominators in in(equations) -if they can be proved to be non-zero (for equations) or positive/negative -(for inequations). Can be too aggressive and is therefore separate from the -more benign @{text algebra_simps}. *} - -lemmas field_simps[no_atp] = field_eq_simps - (* multiply ineqn *) - pos_divide_less_eq neg_divide_less_eq - pos_less_divide_eq neg_less_divide_eq - pos_divide_le_eq neg_divide_le_eq - pos_le_divide_eq neg_le_divide_eq - text{* Lemmas @{text sign_simps} is a first attempt to automate proofs of positivity/negativity needed for @{text field_simps}. Have not added @{text sign_simps} to @{text field_simps} because the former can lead to case explosions. 
*} -lemmas sign_simps[no_atp] = group_simps +lemmas sign_simps [no_atp] = algebra_simps + zero_less_mult_iff mult_less_0_iff + +lemmas (in -) sign_simps [no_atp] = algebra_simps zero_less_mult_iff mult_less_0_iff (* Only works once linear arithmetic is installed: @@ -658,37 +643,40 @@ end +class linordered_field_inverse_zero = linordered_field + field_inverse_zero +begin + lemma le_divide_eq: "(a \ b/c) = (if 0 < c then a*c \ b else if c < 0 then b \ a*c - else a \ (0::'a::{linordered_field,division_by_zero}))" + else a \ 0)" apply (cases "c=0", simp) apply (force simp add: pos_le_divide_eq neg_le_divide_eq linorder_neq_iff) done lemma inverse_positive_iff_positive [simp]: - "(0 < inverse a) = (0 < (a::'a::{linordered_field,division_by_zero}))" + "(0 < inverse a) = (0 < a)" apply (cases "a = 0", simp) apply (blast intro: inverse_positive_imp_positive positive_imp_inverse_positive) done lemma inverse_negative_iff_negative [simp]: - "(inverse a < 0) = (a < (0::'a::{linordered_field,division_by_zero}))" + "(inverse a < 0) = (a < 0)" apply (cases "a = 0", simp) apply (blast intro: inverse_negative_imp_negative negative_imp_inverse_negative) done lemma inverse_nonnegative_iff_nonnegative [simp]: - "(0 \ inverse a) = (0 \ (a::'a::{linordered_field,division_by_zero}))" -by (simp add: linorder_not_less [symmetric]) + "0 \ inverse a \ 0 \ a" + by (simp add: not_less [symmetric]) lemma inverse_nonpositive_iff_nonpositive [simp]: - "(inverse a \ 0) = (a \ (0::'a::{linordered_field,division_by_zero}))" -by (simp add: linorder_not_less [symmetric]) + "inverse a \ 0 \ a \ 0" + by (simp add: not_less [symmetric]) lemma one_less_inverse_iff: - "(1 < inverse x) = (0 < x & x < (1::'a::{linordered_field,division_by_zero}))" + "1 < inverse x \ 0 < x \ x < 1" proof cases assume "0 < x" with inverse_less_iff_less [OF zero_less_one, of x] @@ -698,7 +686,7 @@ have "~ (1 < inverse x)" proof assume "1 < inverse x" - also with notless have "... \ 0" by (simp add: linorder_not_less) + also with notless have "... \ 0" by simp also have "... 
< 1" by (rule zero_less_one) finally show False by auto qed @@ -706,62 +694,69 @@ qed lemma one_le_inverse_iff: - "(1 \ inverse x) = (0 < x & x \ (1::'a::{linordered_field,division_by_zero}))" -by (force simp add: order_le_less one_less_inverse_iff) + "1 \ inverse x \ 0 < x \ x \ 1" +proof (cases "x = 1") + case True then show ?thesis by simp +next + case False then have "inverse x \ 1" by simp + then have "1 \ inverse x" by blast + then have "1 \ inverse x \ 1 < inverse x" by (simp add: le_less) + with False show ?thesis by (auto simp add: one_less_inverse_iff) +qed lemma inverse_less_1_iff: - "(inverse x < 1) = (x \ 0 | 1 < (x::'a::{linordered_field,division_by_zero}))" -by (simp add: linorder_not_le [symmetric] one_le_inverse_iff) + "inverse x < 1 \ x \ 0 \ 1 < x" + by (simp add: not_le [symmetric] one_le_inverse_iff) lemma inverse_le_1_iff: - "(inverse x \ 1) = (x \ 0 | 1 \ (x::'a::{linordered_field,division_by_zero}))" -by (simp add: linorder_not_less [symmetric] one_less_inverse_iff) + "inverse x \ 1 \ x \ 0 \ 1 \ x" + by (simp add: not_less [symmetric] one_less_inverse_iff) lemma divide_le_eq: "(b/c \ a) = (if 0 < c then b \ a*c else if c < 0 then a*c \ b - else 0 \ (a::'a::{linordered_field,division_by_zero}))" + else 0 \ a)" apply (cases "c=0", simp) -apply (force simp add: pos_divide_le_eq neg_divide_le_eq linorder_neq_iff) +apply (force simp add: pos_divide_le_eq neg_divide_le_eq) done lemma less_divide_eq: "(a < b/c) = (if 0 < c then a*c < b else if c < 0 then b < a*c - else a < (0::'a::{linordered_field,division_by_zero}))" + else a < 0)" apply (cases "c=0", simp) -apply (force simp add: pos_less_divide_eq neg_less_divide_eq linorder_neq_iff) +apply (force simp add: pos_less_divide_eq neg_less_divide_eq) done lemma divide_less_eq: "(b/c < a) = (if 0 < c then b < a*c else if c < 0 then a*c < b - else 0 < (a::'a::{linordered_field,division_by_zero}))" + else 0 < a)" apply (cases "c=0", simp) -apply (force simp add: pos_divide_less_eq neg_divide_less_eq linorder_neq_iff) +apply (force simp add: pos_divide_less_eq neg_divide_less_eq) done text {*Division and Signs*} lemma zero_less_divide_iff: - "((0::'a::{linordered_field,division_by_zero}) < a/b) = (0 < a & 0 < b | a < 0 & b < 0)" + "(0 < a/b) = (0 < a & 0 < b | a < 0 & b < 0)" by (simp add: divide_inverse zero_less_mult_iff) lemma divide_less_0_iff: - "(a/b < (0::'a::{linordered_field,division_by_zero})) = + "(a/b < 0) = (0 < a & b < 0 | a < 0 & 0 < b)" by (simp add: divide_inverse mult_less_0_iff) lemma zero_le_divide_iff: - "((0::'a::{linordered_field,division_by_zero}) \ a/b) = + "(0 \ a/b) = (0 \ a & 0 \ b | a \ 0 & b \ 0)" by (simp add: divide_inverse zero_le_mult_iff) lemma divide_le_0_iff: - "(a/b \ (0::'a::{linordered_field,division_by_zero})) = + "(a/b \ 0) = (0 \ a & b \ 0 | a \ 0 & 0 \ b)" by (simp add: divide_inverse mult_le_0_iff) @@ -770,143 +765,133 @@ text{*Simplify expressions equated with 1*} lemma zero_eq_1_divide_iff [simp,no_atp]: - "((0::'a::{linordered_field,division_by_zero}) = 1/a) = (a = 0)" + "(0 = 1/a) = (a = 0)" apply (cases "a=0", simp) apply (auto simp add: nonzero_eq_divide_eq) done lemma one_divide_eq_0_iff [simp,no_atp]: - "(1/a = (0::'a::{linordered_field,division_by_zero})) = (a = 0)" + "(1/a = 0) = (a = 0)" apply (cases "a=0", simp) apply (insert zero_neq_one [THEN not_sym]) apply (auto simp add: nonzero_divide_eq_eq) done text{*Simplify expressions such as @{text "0 < 1/x"} to @{text "0 < x"}*} -lemmas zero_less_divide_1_iff = zero_less_divide_iff [of 1, simplified] -lemmas 
divide_less_0_1_iff = divide_less_0_iff [of 1, simplified] -lemmas zero_le_divide_1_iff = zero_le_divide_iff [of 1, simplified] -lemmas divide_le_0_1_iff = divide_le_0_iff [of 1, simplified] + +lemma zero_le_divide_1_iff [simp, no_atp]: + "0 \ 1 / a \ 0 \ a" + by (simp add: zero_le_divide_iff) -declare zero_less_divide_1_iff [simp,no_atp] -declare divide_less_0_1_iff [simp,no_atp] -declare zero_le_divide_1_iff [simp,no_atp] -declare divide_le_0_1_iff [simp,no_atp] +lemma zero_less_divide_1_iff [simp, no_atp]: + "0 < 1 / a \ 0 < a" + by (simp add: zero_less_divide_iff) + +lemma divide_le_0_1_iff [simp, no_atp]: + "1 / a \ 0 \ a \ 0" + by (simp add: divide_le_0_iff) + +lemma divide_less_0_1_iff [simp, no_atp]: + "1 / a < 0 \ a < 0" + by (simp add: divide_less_0_iff) lemma divide_right_mono: - "[|a \ b; 0 \ c|] ==> a/c \ b/(c::'a::{linordered_field,division_by_zero})" -by (force simp add: divide_strict_right_mono order_le_less) + "[|a \ b; 0 \ c|] ==> a/c \ b/c" +by (force simp add: divide_strict_right_mono le_less) -lemma divide_right_mono_neg: "(a::'a::{linordered_field,division_by_zero}) <= b +lemma divide_right_mono_neg: "a <= b ==> c <= 0 ==> b / c <= a / c" apply (drule divide_right_mono [of _ _ "- c"]) apply auto done -lemma divide_left_mono_neg: "(a::'a::{linordered_field,division_by_zero}) <= b +lemma divide_left_mono_neg: "a <= b ==> c <= 0 ==> 0 < a * b ==> c / a <= c / b" apply (drule divide_left_mono [of _ _ "- c"]) apply (auto simp add: mult_commute) done - - text{*Simplify quotients that are compared with the value 1.*} lemma le_divide_eq_1 [no_atp]: - fixes a :: "'a :: {linordered_field,division_by_zero}" - shows "(1 \ b / a) = ((0 < a & a \ b) | (a < 0 & b \ a))" + "(1 \ b / a) = ((0 < a & a \ b) | (a < 0 & b \ a))" by (auto simp add: le_divide_eq) lemma divide_le_eq_1 [no_atp]: - fixes a :: "'a :: {linordered_field,division_by_zero}" - shows "(b / a \ 1) = ((0 < a & b \ a) | (a < 0 & a \ b) | a=0)" + "(b / a \ 1) = ((0 < a & b \ a) | (a < 0 & a \ b) | a=0)" by (auto simp add: divide_le_eq) lemma less_divide_eq_1 [no_atp]: - fixes a :: "'a :: {linordered_field,division_by_zero}" - shows "(1 < b / a) = ((0 < a & a < b) | (a < 0 & b < a))" + "(1 < b / a) = ((0 < a & a < b) | (a < 0 & b < a))" by (auto simp add: less_divide_eq) lemma divide_less_eq_1 [no_atp]: - fixes a :: "'a :: {linordered_field,division_by_zero}" - shows "(b / a < 1) = ((0 < a & b < a) | (a < 0 & a < b) | a=0)" + "(b / a < 1) = ((0 < a & b < a) | (a < 0 & a < b) | a=0)" by (auto simp add: divide_less_eq) text {*Conditional Simplification Rules: No Case Splits*} lemma le_divide_eq_1_pos [simp,no_atp]: - fixes a :: "'a :: {linordered_field,division_by_zero}" - shows "0 < a \ (1 \ b/a) = (a \ b)" + "0 < a \ (1 \ b/a) = (a \ b)" by (auto simp add: le_divide_eq) lemma le_divide_eq_1_neg [simp,no_atp]: - fixes a :: "'a :: {linordered_field,division_by_zero}" - shows "a < 0 \ (1 \ b/a) = (b \ a)" + "a < 0 \ (1 \ b/a) = (b \ a)" by (auto simp add: le_divide_eq) lemma divide_le_eq_1_pos [simp,no_atp]: - fixes a :: "'a :: {linordered_field,division_by_zero}" - shows "0 < a \ (b/a \ 1) = (b \ a)" + "0 < a \ (b/a \ 1) = (b \ a)" by (auto simp add: divide_le_eq) lemma divide_le_eq_1_neg [simp,no_atp]: - fixes a :: "'a :: {linordered_field,division_by_zero}" - shows "a < 0 \ (b/a \ 1) = (a \ b)" + "a < 0 \ (b/a \ 1) = (a \ b)" by (auto simp add: divide_le_eq) lemma less_divide_eq_1_pos [simp,no_atp]: - fixes a :: "'a :: {linordered_field,division_by_zero}" - shows "0 < a \ (1 < b/a) = (a < b)" + "0 < a \ (1 < b/a) = (a < 
b)" by (auto simp add: less_divide_eq) lemma less_divide_eq_1_neg [simp,no_atp]: - fixes a :: "'a :: {linordered_field,division_by_zero}" - shows "a < 0 \ (1 < b/a) = (b < a)" + "a < 0 \ (1 < b/a) = (b < a)" by (auto simp add: less_divide_eq) lemma divide_less_eq_1_pos [simp,no_atp]: - fixes a :: "'a :: {linordered_field,division_by_zero}" - shows "0 < a \ (b/a < 1) = (b < a)" + "0 < a \ (b/a < 1) = (b < a)" by (auto simp add: divide_less_eq) lemma divide_less_eq_1_neg [simp,no_atp]: - fixes a :: "'a :: {linordered_field,division_by_zero}" - shows "a < 0 \ b/a < 1 <-> a < b" + "a < 0 \ b/a < 1 <-> a < b" by (auto simp add: divide_less_eq) lemma eq_divide_eq_1 [simp,no_atp]: - fixes a :: "'a :: {linordered_field,division_by_zero}" - shows "(1 = b/a) = ((a \ 0 & a = b))" + "(1 = b/a) = ((a \ 0 & a = b))" by (auto simp add: eq_divide_eq) lemma divide_eq_eq_1 [simp,no_atp]: - fixes a :: "'a :: {linordered_field,division_by_zero}" - shows "(b/a = 1) = ((a \ 0 & a = b))" + "(b/a = 1) = ((a \ 0 & a = b))" by (auto simp add: divide_eq_eq) lemma abs_inverse [simp]: - "\inverse (a::'a::{linordered_field,division_by_zero})\ = + "\inverse a\ = inverse \a\" apply (cases "a=0", simp) apply (simp add: nonzero_abs_inverse) done lemma abs_divide [simp]: - "\a / (b::'a::{linordered_field,division_by_zero})\ = \a\ / \b\" + "\a / b\ = \a\ / \b\" apply (cases "b=0", simp) apply (simp add: nonzero_abs_divide) done -lemma abs_div_pos: "(0::'a::{linordered_field,division_by_zero}) < y ==> +lemma abs_div_pos: "0 < y ==> \x\ / y = \x / y\" apply (subst abs_divide) apply (simp add: order_less_imp_le) done lemma field_le_mult_one_interval: - fixes x :: "'a\{linordered_field,division_by_zero}" assumes *: "\z. \ 0 < z ; z < 1 \ \ z * x \ y" shows "x \ y" proof (cases "0 < x") @@ -922,6 +907,8 @@ finally show ?thesis . qed +end + code_modulename SML Fields Arith diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Finite_Set.thy --- a/src/HOL/Finite_Set.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Finite_Set.thy Tue May 04 20:30:22 2010 +0200 @@ -509,13 +509,8 @@ subsection {* Class @{text finite} *} -setup {* Sign.add_path "finite" *} -- {*FIXME: name tweaking*} class finite = assumes finite_UNIV: "finite (UNIV \ 'a set)" -setup {* Sign.parent_path *} -hide_const finite - -context finite begin lemma finite [simp]: "finite (A \ 'a set)" @@ -1734,12 +1729,10 @@ qed lemma insert [simp]: - assumes "finite A" and "x \ A" - shows "F (insert x A) = (if A = {} then x else x * F A)" -proof (cases "A = {}") - case True then show ?thesis by simp -next - case False then obtain b where "b \ A" by blast + assumes "finite A" and "x \ A" and "A \ {}" + shows "F (insert x A) = x * F A" +proof - + from `A \ {}` obtain b where "b \ A" by blast then obtain B where *: "A = insert b B" "b \ B" by (blast dest: mk_disjoint_insert) with `finite A` have "finite B" by simp interpret fold: folding "op *" "\a b. 
fold (op *) b a" proof @@ -1833,8 +1826,6 @@ (simp_all add: assoc in_idem `finite A`) qed -declare insert [simp del] - lemma eq_fold_idem': assumes "finite A" shows "F (insert a A) = fold (op *) a A" @@ -1844,13 +1835,13 @@ qed lemma insert_idem [simp]: - assumes "finite A" - shows "F (insert x A) = (if A = {} then x else x * F A)" + assumes "finite A" and "A \ {}" + shows "F (insert x A) = x * F A" proof (cases "x \ A") - case False with `finite A` show ?thesis by (rule insert) + case False from `finite A` `x \ A` `A \ {}` show ?thesis by (rule insert) next - case True then have "A \ {}" by auto - with `finite A` show ?thesis by (simp add: in_idem insert_absorb True) + case True + from `finite A` `A \ {}` show ?thesis by (simp add: in_idem insert_absorb True) qed lemma union_idem: diff -r aace7a969410 -r 8629ac3efb19 src/HOL/FunDef.thy --- a/src/HOL/FunDef.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/FunDef.thy Tue May 04 20:30:22 2010 +0200 @@ -314,8 +314,8 @@ ML_val -- "setup inactive" {* - Context.theory_map (Function_Common.set_termination_prover (ScnpReconstruct.decomp_scnp - [ScnpSolve.MAX, ScnpSolve.MIN, ScnpSolve.MS])) + Context.theory_map (Function_Common.set_termination_prover + (ScnpReconstruct.decomp_scnp_tac [ScnpSolve.MAX, ScnpSolve.MIN, ScnpSolve.MS])) *} end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/GCD.thy --- a/src/HOL/GCD.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/GCD.thy Tue May 04 20:30:22 2010 +0200 @@ -1034,11 +1034,11 @@ thus "fst (bezw m n) * int m + snd (bezw m n) * int n = int (gcd m n)" apply (simp add: bezw_non_0 gcd_non_0_nat) apply (erule subst) - apply (simp add: ring_simps) + apply (simp add: field_simps) apply (subst mod_div_equality [of m n, symmetric]) (* applying simp here undoes the last substitution! what is procedure cancel_div_mod? 
*) - apply (simp only: ring_simps zadd_int [symmetric] + apply (simp only: field_simps zadd_int [symmetric] zmult_int [symmetric]) done qed @@ -1389,7 +1389,7 @@ show "lcm (lcm n m) p = lcm n (lcm m p)" by (rule lcm_unique_nat [THEN iffD1]) (metis dvd.order_trans lcm_unique_nat) show "lcm m n = lcm n m" - by (simp add: lcm_nat_def gcd_commute_nat ring_simps) + by (simp add: lcm_nat_def gcd_commute_nat field_simps) qed interpretation lcm_int!: abel_semigroup "lcm :: int \ int \ int" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Groebner_Basis.thy --- a/src/HOL/Groebner_Basis.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Groebner_Basis.thy Tue May 04 20:30:22 2010 +0200 @@ -473,21 +473,21 @@ interpretation class_fieldgb: fieldgb "op +" "op *" "op ^" "0::'a::{field,number_ring}" "1" "op -" "uminus" "op /" "inverse" apply (unfold_locales) by (simp_all add: divide_inverse) -lemma divide_Numeral1: "(x::'a::{field,number_ring}) / Numeral1 = x" by simp -lemma divide_Numeral0: "(x::'a::{field,number_ring, division_by_zero}) / Numeral0 = 0" +lemma divide_Numeral1: "(x::'a::{field, number_ring}) / Numeral1 = x" by simp +lemma divide_Numeral0: "(x::'a::{field_inverse_zero, number_ring}) / Numeral0 = 0" by simp -lemma mult_frac_frac: "((x::'a::{field,division_by_zero}) / y) * (z / w) = (x*z) / (y*w)" +lemma mult_frac_frac: "((x::'a::field_inverse_zero) / y) * (z / w) = (x*z) / (y*w)" by simp -lemma mult_frac_num: "((x::'a::{field, division_by_zero}) / y) * z = (x*z) / y" +lemma mult_frac_num: "((x::'a::field_inverse_zero) / y) * z = (x*z) / y" by simp -lemma mult_num_frac: "((x::'a::{field, division_by_zero}) / y) * z = (x*z) / y" +lemma mult_num_frac: "((x::'a::field_inverse_zero) / y) * z = (x*z) / y" by simp lemma Numeral1_eq1_nat: "(1::nat) = Numeral1" by simp -lemma add_frac_num: "y\ 0 \ (x::'a::{field, division_by_zero}) / y + z = (x + z*y) / y" +lemma add_frac_num: "y\ 0 \ (x::'a::field_inverse_zero) / y + z = (x + z*y) / y" by (simp add: add_divide_distrib) -lemma add_num_frac: "y\ 0 \ z + (x::'a::{field, division_by_zero}) / y = (x + z*y) / y" +lemma add_num_frac: "y\ 0 \ z + (x::'a::field_inverse_zero) / y = (x + z*y) / y" by (simp add: add_divide_distrib) ML {* diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Groups.thy --- a/src/HOL/Groups.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Groups.thy Tue May 04 20:30:22 2010 +0200 @@ -12,13 +12,13 @@ subsection {* Fact collections *} ML {* -structure Algebra_Simps = Named_Thms( - val name = "algebra_simps" - val description = "algebra simplification rules" +structure Ac_Simps = Named_Thms( + val name = "ac_simps" + val description = "associativity and commutativity simplification rules" ) *} -setup Algebra_Simps.setup +setup Ac_Simps.setup text{* The rewrites accumulated in @{text algebra_simps} deal with the classical algebraic structures of groups, rings and family. They simplify @@ -29,15 +29,28 @@ Of course it also works for fields, but it knows nothing about multiplicative inverses or division. This is catered for by @{text field_simps}. *} - ML {* -structure Ac_Simps = Named_Thms( - val name = "ac_simps" - val description = "associativity and commutativity simplification rules" +structure Algebra_Simps = Named_Thms( + val name = "algebra_simps" + val description = "algebra simplification rules" ) *} -setup Ac_Simps.setup +setup Algebra_Simps.setup + +text{* Lemmas @{text field_simps} multiply with denominators in (in)equations +if they can be proved to be non-zero (for equations) or positive/negative +(for inequations). 
Can be too aggressive and is therefore separate from the +more benign @{text algebra_simps}. *} + +ML {* +structure Field_Simps = Named_Thms( + val name = "field_simps" + val description = "algebra simplification rules for fields" +) +*} + +setup Field_Simps.setup subsection {* Abstract structures *} @@ -139,13 +152,13 @@ subsection {* Semigroups and Monoids *} class semigroup_add = plus + - assumes add_assoc [algebra_simps]: "(a + b) + c = a + (b + c)" + assumes add_assoc [algebra_simps, field_simps]: "(a + b) + c = a + (b + c)" sublocale semigroup_add < add!: semigroup plus proof qed (fact add_assoc) class ab_semigroup_add = semigroup_add + - assumes add_commute [algebra_simps]: "a + b = b + a" + assumes add_commute [algebra_simps, field_simps]: "a + b = b + a" sublocale ab_semigroup_add < add!: abel_semigroup plus proof qed (fact add_commute) @@ -153,7 +166,7 @@ context ab_semigroup_add begin -lemmas add_left_commute [algebra_simps] = add.left_commute +lemmas add_left_commute [algebra_simps, field_simps] = add.left_commute theorems add_ac = add_assoc add_commute add_left_commute @@ -162,13 +175,13 @@ theorems add_ac = add_assoc add_commute add_left_commute class semigroup_mult = times + - assumes mult_assoc [algebra_simps]: "(a * b) * c = a * (b * c)" + assumes mult_assoc [algebra_simps, field_simps]: "(a * b) * c = a * (b * c)" sublocale semigroup_mult < mult!: semigroup times proof qed (fact mult_assoc) class ab_semigroup_mult = semigroup_mult + - assumes mult_commute [algebra_simps]: "a * b = b * a" + assumes mult_commute [algebra_simps, field_simps]: "a * b = b * a" sublocale ab_semigroup_mult < mult!: abel_semigroup times proof qed (fact mult_commute) @@ -176,7 +189,7 @@ context ab_semigroup_mult begin -lemmas mult_left_commute [algebra_simps] = mult.left_commute +lemmas mult_left_commute [algebra_simps, field_simps] = mult.left_commute theorems mult_ac = mult_assoc mult_commute mult_left_commute @@ -371,7 +384,7 @@ lemma add_diff_cancel: "a + b - b = a" by (simp add: diff_minus add_assoc) -declare diff_minus[symmetric, algebra_simps] +declare diff_minus[symmetric, algebra_simps, field_simps] lemma eq_neg_iff_add_eq_0: "a = - b \ a + b = 0" proof @@ -402,7 +415,7 @@ then show "b = c" by simp qed -lemma uminus_add_conv_diff[algebra_simps]: +lemma uminus_add_conv_diff[algebra_simps, field_simps]: "- a + b = b - a" by (simp add:diff_minus add_commute) @@ -414,22 +427,22 @@ "- (a - b) = b - a" by (simp add: diff_minus add_commute) -lemma add_diff_eq[algebra_simps]: "a + (b - c) = (a + b) - c" +lemma add_diff_eq[algebra_simps, field_simps]: "a + (b - c) = (a + b) - c" by (simp add: diff_minus add_ac) -lemma diff_add_eq[algebra_simps]: "(a - b) + c = (a + c) - b" +lemma diff_add_eq[algebra_simps, field_simps]: "(a - b) + c = (a + c) - b" by (simp add: diff_minus add_ac) -lemma diff_eq_eq[algebra_simps]: "a - b = c \ a = c + b" +lemma diff_eq_eq[algebra_simps, field_simps]: "a - b = c \ a = c + b" by (auto simp add: diff_minus add_assoc) -lemma eq_diff_eq[algebra_simps]: "a = c - b \ a + b = c" +lemma eq_diff_eq[algebra_simps, field_simps]: "a = c - b \ a + b = c" by (auto simp add: diff_minus add_assoc) -lemma diff_diff_eq[algebra_simps]: "(a - b) - c = a - (b + c)" +lemma diff_diff_eq[algebra_simps, field_simps]: "(a - b) - c = a - (b + c)" by (simp add: diff_minus add_ac) -lemma diff_diff_eq2[algebra_simps]: "a - (b - c) = (a + c) - b" +lemma diff_diff_eq2[algebra_simps, field_simps]: "a - (b - c) = (a + c) - b" by (simp add: diff_minus add_ac) lemma eq_iff_diff_eq_0: "a = b \ a - b = 
0" @@ -750,35 +763,29 @@ finally show ?thesis . qed -lemma diff_less_eq[algebra_simps]: "a - b < c \ a < c + b" +lemma diff_less_eq[algebra_simps, field_simps]: "a - b < c \ a < c + b" apply (subst less_iff_diff_less_0 [of a]) apply (rule less_iff_diff_less_0 [of _ c, THEN ssubst]) apply (simp add: diff_minus add_ac) done -lemma less_diff_eq[algebra_simps]: "a < c - b \ a + b < c" +lemma less_diff_eq[algebra_simps, field_simps]: "a < c - b \ a + b < c" apply (subst less_iff_diff_less_0 [of "a + b"]) apply (subst less_iff_diff_less_0 [of a]) apply (simp add: diff_minus add_ac) done -lemma diff_le_eq[algebra_simps]: "a - b \ c \ a \ c + b" +lemma diff_le_eq[algebra_simps, field_simps]: "a - b \ c \ a \ c + b" by (auto simp add: le_less diff_less_eq diff_add_cancel add_diff_cancel) -lemma le_diff_eq[algebra_simps]: "a \ c - b \ a + b \ c" +lemma le_diff_eq[algebra_simps, field_simps]: "a \ c - b \ a + b \ c" by (auto simp add: le_less less_diff_eq diff_add_cancel add_diff_cancel) lemma le_iff_diff_le_0: "a \ b \ a - b \ 0" by (simp add: algebra_simps) -text{*Legacy - use @{text algebra_simps} *} -lemmas group_simps[no_atp] = algebra_simps - end -text{*Legacy - use @{text algebra_simps} *} -lemmas group_simps[no_atp] = algebra_simps - class linordered_ab_semigroup_add = linorder + ordered_ab_semigroup_add diff -r aace7a969410 -r 8629ac3efb19 src/HOL/HOL.thy --- a/src/HOL/HOL.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/HOL.thy Tue May 04 20:30:22 2010 +0200 @@ -40,7 +40,7 @@ subsubsection {* Core syntax *} classes type -defaultsort type +default_sort type setup {* Object_Logic.add_base_sort @{sort type} *} arities @@ -73,7 +73,7 @@ local consts - If :: "[bool, 'a, 'a] => 'a" ("(if (_)/ then (_)/ else (_))" 10) + If :: "[bool, 'a, 'a] => 'a" ("(if (_)/ then (_)/ else (_))" [0, 0, 10] 10) subsubsection {* Additional concrete syntax *} @@ -118,7 +118,7 @@ "_bind" :: "[pttrn, 'a] => letbind" ("(2_ =/ _)" 10) "" :: "letbind => letbinds" ("_") "_binds" :: "[letbind, letbinds] => letbinds" ("_;/ _") - "_Let" :: "[letbinds, 'a] => 'a" ("(let (_)/ in (_))" 10) + "_Let" :: "[letbinds, 'a] => 'a" ("(let (_)/ in (_))" [0, 10] 10) "_case_syntax":: "['a, cases_syn] => 'b" ("(case _ of/ _)" 10) "_case1" :: "['a, 'b] => case_syn" ("(2_ =>/ _)" 10) @@ -1491,9 +1491,9 @@ setup {* Induct.setup #> Context.theory_map (Induct.map_simpset (fn ss => ss - setmksimps (Simpdata.mksimps Simpdata.mksimps_pairs #> + setmksimps (fn ss => Simpdata.mksimps Simpdata.mksimps_pairs ss #> map (Simplifier.rewrite_rule (map Thm.symmetric - @{thms induct_rulify_fallback induct_true_def induct_false_def}))) + @{thms induct_rulify_fallback}))) addsimprocs [Simplifier.simproc @{theory} "swap_induct_false" ["induct_false ==> PROP P ==> PROP Q"] @@ -1869,7 +1869,7 @@ proof assume "PROP ?ofclass" show "PROP ?eq" - by (tactic {* ALLGOALS (rtac (Drule.unconstrainTs @{thm equals_eq})) *}) + by (tactic {* ALLGOALS (rtac (Thm.unconstrain_allTs @{thm equals_eq})) *}) (fact `PROP ?ofclass`) next assume "PROP ?eq" @@ -1886,7 +1886,6 @@ *} hide_const (open) eq -hide_const eq text {* Cases *} @@ -1962,6 +1961,10 @@ subsubsection {* Evaluation and normalization by evaluation *} +text {* Avoid some named infixes in evaluation environment *} + +code_reserved Eval oo ooo oooo upto downto orf andf mem mem_int mem_string + setup {* Value.add_evaluator ("SML", Codegen.eval_term o ProofContext.theory_of) *} diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Hoare/Hoare_Logic.thy --- a/src/HOL/Hoare/Hoare_Logic.thy Tue May 04 19:57:55 2010 +0200 +++ 
b/src/HOL/Hoare/Hoare_Logic.thy Tue May 04 20:30:22 2010 +0200 @@ -27,18 +27,19 @@ types 'a sem = "'a => 'a => bool" -consts iter :: "nat => 'a bexp => 'a sem => 'a sem" -primrec -"iter 0 b S = (%s s'. s ~: b & (s=s'))" -"iter (Suc n) b S = (%s s'. s : b & (? s''. S s s'' & iter n b S s'' s'))" +inductive Sem :: "'a com \ 'a sem" +where + "Sem (Basic f) s (f s)" +| "Sem c1 s s'' \ Sem c2 s'' s' \ Sem (c1;c2) s s'" +| "s \ b \ Sem c1 s s' \ Sem (IF b THEN c1 ELSE c2 FI) s s'" +| "s \ b \ Sem c2 s s' \ Sem (IF b THEN c1 ELSE c2 FI) s s'" +| "s \ b \ Sem (While b x c) s s" +| "s \ b \ Sem c s s'' \ Sem (While b x c) s'' s' \ + Sem (While b x c) s s'" -consts Sem :: "'a com => 'a sem" -primrec -"Sem(Basic f) s s' = (s' = f s)" -"Sem(c1;c2) s s' = (? s''. Sem c1 s s'' & Sem c2 s'' s')" -"Sem(IF b THEN c1 ELSE c2 FI) s s' = ((s : b --> Sem c1 s s') & - (s ~: b --> Sem c2 s s'))" -"Sem(While b x c) s s' = (? n. iter n b (Sem c) s s')" +inductive_cases [elim!]: + "Sem (Basic f) s s'" "Sem (c1;c2) s s'" + "Sem (IF b THEN c1 ELSE c2 FI) s s'" definition Valid :: "'a bexp \ 'a com \ 'a bexp \ bool" where "Valid p c q == !s s'. Sem c s s' --> s : p --> s' : q" @@ -209,19 +210,18 @@ \ Valid w c1 q \ Valid w' c2 q \ Valid p (Cond b c1 c2) q" by (auto simp:Valid_def) -lemma iter_aux: "! s s'. Sem c s s' --> s : I & s : b --> s' : I ==> - (\s s'. s : I \ iter n b (Sem c) s s' \ s' : I & s' ~: b)"; -apply(induct n) - apply clarsimp -apply(simp (no_asm_use)) -apply blast -done +lemma While_aux: + assumes "Sem (WHILE b INV {i} DO c OD) s s'" + shows "\s s'. Sem c s s' \ s \ I \ s \ b \ s' \ I \ + s \ I \ s' \ I \ s' \ b" + using assms + by (induct "WHILE b INV {i} DO c OD" s s') auto lemma WhileRule: "p \ i \ Valid (i \ b) c i \ i \ (-b) \ q \ Valid p (While b i c) q" apply (clarsimp simp:Valid_def) -apply(drule iter_aux) - prefer 2 apply assumption +apply(drule While_aux) + apply assumption apply blast apply blast done diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Hoare/Hoare_Logic_Abort.thy --- a/src/HOL/Hoare/Hoare_Logic_Abort.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Hoare/Hoare_Logic_Abort.thy Tue May 04 20:30:22 2010 +0200 @@ -25,22 +25,23 @@ types 'a sem = "'a option => 'a option => bool" -consts iter :: "nat => 'a bexp => 'a sem => 'a sem" -primrec -"iter 0 b S = (\s s'. s \ Some ` b \ s=s')" -"iter (Suc n) b S = - (\s s'. s \ Some ` b \ (\s''. S s s'' \ iter n b S s'' s'))" +inductive Sem :: "'a com \ 'a sem" +where + "Sem (Basic f) None None" +| "Sem (Basic f) (Some s) (Some (f s))" +| "Sem Abort s None" +| "Sem c1 s s'' \ Sem c2 s'' s' \ Sem (c1;c2) s s'" +| "Sem (IF b THEN c1 ELSE c2 FI) None None" +| "s \ b \ Sem c1 (Some s) s' \ Sem (IF b THEN c1 ELSE c2 FI) (Some s) s'" +| "s \ b \ Sem c2 (Some s) s' \ Sem (IF b THEN c1 ELSE c2 FI) (Some s) s'" +| "Sem (While b x c) None None" +| "s \ b \ Sem (While b x c) (Some s) (Some s)" +| "s \ b \ Sem c (Some s) s'' \ Sem (While b x c) s'' s' \ + Sem (While b x c) (Some s) s'" -consts Sem :: "'a com => 'a sem" -primrec -"Sem(Basic f) s s' = (case s of None \ s' = None | Some t \ s' = Some(f t))" -"Sem Abort s s' = (s' = None)" -"Sem(c1;c2) s s' = (\s''. Sem c1 s s'' \ Sem c2 s'' s')" -"Sem(IF b THEN c1 ELSE c2 FI) s s' = - (case s of None \ s' = None - | Some t \ ((t \ b \ Sem c1 s s') \ (t \ b \ Sem c2 s s')))" -"Sem(While b x c) s s' = - (if s = None then s' = None else \n. 
iter n b (Sem c) s s')" +inductive_cases [elim!]: + "Sem (Basic f) s s'" "Sem (c1;c2) s s'" + "Sem (IF b THEN c1 ELSE c2 FI) s s'" definition Valid :: "'a bexp \ 'a com \ 'a bexp \ bool" where "Valid p c q == \s s'. Sem c s s' \ s : Some ` p \ s' : Some ` q" @@ -212,23 +213,20 @@ \ Valid w c1 q \ Valid w' c2 q \ Valid p (Cond b c1 c2) q" by (fastsimp simp:Valid_def image_def) -lemma iter_aux: - "! s s'. Sem c s s' \ s \ Some ` (I \ b) \ s' \ Some ` I \ - (\s s'. s \ Some ` I \ iter n b (Sem c) s s' \ s' \ Some ` (I \ -b))"; -apply(unfold image_def) -apply(induct n) - apply clarsimp -apply(simp (no_asm_use)) -apply blast -done +lemma While_aux: + assumes "Sem (WHILE b INV {i} DO c OD) s s'" + shows "\s s'. Sem c s s' \ s \ Some ` (I \ b) \ s' \ Some ` I \ + s \ Some ` I \ s' \ Some ` (I \ -b)" + using assms + by (induct "WHILE b INV {i} DO c OD" s s') auto lemma WhileRule: "p \ i \ Valid (i \ b) c i \ i \ (-b) \ q \ Valid p (While b i c) q" apply(simp add:Valid_def) apply(simp (no_asm) add:image_def) apply clarify -apply(drule iter_aux) - prefer 2 apply assumption +apply(drule While_aux) + apply assumption apply blast apply blast done diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Imperative_HOL/Heap.thy --- a/src/HOL/Imperative_HOL/Heap.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Imperative_HOL/Heap.thy Tue May 04 20:30:22 2010 +0200 @@ -216,6 +216,9 @@ and unequal_arrs [simp]: "a \ a' \ a =!!= a'" unfolding noteq_refs_def noteq_arrs_def by auto +lemma noteq_refs_irrefl: "r =!= r \ False" + unfolding noteq_refs_def by auto + lemma present_new_ref: "ref_present r h \ r =!= fst (ref v h)" by (simp add: ref_present_def new_ref_def ref_def Let_def noteq_refs_def) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Import/HOL/real.imp --- a/src/HOL/Import/HOL/real.imp Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Import/HOL/real.imp Tue May 04 20:30:22 2010 +0200 @@ -251,7 +251,7 @@ "REAL_INV_INV" > "Rings.inverse_inverse_eq" "REAL_INV_EQ_0" > "Rings.inverse_nonzero_iff_nonzero" "REAL_INV_1OVER" > "Rings.inverse_eq_divide" - "REAL_INV_0" > "Rings.division_by_zero_class.inverse_zero" + "REAL_INV_0" > "Rings.division_ring_inverse_zero_class.inverse_zero" "REAL_INVINV" > "Rings.nonzero_inverse_inverse_eq" "REAL_INV1" > "Rings.inverse_1" "REAL_INJ" > "RealDef.real_of_nat_inject" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Import/HOL/realax.imp --- a/src/HOL/Import/HOL/realax.imp Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Import/HOL/realax.imp Tue May 04 20:30:22 2010 +0200 @@ -101,7 +101,7 @@ "REAL_LT_MUL" > "Rings.mult_pos_pos" "REAL_LT_IADD" > "Groups.add_strict_left_mono" "REAL_LDISTRIB" > "Rings.ring_eq_simps_2" - "REAL_INV_0" > "Rings.division_by_zero_class.inverse_zero" + "REAL_INV_0" > "Rings.division_ring_inverse_zero_class.inverse_zero" "REAL_ADD_SYM" > "Finite_Set.AC_add.f.AC_2" "REAL_ADD_LINV" > "HOL4Compat.REAL_ADD_LINV" "REAL_ADD_LID" > "Finite_Set.AC_add.f_e.left_ident" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Import/proof_kernel.ML --- a/src/HOL/Import/proof_kernel.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Import/proof_kernel.ML Tue May 04 20:30:22 2010 +0200 @@ -213,7 +213,7 @@ fun smart_string_of_cterm ct = let val thy = Thm.theory_of_cterm ct - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy val {t,T,...} = rep_cterm ct (* Hack to avoid parse errors with Trueprop *) val ct = (cterm_of thy (HOLogic.dest_Trueprop t) @@ -1249,7 +1249,7 @@ let val hol4rews1 = map (Thm.transfer thy) (HOL4Rewrites.get thy) val hol4ss = Simplifier.global_context thy 
empty_ss - setmksimps single addsimps hol4rews1 + setmksimps (K single) addsimps hol4rews1 in Thm.transfer thy (Simplifier.full_rewrite hol4ss (cterm_of thy t)) end @@ -2065,7 +2065,7 @@ let val (HOLThm args) = norm_hthm (theory_of_thm th) hth in - apsnd strip_shyps args + apsnd Thm.strip_shyps args end fun to_isa_term tm = tm diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Import/shuffler.ML --- a/src/HOL/Import/shuffler.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Import/shuffler.ML Tue May 04 20:30:22 2010 +0200 @@ -489,7 +489,7 @@ let val norms = ShuffleData.get thy val ss = Simplifier.global_context thy empty_ss - setmksimps single + setmksimps (K single) addsimps (map (Thm.transfer thy) norms) addsimprocs [quant_simproc thy, eta_expand_simproc thy,eta_contract_simproc thy] fun chain f th = @@ -502,7 +502,7 @@ t |> disamb_bound thy |> chain (Simplifier.full_rewrite ss) |> chain eta_conversion - |> strip_shyps + |> Thm.strip_shyps val _ = message ("norm_term: " ^ (string_of_thm th)) in th diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Int.thy --- a/src/HOL/Int.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Int.thy Tue May 04 20:30:22 2010 +0200 @@ -324,27 +324,6 @@ end -context linordered_idom -begin - -lemma of_int_le_iff [simp]: - "of_int w \ of_int z \ w \ z" -by (cases w, cases z, simp add: of_int le minus algebra_simps of_nat_add [symmetric] del: of_nat_add) - -text{*Special cases where either operand is zero*} -lemmas of_int_0_le_iff [simp] = of_int_le_iff [of 0, simplified] -lemmas of_int_le_0_iff [simp] = of_int_le_iff [of _ 0, simplified] - -lemma of_int_less_iff [simp]: - "of_int w < of_int z \ w < z" - by (simp add: not_le [symmetric] linorder_not_le [symmetric]) - -text{*Special cases where either operand is zero*} -lemmas of_int_0_less_iff [simp] = of_int_less_iff [of 0, simplified] -lemmas of_int_less_0_iff [simp] = of_int_less_iff [of _ 0, simplified] - -end - text{*Class for unital rings with characteristic zero. Includes non-ordered rings like the complex numbers.*} class ring_char_0 = ring_1 + semiring_char_0 @@ -358,13 +337,47 @@ done text{*Special cases where either operand is zero*} -lemmas of_int_0_eq_iff [simp] = of_int_eq_iff [of 0, simplified] -lemmas of_int_eq_0_iff [simp] = of_int_eq_iff [of _ 0, simplified] +lemma of_int_eq_0_iff [simp]: + "of_int z = 0 \ z = 0" + using of_int_eq_iff [of z 0] by simp + +lemma of_int_0_eq_iff [simp]: + "0 = of_int z \ z = 0" + using of_int_eq_iff [of 0 z] by simp end +context linordered_idom +begin + text{*Every @{text linordered_idom} has characteristic zero.*} -subclass (in linordered_idom) ring_char_0 by intro_locales +subclass ring_char_0 .. 
+ +lemma of_int_le_iff [simp]: + "of_int w \ of_int z \ w \ z" + by (cases w, cases z, simp add: of_int le minus algebra_simps of_nat_add [symmetric] del: of_nat_add) + +lemma of_int_less_iff [simp]: + "of_int w < of_int z \ w < z" + by (simp add: less_le order_less_le) + +lemma of_int_0_le_iff [simp]: + "0 \ of_int z \ 0 \ z" + using of_int_le_iff [of 0 z] by simp + +lemma of_int_le_0_iff [simp]: + "of_int z \ 0 \ z \ 0" + using of_int_le_iff [of z 0] by simp + +lemma of_int_0_less_iff [simp]: + "0 < of_int z \ 0 < z" + using of_int_less_iff [of 0 z] by simp + +lemma of_int_less_0_iff [simp]: + "of_int z < 0 \ z < 0" + using of_int_less_iff [of z 0] by simp + +end lemma of_int_eq_id [simp]: "of_int = id" proof @@ -1995,15 +2008,15 @@ text{*Division By @{text "-1"}*} lemma divide_minus1 [simp]: - "x/-1 = -(x::'a::{field,division_by_zero,number_ring})" + "x/-1 = -(x::'a::{field_inverse_zero, number_ring})" by simp lemma minus1_divide [simp]: - "-1 / (x::'a::{field,division_by_zero,number_ring}) = - (1/x)" + "-1 / (x::'a::{field_inverse_zero, number_ring}) = - (1/x)" by (simp add: divide_inverse) lemma half_gt_zero_iff: - "(0 < r/2) = (0 < (r::'a::{linordered_field,division_by_zero,number_ring}))" + "(0 < r/2) = (0 < (r::'a::{linordered_field_inverse_zero,number_ring}))" by auto lemmas half_gt_zero [simp] = half_gt_zero_iff [THEN iffD2, standard] diff -r aace7a969410 -r 8629ac3efb19 src/HOL/IsaMakefile --- a/src/HOL/IsaMakefile Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/IsaMakefile Tue May 04 20:30:22 2010 +0200 @@ -282,8 +282,7 @@ $(SRC)/Provers/Arith/extract_common_term.ML \ $(SRC)/Tools/Metis/metis.ML \ Tools/ATP_Manager/atp_manager.ML \ - Tools/ATP_Manager/atp_minimal.ML \ - Tools/ATP_Manager/atp_wrapper.ML \ + Tools/ATP_Manager/atp_systems.ML \ Tools/Groebner_Basis/groebner.ML \ Tools/Groebner_Basis/misc.ML \ Tools/Groebner_Basis/normalizer.ML \ @@ -319,6 +318,7 @@ Tools/Sledgehammer/meson_tactic.ML \ Tools/Sledgehammer/metis_tactics.ML \ Tools/Sledgehammer/sledgehammer_fact_filter.ML \ + Tools/Sledgehammer/sledgehammer_fact_minimizer.ML \ Tools/Sledgehammer/sledgehammer_fact_preprocessor.ML \ Tools/Sledgehammer/sledgehammer_fol_clause.ML \ Tools/Sledgehammer/sledgehammer_hol_clause.ML \ @@ -423,7 +423,7 @@ Library/Nat_Bijection.thy $(SRC)/Tools/float.ML \ $(SRC)/HOL/Tools/float_arith.ML Library/positivstellensatz.ML \ Library/reify_data.ML Library/reflection.ML Library/LaTeXsugar.thy \ - Library/OptionalSugar.thy \ + Library/OptionalSugar.thy Library/Convex.thy \ Library/Predicate_Compile_Quickcheck.thy Library/SML_Quickcheck.thy @cd Library; $(ISABELLE_TOOL) usedir $(OUT)/HOL Library @@ -1080,18 +1080,25 @@ $(OUT)/HOL-Multivariate_Analysis: $(OUT)/HOL-SMT \ Multivariate_Analysis/ROOT.ML \ Multivariate_Analysis/document/root.tex \ + Multivariate_Analysis/Brouwer_Fixpoint.thy \ + Multivariate_Analysis/Convex_Euclidean_Space.thy \ + Multivariate_Analysis/Derivative.thy \ + Multivariate_Analysis/Determinants.thy \ + Multivariate_Analysis/Euclidean_Space.thy \ + Multivariate_Analysis/Fashoda.thy \ + Multivariate_Analysis/Finite_Cartesian_Product.thy \ + Multivariate_Analysis/Integration.thy \ + Multivariate_Analysis/Integration.cert \ Multivariate_Analysis/L2_Norm.thy \ Multivariate_Analysis/Multivariate_Analysis.thy \ - Multivariate_Analysis/Determinants.thy \ - Multivariate_Analysis/Finite_Cartesian_Product.thy \ - Multivariate_Analysis/Euclidean_Space.thy \ + Multivariate_Analysis/Operator_Norm.thy \ + Multivariate_Analysis/Path_Connected.thy \ + 
Multivariate_Analysis/Real_Integration.thy \ Multivariate_Analysis/Topology_Euclidean_Space.thy \ - Multivariate_Analysis/Convex_Euclidean_Space.thy \ - Multivariate_Analysis/Brouwer_Fixpoint.thy \ - Multivariate_Analysis/Derivative.thy \ - Multivariate_Analysis/Integration.thy \ - Multivariate_Analysis/Integration.cert \ - Multivariate_Analysis/Real_Integration.thy + Multivariate_Analysis/Vec1.thy Library/Glbs.thy \ + Library/Inner_Product.thy Library/Numeral_Type.thy \ + Library/Convex.thy Library/FrechetDeriv.thy \ + Library/Product_Vector.thy Library/Product_plus.thy @cd Multivariate_Analysis; $(ISABELLE_TOOL) usedir -b -g true $(OUT)/HOL-SMT HOL-Multivariate_Analysis @@ -1105,7 +1112,10 @@ Probability/Borel.thy Probability/Measure.thy \ Probability/Lebesgue.thy Probability/Product_Measure.thy \ Probability/Probability_Space.thy Probability/Information.thy \ - Probability/ex/Dining_Cryptographers.thy + Probability/ex/Dining_Cryptographers.thy Library/FuncSet.thy \ + Library/Convex.thy Library/Product_Vector.thy \ + Library/Product_plus.thy Library/Inner_Product.thy \ + Library/Nat_Bijection.thy @cd Probability; $(ISABELLE_TOOL) usedir -b -g true $(OUT)/HOL HOL-Probability @@ -1295,8 +1305,8 @@ HOL-Quotient_Examples: HOL $(LOG)/HOL-Quotient_Examples.gz $(LOG)/HOL-Quotient_Examples.gz: $(OUT)/HOL \ - Quotient_Examples/FSet.thy \ - Quotient_Examples/LarryInt.thy Quotient_Examples/LarryDatatype.thy + Quotient_Examples/FSet.thy Quotient_Examples/Quotient_Int.thy \ + Quotient_Examples/Quotient_Message.thy @$(ISABELLE_TOOL) usedir $(OUT)/HOL Quotient_Examples diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Lattices.thy --- a/src/HOL/Lattices.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Lattices.thy Tue May 04 20:30:22 2010 +0200 @@ -67,8 +67,8 @@ text {* Dual lattice *} lemma dual_semilattice: - "semilattice_inf (op \) (op >) sup" -by (rule semilattice_inf.intro, rule dual_order) + "class.semilattice_inf (op \) (op >) sup" +by (rule class.semilattice_inf.intro, rule dual_order) (unfold_locales, simp_all add: sup_least) end @@ -235,8 +235,8 @@ begin lemma dual_lattice: - "lattice (op \) (op >) sup inf" - by (rule lattice.intro, rule dual_semilattice, rule semilattice_sup.intro, rule dual_order) + "class.lattice (op \) (op >) sup inf" + by (rule class.lattice.intro, rule dual_semilattice, rule class.semilattice_sup.intro, rule dual_order) (unfold_locales, auto) lemma inf_sup_absorb: "x \ (x \ y) = x" @@ -347,8 +347,8 @@ by(simp add: inf_sup_aci inf_sup_distrib1) lemma dual_distrib_lattice: - "distrib_lattice (op \) (op >) sup inf" - by (rule distrib_lattice.intro, rule dual_lattice) + "class.distrib_lattice (op \) (op >) sup inf" + by (rule class.distrib_lattice.intro, rule dual_lattice) (unfold_locales, fact inf_sup_distrib1) lemmas sup_inf_distrib = @@ -365,13 +365,9 @@ subsection {* Bounded lattices and boolean algebras *} -class bounded_lattice = lattice + top + bot +class bounded_lattice_bot = lattice + bot begin -lemma dual_bounded_lattice: - "bounded_lattice (op \) (op >) (op \) (op \) \ \" - by unfold_locales (auto simp add: less_le_not_le) - lemma inf_bot_left [simp]: "\ \ x = \" by (rule inf_absorb1) simp @@ -380,6 +376,23 @@ "x \ \ = \" by (rule inf_absorb2) simp +lemma sup_bot_left [simp]: + "\ \ x = x" + by (rule sup_absorb2) simp + +lemma sup_bot_right [simp]: + "x \ \ = x" + by (rule sup_absorb1) simp + +lemma sup_eq_bot_iff [simp]: + "x \ y = \ \ x = \ \ y = \" + by (simp add: eq_iff) + +end + +class bounded_lattice_top = lattice + top +begin + lemma sup_top_left [simp]: "\ \ x 
= \" by (rule sup_absorb1) simp @@ -396,21 +409,18 @@ "x \ \ = x" by (rule inf_absorb1) simp -lemma sup_bot_left [simp]: - "\ \ x = x" - by (rule sup_absorb2) simp - -lemma sup_bot_right [simp]: - "x \ \ = x" - by (rule sup_absorb1) simp - lemma inf_eq_top_iff [simp]: "x \ y = \ \ x = \ \ y = \" by (simp add: eq_iff) -lemma sup_eq_bot_iff [simp]: - "x \ y = \ \ x = \ \ y = \" - by (simp add: eq_iff) +end + +class bounded_lattice = bounded_lattice_bot + bounded_lattice_top +begin + +lemma dual_bounded_lattice: + "class.bounded_lattice (op \) (op >) (op \) (op \) \ \" + by unfold_locales (auto simp add: less_le_not_le) end @@ -421,8 +431,8 @@ begin lemma dual_boolean_algebra: - "boolean_algebra (\x y. x \ - y) uminus (op \) (op >) (op \) (op \) \ \" - by (rule boolean_algebra.intro, rule dual_bounded_lattice, rule dual_distrib_lattice) + "class.boolean_algebra (\x y. x \ - y) uminus (op \) (op >) (op \) (op \) \ \" + by (rule class.boolean_algebra.intro, rule dual_bounded_lattice, rule dual_distrib_lattice) (unfold_locales, auto simp add: inf_compl_bot sup_compl_top diff_eq) lemma compl_inf_bot: diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Lazy_Sequence.thy --- a/src/HOL/Lazy_Sequence.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Lazy_Sequence.thy Tue May 04 20:30:22 2010 +0200 @@ -123,41 +123,18 @@ subsection {* Code setup *} -ML {* -signature LAZY_SEQUENCE = -sig - datatype 'a lazy_sequence = Lazy_Sequence of unit -> ('a * 'a lazy_sequence) option - val yield : 'a lazy_sequence -> ('a * 'a lazy_sequence) option - val yieldn : int -> 'a lazy_sequence -> ('a list * 'a lazy_sequence) - val map : ('a -> 'b) -> 'a lazy_sequence -> 'b lazy_sequence -end; - -structure Lazy_Sequence : LAZY_SEQUENCE = -struct - -@{code_datatype lazy_sequence = Lazy_Sequence} - -val yield = @{code yield} +fun anamorph :: "('a \ ('b \ 'a) option) \ code_numeral \ 'a \ 'b list \ 'a" where + "anamorph f k x = (if k = 0 then ([], x) + else case f x of None \ ([], x) | Some (v, y) \ + let (vs, z) = anamorph f (k - 1) y + in (v # vs, z))" -fun anamorph f k x = (if k = 0 then ([], x) - else case f x - of NONE => ([], x) - | SOME (v, y) => let - val (vs, z) = anamorph f (k - 1) y - in (v :: vs, z) end); - -fun yieldn S = anamorph yield S; +definition yieldn :: "code_numeral \ 'a lazy_sequence \ 'a list \ 'a lazy_sequence" where + "yieldn = anamorph yield" -val map = @{code map} - -end; -*} - -code_reserved Eval Lazy_Sequence - -code_type lazy_sequence (Eval "_/ Lazy'_Sequence.lazy'_sequence") - -code_const Lazy_Sequence (Eval "Lazy'_Sequence.Lazy'_Sequence") +code_reflect Lazy_Sequence + datatypes lazy_sequence = Lazy_Sequence + functions map yield yieldn section {* With Hit Bound Value *} text {* assuming in negative context *} diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/Abstract_Rat.thy --- a/src/HOL/Library/Abstract_Rat.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/Abstract_Rat.thy Tue May 04 20:30:22 2010 +0200 @@ -5,7 +5,7 @@ header {* Abstract rational numbers *} theory Abstract_Rat -imports GCD Main +imports Complex_Main begin types Num = "int \ int" @@ -184,7 +184,7 @@ lemma isnormNum_unique[simp]: assumes na: "isnormNum x" and nb: "isnormNum y" - shows "((INum x ::'a::{ring_char_0,field, division_by_zero}) = INum y) = (x = y)" (is "?lhs = ?rhs") + shows "((INum x ::'a::{field_char_0, field_inverse_zero}) = INum y) = (x = y)" (is "?lhs = ?rhs") proof have "\ a b a' b'. 
x = (a,b) \ y = (a',b')" by auto then obtain a b a' b' where xy[simp]: "x = (a,b)" "y=(a',b')" by blast @@ -217,11 +217,11 @@ qed -lemma isnormNum0[simp]: "isnormNum x \ (INum x = (0::'a::{ring_char_0, field,division_by_zero})) = (x = 0\<^sub>N)" +lemma isnormNum0[simp]: "isnormNum x \ (INum x = (0::'a::{field_char_0, field_inverse_zero})) = (x = 0\<^sub>N)" unfolding INum_int(2)[symmetric] by (rule isnormNum_unique, simp_all) -lemma of_int_div_aux: "d ~= 0 ==> ((of_int x)::'a::{field, ring_char_0}) / (of_int d) = +lemma of_int_div_aux: "d ~= 0 ==> ((of_int x)::'a::field_char_0) / (of_int d) = of_int (x div d) + (of_int (x mod d)) / ((of_int d)::'a)" proof - assume "d ~= 0" @@ -238,14 +238,14 @@ qed lemma of_int_div: "(d::int) ~= 0 ==> d dvd n ==> - (of_int(n div d)::'a::{field, ring_char_0}) = of_int n / of_int d" + (of_int(n div d)::'a::field_char_0) = of_int n / of_int d" apply (frule of_int_div_aux [of d n, where ?'a = 'a]) apply simp apply (simp add: dvd_eq_mod_eq_0) done -lemma normNum[simp]: "INum (normNum x) = (INum x :: 'a::{ring_char_0,field, division_by_zero})" +lemma normNum[simp]: "INum (normNum x) = (INum x :: 'a::{field_char_0, field_inverse_zero})" proof- have "\ a b. x = (a,b)" by auto then obtain a b where x[simp]: "x = (a,b)" by blast @@ -260,7 +260,7 @@ ultimately show ?thesis by blast qed -lemma INum_normNum_iff: "(INum x ::'a::{field, division_by_zero, ring_char_0}) = INum y \ normNum x = normNum y" (is "?lhs = ?rhs") +lemma INum_normNum_iff: "(INum x ::'a::{field_char_0, field_inverse_zero}) = INum y \ normNum x = normNum y" (is "?lhs = ?rhs") proof - have "normNum x = normNum y \ (INum (normNum x) :: 'a) = INum (normNum y)" by (simp del: normNum) @@ -268,7 +268,7 @@ finally show ?thesis by simp qed -lemma Nadd[simp]: "INum (x +\<^sub>N y) = INum x + (INum y :: 'a :: {ring_char_0,division_by_zero,field})" +lemma Nadd[simp]: "INum (x +\<^sub>N y) = INum x + (INum y :: 'a :: {field_char_0, field_inverse_zero})" proof- let ?z = "0:: 'a" have " \ a b. x = (a,b)" " \ a' b'. y = (a',b')" by auto @@ -300,7 +300,7 @@ ultimately show ?thesis by blast qed -lemma Nmul[simp]: "INum (x *\<^sub>N y) = INum x * (INum y:: 'a :: {ring_char_0,division_by_zero,field}) " +lemma Nmul[simp]: "INum (x *\<^sub>N y) = INum x * (INum y:: 'a :: {field_char_0, field_inverse_zero}) " proof- let ?z = "0::'a" have " \ a b. x = (a,b)" " \ a' b'. y = (a',b')" by auto @@ -323,16 +323,16 @@ lemma Nneg[simp]: "INum (~\<^sub>N x) = - (INum x ::'a:: field)" by (simp add: Nneg_def split_def INum_def) -lemma Nsub[simp]: shows "INum (x -\<^sub>N y) = INum x - (INum y:: 'a :: {ring_char_0,division_by_zero,field})" +lemma Nsub[simp]: shows "INum (x -\<^sub>N y) = INum x - (INum y:: 'a :: {field_char_0, field_inverse_zero})" by (simp add: Nsub_def split_def) -lemma Ninv[simp]: "INum (Ninv x) = (1::'a :: {division_by_zero,field}) / (INum x)" +lemma Ninv[simp]: "INum (Ninv x) = (1::'a :: field_inverse_zero) / (INum x)" by (simp add: Ninv_def INum_def split_def) -lemma Ndiv[simp]: "INum (x \
\<^sub>N y) = INum x / (INum y ::'a :: {ring_char_0, division_by_zero,field})" by (simp add: Ndiv_def) +lemma Ndiv[simp]: "INum (x \
\<^sub>N y) = INum x / (INum y ::'a :: {field_char_0, field_inverse_zero})" by (simp add: Ndiv_def) lemma Nlt0_iff[simp]: assumes nx: "isnormNum x" - shows "((INum x :: 'a :: {ring_char_0,division_by_zero,linordered_field})< 0) = 0>\<^sub>N x " + shows "((INum x :: 'a :: {field_char_0, linordered_field_inverse_zero})< 0) = 0>\<^sub>N x " proof- have " \ a b. x = (a,b)" by simp then obtain a b where x[simp]:"x = (a,b)" by blast @@ -345,7 +345,7 @@ qed lemma Nle0_iff[simp]:assumes nx: "isnormNum x" - shows "((INum x :: 'a :: {ring_char_0,division_by_zero,linordered_field}) \ 0) = 0\\<^sub>N x" + shows "((INum x :: 'a :: {field_char_0, linordered_field_inverse_zero}) \ 0) = 0\\<^sub>N x" proof- have " \ a b. x = (a,b)" by simp then obtain a b where x[simp]:"x = (a,b)" by blast @@ -357,7 +357,7 @@ ultimately show ?thesis by blast qed -lemma Ngt0_iff[simp]:assumes nx: "isnormNum x" shows "((INum x :: 'a :: {ring_char_0,division_by_zero,linordered_field})> 0) = 0<\<^sub>N x" +lemma Ngt0_iff[simp]:assumes nx: "isnormNum x" shows "((INum x :: 'a :: {field_char_0, linordered_field_inverse_zero})> 0) = 0<\<^sub>N x" proof- have " \ a b. x = (a,b)" by simp then obtain a b where x[simp]:"x = (a,b)" by blast @@ -369,7 +369,7 @@ ultimately show ?thesis by blast qed lemma Nge0_iff[simp]:assumes nx: "isnormNum x" - shows "((INum x :: 'a :: {ring_char_0,division_by_zero,linordered_field}) \ 0) = 0\\<^sub>N x" + shows "((INum x :: 'a :: {field_char_0, linordered_field_inverse_zero}) \ 0) = 0\\<^sub>N x" proof- have " \ a b. x = (a,b)" by simp then obtain a b where x[simp]:"x = (a,b)" by blast @@ -382,7 +382,7 @@ qed lemma Nlt_iff[simp]: assumes nx: "isnormNum x" and ny: "isnormNum y" - shows "((INum x :: 'a :: {ring_char_0,division_by_zero,linordered_field}) < INum y) = (x <\<^sub>N y)" + shows "((INum x :: 'a :: {field_char_0, linordered_field_inverse_zero}) < INum y) = (x <\<^sub>N y)" proof- let ?z = "0::'a" have "((INum x ::'a) < INum y) = (INum (x -\<^sub>N y) < ?z)" using nx ny by simp @@ -391,7 +391,7 @@ qed lemma Nle_iff[simp]: assumes nx: "isnormNum x" and ny: "isnormNum y" - shows "((INum x :: 'a :: {ring_char_0,division_by_zero,linordered_field})\ INum y) = (x \\<^sub>N y)" + shows "((INum x :: 'a :: {field_char_0, linordered_field_inverse_zero})\ INum y) = (x \\<^sub>N y)" proof- have "((INum x ::'a) \ INum y) = (INum (x -\<^sub>N y) \ (0::'a))" using nx ny by simp also have "\ = (0\\<^sub>N (x -\<^sub>N y))" using Nle0_iff[OF Nsub_normN[OF ny]] by simp @@ -399,7 +399,7 @@ qed lemma Nadd_commute: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "x +\<^sub>N y = y +\<^sub>N x" proof- have n: "isnormNum (x +\<^sub>N y)" "isnormNum (y +\<^sub>N x)" by simp_all @@ -408,7 +408,7 @@ qed lemma [simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "(0, b) +\<^sub>N y = normNum y" and "(a, 0) +\<^sub>N y = normNum y" and "x +\<^sub>N (0, b) = normNum x" @@ -420,7 +420,7 @@ done lemma normNum_nilpotent_aux[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" assumes nx: "isnormNum x" shows "normNum x = x" proof- @@ -432,7 +432,7 @@ qed lemma normNum_nilpotent[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "normNum (normNum 
x) = normNum x" by simp @@ -440,11 +440,11 @@ by (simp_all add: normNum_def) lemma normNum_Nadd: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "normNum (x +\<^sub>N y) = x +\<^sub>N y" by simp lemma Nadd_normNum1[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "normNum x +\<^sub>N y = x +\<^sub>N y" proof- have n: "isnormNum (normNum x +\<^sub>N y)" "isnormNum (x +\<^sub>N y)" by simp_all @@ -454,7 +454,7 @@ qed lemma Nadd_normNum2[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "x +\<^sub>N normNum y = x +\<^sub>N y" proof- have n: "isnormNum (x +\<^sub>N normNum y)" "isnormNum (x +\<^sub>N y)" by simp_all @@ -464,7 +464,7 @@ qed lemma Nadd_assoc: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" shows "x +\<^sub>N y +\<^sub>N z = x +\<^sub>N (y +\<^sub>N z)" proof- have n: "isnormNum (x +\<^sub>N y +\<^sub>N z)" "isnormNum (x +\<^sub>N (y +\<^sub>N z))" by simp_all @@ -476,7 +476,7 @@ by (simp add: Nmul_def split_def Let_def gcd_commute_int mult_commute) lemma Nmul_assoc: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" assumes nx: "isnormNum x" and ny:"isnormNum y" and nz:"isnormNum z" shows "x *\<^sub>N y *\<^sub>N z = x *\<^sub>N (y *\<^sub>N z)" proof- @@ -487,7 +487,7 @@ qed lemma Nsub0: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" assumes x: "isnormNum x" and y:"isnormNum y" shows "(x -\<^sub>N y = 0\<^sub>N) = (x = y)" proof- { fix h :: 'a @@ -502,7 +502,7 @@ by (simp_all add: Nmul_def Let_def split_def) lemma Nmul_eq0[simp]: - assumes "SORT_CONSTRAINT('a::{ring_char_0,division_by_zero,field})" + assumes "SORT_CONSTRAINT('a::{field_char_0, field_inverse_zero})" assumes nx:"isnormNum x" and ny: "isnormNum y" shows "(x*\<^sub>N y = 0\<^sub>N) = (x = 0\<^sub>N \ y = 0\<^sub>N)" proof- diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/Binomial.thy --- a/src/HOL/Library/Binomial.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/Binomial.thy Tue May 04 20:30:22 2010 +0200 @@ -236,10 +236,10 @@ have th1: "(\n\{1\nat..n}. a + of_nat n) = (\n\{0\nat..n - 1}. a + 1 + of_nat n)" apply (rule setprod_reindex_cong[where f = "Suc"]) - using n0 by (auto simp add: expand_fun_eq ring_simps) + using n0 by (auto simp add: expand_fun_eq field_simps) have ?thesis apply (simp add: pochhammer_def) unfolding setprod_insert[OF th0, unfolded eq] - using th1 by (simp add: ring_simps)} + using th1 by (simp add: field_simps)} ultimately show ?thesis by blast qed @@ -378,10 +378,10 @@ by simp from n h th0 have "fact k * fact (n - k) * (n choose k) = k * (fact h * fact (m - h) * (m choose h)) + (m - h) * (fact k * fact (m - k) * (m choose k))" - by (simp add: ring_simps) + by (simp add: field_simps) also have "\ = (k + (m - h)) * fact m" using H[rule_format, OF mn hm'] H[rule_format, OF mn km] - by (simp add: ring_simps) + by (simp add: field_simps) finally have ?ths using h n km by simp} moreover have "n=0 \ k = 0 \ k = n \ (EX m h. 
n=Suc m \ k = Suc h \ h < m)" using kn by presburger ultimately show ?ths by blast @@ -391,13 +391,13 @@ assumes kn: "k \ n" shows "(of_nat (n choose k) :: 'a::field_char_0) = of_nat (fact n) / (of_nat (fact k) * of_nat (fact (n - k)))" using binomial_fact_lemma[OF kn] - by (simp add: field_eq_simps of_nat_mult [symmetric]) + by (simp add: field_simps of_nat_mult [symmetric]) lemma binomial_gbinomial: "of_nat (n choose k) = of_nat n gchoose k" proof- {assume kn: "k > n" from kn binomial_eq_0[OF kn] have ?thesis - by (simp add: gbinomial_pochhammer field_eq_simps + by (simp add: gbinomial_pochhammer field_simps pochhammer_of_nat_eq_0_iff)} moreover {assume "k=0" then have ?thesis by simp} @@ -414,13 +414,13 @@ apply clarsimp apply (presburger) apply presburger - by (simp add: expand_fun_eq ring_simps of_nat_add[symmetric] del: of_nat_add) + by (simp add: expand_fun_eq field_simps of_nat_add[symmetric] del: of_nat_add) have th0: "finite {1..n - Suc h}" "finite {n - h .. n}" "{1..n - Suc h} \ {n - h .. n} = {}" and eq3: "{1..n - Suc h} \ {n - h .. n} = {1..n}" using h kn by auto from eq[symmetric] have ?thesis using kn apply (simp add: binomial_fact[OF kn, where ?'a = 'a] - gbinomial_pochhammer field_eq_simps pochhammer_Suc_setprod) + gbinomial_pochhammer field_simps pochhammer_Suc_setprod) apply (simp add: pochhammer_Suc_setprod fact_altdef_nat h of_nat_setprod setprod_timesf[symmetric] eq' del: One_nat_def power_Suc) unfolding setprod_Un_disjoint[OF th0, unfolded eq3, of "of_nat:: nat \ 'a"] eq[unfolded h] unfolding mult_assoc[symmetric] @@ -449,9 +449,9 @@ have "?r = ((- 1) ^n * pochhammer (- a) n / of_nat (fact n)) * (of_nat n - (- a + of_nat n))" unfolding gbinomial_pochhammer pochhammer_Suc fact_Suc of_nat_mult right_diff_distrib power_Suc - by (simp add: field_eq_simps del: of_nat_Suc) + by (simp add: field_simps del: of_nat_Suc) also have "\ = ?l" unfolding gbinomial_pochhammer - by (simp add: ring_simps) + by (simp add: field_simps) finally show ?thesis .. qed @@ -482,17 +482,17 @@ have "of_nat (fact (Suc k)) * (a gchoose k + (a gchoose (Suc k))) = ((a gchoose Suc h) * of_nat (fact (Suc h)) * of_nat (Suc k)) + (\i\{0\nat..Suc h}. a - of_nat i)" unfolding h - apply (simp add: ring_simps del: fact_Suc) + apply (simp add: field_simps del: fact_Suc) unfolding gbinomial_mult_fact' apply (subst fact_Suc) unfolding of_nat_mult apply (subst mult_commute) unfolding mult_assoc unfolding gbinomial_mult_fact - by (simp add: ring_simps) + by (simp add: field_simps) also have "\ = (\i\{0..h}. a - of_nat i) * (a + 1)" unfolding gbinomial_mult_fact' setprod_nat_ivl_Suc - by (simp add: ring_simps h) + by (simp add: field_simps h) also have "\ = (\i\{0..k}. 
(a + 1) - of_nat i)" using eq0 unfolding h setprod_nat_ivl_1_Suc diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/Bit.thy --- a/src/HOL/Library/Bit.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/Bit.thy Tue May 04 20:30:22 2010 +0200 @@ -49,7 +49,7 @@ subsection {* Type @{typ bit} forms a field *} -instantiation bit :: "{field, division_by_zero}" +instantiation bit :: field_inverse_zero begin definition plus_bit_def: diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/Convex.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Library/Convex.thy Tue May 04 20:30:22 2010 +0200 @@ -0,0 +1,617 @@ +(* Title: HOL/Library/Convex.thy + Author: Armin Heller, TU Muenchen + Author: Johannes Hoelzl, TU Muenchen +*) + +header {* Convexity in real vector spaces *} + +theory Convex +imports Product_Vector +begin + +subsection {* Convexity. *} + +definition + convex :: "'a::real_vector set \ bool" where + "convex s \ (\x\s. \y\s. \u\0. \v\0. u + v = 1 \ u *\<^sub>R x + v *\<^sub>R y \ s)" + +lemma convex_alt: + "convex s \ (\x\s. \y\s. \u. 0 \ u \ u \ 1 \ ((1 - u) *\<^sub>R x + u *\<^sub>R y) \ s)" + (is "_ \ ?alt") +proof + assume alt[rule_format]: ?alt + { fix x y and u v :: real assume mem: "x \ s" "y \ s" + assume "0 \ u" "0 \ v" "u + v = 1" + moreover hence "u = 1 - v" by auto + ultimately have "u *\<^sub>R x + v *\<^sub>R y \ s" using alt[OF mem] by auto } + thus "convex s" unfolding convex_def by auto +qed (auto simp: convex_def) + +lemma mem_convex: + assumes "convex s" "a \ s" "b \ s" "0 \ u" "u \ 1" + shows "((1 - u) *\<^sub>R a + u *\<^sub>R b) \ s" + using assms unfolding convex_alt by auto + +lemma convex_empty[intro]: "convex {}" + unfolding convex_def by simp + +lemma convex_singleton[intro]: "convex {a}" + unfolding convex_def by (auto simp: scaleR_left_distrib[symmetric]) + +lemma convex_UNIV[intro]: "convex UNIV" + unfolding convex_def by auto + +lemma convex_Inter: "(\s\f. convex s) ==> convex(\ f)" + unfolding convex_def by auto + +lemma convex_Int: "convex s \ convex t \ convex (s \ t)" + unfolding convex_def by auto + +lemma convex_halfspace_le: "convex {x. inner a x \ b}" + unfolding convex_def + by (auto simp: inner_add inner_scaleR intro!: convex_bound_le) + +lemma convex_halfspace_ge: "convex {x. inner a x \ b}" +proof - + have *:"{x. inner a x \ b} = {x. inner (-a) x \ -b}" by auto + show ?thesis unfolding * using convex_halfspace_le[of "-a" "-b"] by auto +qed + +lemma convex_hyperplane: "convex {x. inner a x = b}" +proof- + have *:"{x. inner a x = b} = {x. inner a x \ b} \ {x. inner a x \ b}" by auto + show ?thesis using convex_halfspace_le convex_halfspace_ge + by (auto intro!: convex_Int simp: *) +qed + +lemma convex_halfspace_lt: "convex {x. inner a x < b}" + unfolding convex_def + by (auto simp: convex_bound_lt inner_add) + +lemma convex_halfspace_gt: "convex {x. inner a x > b}" + using convex_halfspace_lt[of "-a" "-b"] by auto + +lemma convex_real_interval: + fixes a b :: "real" + shows "convex {a..}" and "convex {..b}" + and "convex {a<..}" and "convex {.. inner 1 x}" by auto + thus 1: "convex {a..}" by (simp only: convex_halfspace_ge) + have "{..b} = {x. inner 1 x \ b}" by auto + thus 2: "convex {..b}" by (simp only: convex_halfspace_le) + have "{a<..} = {x. a < inner 1 x}" by auto + thus 3: "convex {a<..}" by (simp only: convex_halfspace_gt) + have "{.. {..b}" by auto + thus "convex {a..b}" by (simp only: convex_Int 1 2) + have "{a<..b} = {a<..} \ {..b}" by auto + thus "convex {a<..b}" by (simp only: convex_Int 3 2) + have "{a.. {.. {.. i \ s. 
a i) = 1" + assumes "\ i. i \ s \ a i \ 0" and "\ i. i \ s \ y i \ C" + shows "(\ j \ s. a j *\<^sub>R y j) \ C" +using assms +proof (induct s arbitrary:a rule:finite_induct) + case empty thus ?case by auto +next + case (insert i s) note asms = this + { assume "a i = 1" + hence "(\ j \ s. a j) = 0" + using asms by auto + hence "\ j. j \ s \ a j = 0" + using setsum_nonneg_0[where 'b=real] asms by fastsimp + hence ?case using asms by auto } + moreover + { assume asm: "a i \ 1" + from asms have yai: "y i \ C" "a i \ 0" by auto + have fis: "finite (insert i s)" using asms by auto + hence ai1: "a i \ 1" using setsum_nonneg_leq_bound[of "insert i s" a 1] asms by simp + hence "a i < 1" using asm by auto + hence i0: "1 - a i > 0" by auto + let "?a j" = "a j / (1 - a i)" + { fix j assume "j \ s" + hence "?a j \ 0" + using i0 asms divide_nonneg_pos + by fastsimp } note a_nonneg = this + have "(\ j \ insert i s. a j) = 1" using asms by auto + hence "(\ j \ s. a j) = 1 - a i" using setsum.insert asms by fastsimp + hence "(\ j \ s. a j) / (1 - a i) = 1" using i0 by auto + hence a1: "(\ j \ s. ?a j) = 1" unfolding divide.setsum by simp + from this asms + have "(\j\s. ?a j *\<^sub>R y j) \ C" using a_nonneg by fastsimp + hence "a i *\<^sub>R y i + (1 - a i) *\<^sub>R (\ j \ s. ?a j *\<^sub>R y j) \ C" + using asms[unfolded convex_def, rule_format] yai ai1 by auto + hence "a i *\<^sub>R y i + (\ j \ s. (1 - a i) *\<^sub>R (?a j *\<^sub>R y j)) \ C" + using scaleR_right.setsum[of "(1 - a i)" "\ j. ?a j *\<^sub>R y j" s] by auto + hence "a i *\<^sub>R y i + (\ j \ s. a j *\<^sub>R y j) \ C" using i0 by auto + hence ?case using setsum.insert asms by auto } + ultimately show ?case by auto +qed + +lemma convex: + shows "convex s \ (\(k::nat) u x. (\i. 1\i \ i\k \ 0 \ u i \ x i \s) \ (setsum u {1..k} = 1) + \ setsum (\i. u i *\<^sub>R x i) {1..k} \ s)" +proof safe + fix k :: nat fix u :: "nat \ real" fix x + assume "convex s" + "\i. 1 \ i \ i \ k \ 0 \ u i \ x i \ s" + "setsum u {1..k} = 1" + from this convex_setsum[of "{1 .. k}" s] + show "(\j\{1 .. k}. u j *\<^sub>R x j) \ s" by auto +next + assume asm: "\k u x. (\ i :: nat. 1 \ i \ i \ k \ 0 \ u i \ x i \ s) \ setsum u {1..k} = 1 + \ (\i = 1..k. u i *\<^sub>R (x i :: 'a)) \ s" + { fix \ :: real fix x y :: 'a assume xy: "x \ s" "y \ s" assume mu: "\ \ 0" "\ \ 1" + let "?u i" = "if (i :: nat) = 1 then \ else 1 - \" + let "?x i" = "if (i :: nat) = 1 then x else y" + have "{1 :: nat .. 2} \ - {x. x = 1} = {2}" by auto + hence card: "card ({1 :: nat .. 2} \ - {x. x = 1}) = 1" by simp + hence "setsum ?u {1 .. 2} = 1" + using setsum_cases[of "{(1 :: nat) .. 2}" "\ x. x = 1" "\ x. \" "\ x. 1 - \"] + by auto + from this asm[rule_format, of "2" ?u ?x] + have s: "(\j \ {1..2}. ?u j *\<^sub>R ?x j) \ s" + using mu xy by auto + have grarr: "(\j \ {Suc (Suc 0)..2}. ?u j *\<^sub>R ?x j) = (1 - \) *\<^sub>R y" + using setsum_head_Suc[of "Suc (Suc 0)" 2 "\ j. (1 - \) *\<^sub>R y"] by auto + from setsum_head_Suc[of "Suc 0" 2 "\ j. ?u j *\<^sub>R ?x j", simplified this] + have "(\j \ {1..2}. ?u j *\<^sub>R ?x j) = \ *\<^sub>R x + (1 - \) *\<^sub>R y" by auto + hence "(1 - \) *\<^sub>R y + \ *\<^sub>R x \ s" using s by (auto simp:add_commute) } + thus "convex s" unfolding convex_alt by auto +qed + + +lemma convex_explicit: + fixes s :: "'a::real_vector set" + shows "convex s \ + (\t u. finite t \ t \ s \ (\x\t. 0 \ u x) \ setsum u t = 1 \ setsum (\x. u x *\<^sub>R x) t \ s)" +proof safe + fix t fix u :: "'a \ real" + assume "convex s" "finite t" + "t \ s" "\x\t. 
0 \ u x" "setsum u t = 1" + thus "(\x\t. u x *\<^sub>R x) \ s" + using convex_setsum[of t s u "\ x. x"] by auto +next + assume asm0: "\t. \ u. finite t \ t \ s \ (\x\t. 0 \ u x) + \ setsum u t = 1 \ (\x\t. u x *\<^sub>R x) \ s" + show "convex s" + unfolding convex_alt + proof safe + fix x y fix \ :: real + assume asm: "x \ s" "y \ s" "0 \ \" "\ \ 1" + { assume "x \ y" + hence "(1 - \) *\<^sub>R x + \ *\<^sub>R y \ s" + using asm0[rule_format, of "{x, y}" "\ z. if z = x then 1 - \ else \"] + asm by auto } + moreover + { assume "x = y" + hence "(1 - \) *\<^sub>R x + \ *\<^sub>R y \ s" + using asm0[rule_format, of "{x, y}" "\ z. 1"] + asm by (auto simp:field_simps real_vector.scale_left_diff_distrib) } + ultimately show "(1 - \) *\<^sub>R x + \ *\<^sub>R y \ s" by blast + qed +qed + +lemma convex_finite: assumes "finite s" + shows "convex s \ (\u. (\x\s. 0 \ u x) \ setsum u s = 1 + \ setsum (\x. u x *\<^sub>R x) s \ s)" + unfolding convex_explicit +proof (safe elim!: conjE) + fix t u assume sum: "\u. (\x\s. 0 \ u x) \ setsum u s = 1 \ (\x\s. u x *\<^sub>R x) \ s" + and as: "finite t" "t \ s" "\x\t. 0 \ u x" "setsum u t = (1::real)" + have *:"s \ t = t" using as(2) by auto + have if_distrib_arg: "\P f g x. (if P then f else g) x = (if P then f x else g x)" by simp + show "(\x\t. u x *\<^sub>R x) \ s" + using sum[THEN spec[where x="\x. if x\t then u x else 0"]] as * + by (auto simp: assms setsum_cases if_distrib if_distrib_arg) +qed (erule_tac x=s in allE, erule_tac x=u in allE, auto) + +definition + convex_on :: "'a::real_vector set \ ('a \ real) \ bool" where + "convex_on s f \ + (\x\s. \y\s. \u\0. \v\0. u + v = 1 \ f (u *\<^sub>R x + v *\<^sub>R y) \ u * f x + v * f y)" + +lemma convex_on_subset: "convex_on t f \ s \ t \ convex_on s f" + unfolding convex_on_def by auto + +lemma convex_add[intro]: + assumes "convex_on s f" "convex_on s g" + shows "convex_on s (\x. f x + g x)" +proof- + { fix x y assume "x\s" "y\s" moreover + fix u v ::real assume "0 \ u" "0 \ v" "u + v = 1" + ultimately have "f (u *\<^sub>R x + v *\<^sub>R y) + g (u *\<^sub>R x + v *\<^sub>R y) \ (u * f x + v * f y) + (u * g x + v * g y)" + using assms unfolding convex_on_def by (auto simp add:add_mono) + hence "f (u *\<^sub>R x + v *\<^sub>R y) + g (u *\<^sub>R x + v *\<^sub>R y) \ u * (f x + g x) + v * (f y + g y)" by (simp add: field_simps) } + thus ?thesis unfolding convex_on_def by auto +qed + +lemma convex_cmul[intro]: + assumes "0 \ (c::real)" "convex_on s f" + shows "convex_on s (\x. c * f x)" +proof- + have *:"\u c fx v fy ::real. u * (c * fx) + v * (c * fy) = c * (u * fx + v * fy)" by (simp add: field_simps) + show ?thesis using assms(2) and mult_mono1[OF _ assms(1)] unfolding convex_on_def and * by auto +qed + +lemma convex_lower: + assumes "convex_on s f" "x\s" "y \ s" "0 \ u" "0 \ v" "u + v = 1" + shows "f (u *\<^sub>R x + v *\<^sub>R y) \ max (f x) (f y)" +proof- + let ?m = "max (f x) (f y)" + have "u * f x + v * f y \ u * max (f x) (f y) + v * max (f x) (f y)" + using assms(4,5) by(auto simp add: mult_mono1 add_mono) + also have "\ = max (f x) (f y)" using assms(6) unfolding distrib[THEN sym] by auto + finally show ?thesis + using assms unfolding convex_on_def by fastsimp +qed + +lemma convex_distance[intro]: + fixes s :: "'a::real_normed_vector set" + shows "convex_on s (\x. 
dist a x)" +proof(auto simp add: convex_on_def dist_norm) + fix x y assume "x\s" "y\s" + fix u v ::real assume "0 \ u" "0 \ v" "u + v = 1" + have "a = u *\<^sub>R a + v *\<^sub>R a" unfolding scaleR_left_distrib[THEN sym] and `u+v=1` by simp + hence *:"a - (u *\<^sub>R x + v *\<^sub>R y) = (u *\<^sub>R (a - x)) + (v *\<^sub>R (a - y))" + by (auto simp add: algebra_simps) + show "norm (a - (u *\<^sub>R x + v *\<^sub>R y)) \ u * norm (a - x) + v * norm (a - y)" + unfolding * using norm_triangle_ineq[of "u *\<^sub>R (a - x)" "v *\<^sub>R (a - y)"] + using `0 \ u` `0 \ v` by auto +qed + +subsection {* Arithmetic operations on sets preserve convexity. *} +lemma convex_scaling: + assumes "convex s" + shows"convex ((\x. c *\<^sub>R x) ` s)" +using assms unfolding convex_def image_iff +proof safe + fix x xa y xb :: "'a::real_vector" fix u v :: real + assume asm: "\x\s. \y\s. \u\0. \v\0. u + v = 1 \ u *\<^sub>R x + v *\<^sub>R y \ s" + "xa \ s" "xb \ s" "0 \ u" "0 \ v" "u + v = 1" + show "\x\s. u *\<^sub>R c *\<^sub>R xa + v *\<^sub>R c *\<^sub>R xb = c *\<^sub>R x" + using bexI[of _ "u *\<^sub>R xa +v *\<^sub>R xb"] asm by (auto simp add: algebra_simps) +qed + +lemma convex_negations: "convex s \ convex ((\x. -x)` s)" +using assms unfolding convex_def image_iff +proof safe + fix x xa y xb :: "'a::real_vector" fix u v :: real + assume asm: "\x\s. \y\s. \u\0. \v\0. u + v = 1 \ u *\<^sub>R x + v *\<^sub>R y \ s" + "xa \ s" "xb \ s" "0 \ u" "0 \ v" "u + v = 1" + show "\x\s. u *\<^sub>R - xa + v *\<^sub>R - xb = - x" + using bexI[of _ "u *\<^sub>R xa +v *\<^sub>R xb"] asm by auto +qed + +lemma convex_sums: + assumes "convex s" "convex t" + shows "convex {x + y| x y. x \ s \ y \ t}" +using assms unfolding convex_def image_iff +proof safe + fix xa xb ya yb assume xy:"xa\s" "xb\s" "ya\t" "yb\t" + fix u v ::real assume uv:"0 \ u" "0 \ v" "u + v = 1" + show "\x y. u *\<^sub>R (xa + ya) + v *\<^sub>R (xb + yb) = x + y \ x \ s \ y \ t" + using exI[of _ "u *\<^sub>R xa + v *\<^sub>R xb"] exI[of _ "u *\<^sub>R ya + v *\<^sub>R yb"] + assms[unfolded convex_def] uv xy by (auto simp add:scaleR_right_distrib) +qed + +lemma convex_differences: + assumes "convex s" "convex t" + shows "convex {x - y| x y. x \ s \ y \ t}" +proof - + have "{x - y| x y. x \ s \ y \ t} = {x + y |x y. x \ s \ y \ uminus ` t}" + proof safe + fix x x' y assume "x' \ s" "y \ t" + thus "\x y'. x' - y = x + y' \ x \ s \ y' \ uminus ` t" + using exI[of _ x'] exI[of _ "-y"] by auto + next + fix x x' y y' assume "x' \ s" "y' \ t" + thus "\x y. x' + - y' = x - y \ x \ s \ y \ t" + using exI[of _ x'] exI[of _ y'] by auto + qed + thus ?thesis using convex_sums[OF assms(1) convex_negations[OF assms(2)]] by auto +qed + +lemma convex_translation: assumes "convex s" shows "convex ((\x. a + x) ` s)" +proof- have "{a + y |y. y \ s} = (\x. a + x) ` s" by auto + thus ?thesis using convex_sums[OF convex_singleton[of a] assms] by auto qed + +lemma convex_affinity: assumes "convex s" shows "convex ((\x. a + c *\<^sub>R x) ` s)" +proof- have "(\x. 
a + c *\<^sub>R x) ` s = op + a ` op *\<^sub>R c ` s" by auto + thus ?thesis using convex_translation[OF convex_scaling[OF assms], of a c] by auto qed + +lemma convex_linear_image: + assumes c:"convex s" and l:"bounded_linear f" + shows "convex(f ` s)" +proof(auto simp add: convex_def) + interpret f: bounded_linear f by fact + fix x y assume xy:"x \ s" "y \ s" + fix u v ::real assume uv:"0 \ u" "0 \ v" "u + v = 1" + show "u *\<^sub>R f x + v *\<^sub>R f y \ f ` s" unfolding image_iff + using bexI[of _ "u *\<^sub>R x + v *\<^sub>R y"] f.add f.scaleR + c[unfolded convex_def] xy uv by auto +qed + + +lemma pos_is_convex: + shows "convex {0 :: real <..}" +unfolding convex_alt +proof safe + fix y x \ :: real + assume asms: "y > 0" "x > 0" "\ \ 0" "\ \ 1" + { assume "\ = 0" + hence "\ *\<^sub>R x + (1 - \) *\<^sub>R y = y" by simp + hence "\ *\<^sub>R x + (1 - \) *\<^sub>R y > 0" using asms by simp } + moreover + { assume "\ = 1" + hence "\ *\<^sub>R x + (1 - \) *\<^sub>R y > 0" using asms by simp } + moreover + { assume "\ \ 1" "\ \ 0" + hence "\ > 0" "(1 - \) > 0" using asms by auto + hence "\ *\<^sub>R x + (1 - \) *\<^sub>R y > 0" using asms + using add_nonneg_pos[of "\ *\<^sub>R x" "(1 - \) *\<^sub>R y"] + real_mult_order by auto fastsimp } + ultimately show "(1 - \) *\<^sub>R y + \ *\<^sub>R x > 0" using assms by fastsimp +qed + +lemma convex_on_setsum: + fixes a :: "'a \ real" + fixes y :: "'a \ 'b::real_vector" + fixes f :: "'b \ real" + assumes "finite s" "s \ {}" + assumes "convex_on C f" + assumes "convex C" + assumes "(\ i \ s. a i) = 1" + assumes "\ i. i \ s \ a i \ 0" + assumes "\ i. i \ s \ y i \ C" + shows "f (\ i \ s. a i *\<^sub>R y i) \ (\ i \ s. a i * f (y i))" +using assms +proof (induct s arbitrary:a rule:finite_ne_induct) + case (singleton i) + hence ai: "a i = 1" by auto + thus ?case by auto +next + case (insert i s) note asms = this + hence "convex_on C f" by simp + from this[unfolded convex_on_def, rule_format] + have conv: "\ x y \. \x \ C; y \ C; 0 \ \; \ \ 1\ + \ f (\ *\<^sub>R x + (1 - \) *\<^sub>R y) \ \ * f x + (1 - \) * f y" + by simp + { assume "a i = 1" + hence "(\ j \ s. a j) = 0" + using asms by auto + hence "\ j. j \ s \ a j = 0" + using setsum_nonneg_0[where 'b=real] asms by fastsimp + hence ?case using asms by auto } + moreover + { assume asm: "a i \ 1" + from asms have yai: "y i \ C" "a i \ 0" by auto + have fis: "finite (insert i s)" using asms by auto + hence ai1: "a i \ 1" using setsum_nonneg_leq_bound[of "insert i s" a] asms by simp + hence "a i < 1" using asm by auto + hence i0: "1 - a i > 0" by auto + let "?a j" = "a j / (1 - a i)" + { fix j assume "j \ s" + hence "?a j \ 0" + using i0 asms divide_nonneg_pos + by fastsimp } note a_nonneg = this + have "(\ j \ insert i s. a j) = 1" using asms by auto + hence "(\ j \ s. a j) = 1 - a i" using setsum.insert asms by fastsimp + hence "(\ j \ s. a j) / (1 - a i) = 1" using i0 by auto + hence a1: "(\ j \ s. ?a j) = 1" unfolding divide.setsum by simp + have "convex C" using asms by auto + hence asum: "(\ j \ s. ?a j *\<^sub>R y j) \ C" + using asms convex_setsum[OF `finite s` + `convex C` a1 a_nonneg] by auto + have asum_le: "f (\ j \ s. ?a j *\<^sub>R y j) \ (\ j \ s. ?a j * f (y j))" + using a_nonneg a1 asms by blast + have "f (\ j \ insert i s. a j *\<^sub>R y j) = f ((\ j \ s. a j *\<^sub>R y j) + a i *\<^sub>R y i)" + using setsum.insert[of s i "\ j. a j *\<^sub>R y j", OF `finite s` `i \ s`] asms + by (auto simp only:add_commute) + also have "\ = f (((1 - a i) * inverse (1 - a i)) *\<^sub>R (\ j \ s. 
a j *\<^sub>R y j) + a i *\<^sub>R y i)" + using i0 by auto + also have "\ = f ((1 - a i) *\<^sub>R (\ j \ s. (a j * inverse (1 - a i)) *\<^sub>R y j) + a i *\<^sub>R y i)" + using scaleR_right.setsum[of "inverse (1 - a i)" "\ j. a j *\<^sub>R y j" s, symmetric] by (auto simp:algebra_simps) + also have "\ = f ((1 - a i) *\<^sub>R (\ j \ s. ?a j *\<^sub>R y j) + a i *\<^sub>R y i)" + by (auto simp:real_divide_def) + also have "\ \ (1 - a i) *\<^sub>R f ((\ j \ s. ?a j *\<^sub>R y j)) + a i * f (y i)" + using conv[of "y i" "(\ j \ s. ?a j *\<^sub>R y j)" "a i", OF yai(1) asum yai(2) ai1] + by (auto simp add:add_commute) + also have "\ \ (1 - a i) * (\ j \ s. ?a j * f (y j)) + a i * f (y i)" + using add_right_mono[OF mult_left_mono[of _ _ "1 - a i", + OF asum_le less_imp_le[OF i0]], of "a i * f (y i)"] by simp + also have "\ = (\ j \ s. (1 - a i) * ?a j * f (y j)) + a i * f (y i)" + unfolding mult_right.setsum[of "1 - a i" "\ j. ?a j * f (y j)"] using i0 by auto + also have "\ = (\ j \ s. a j * f (y j)) + a i * f (y i)" using i0 by auto + also have "\ = (\ j \ insert i s. a j * f (y j))" using asms by auto + finally have "f (\ j \ insert i s. a j *\<^sub>R y j) \ (\ j \ insert i s. a j * f (y j))" + by simp } + ultimately show ?case by auto +qed + +lemma convex_on_alt: + fixes C :: "'a::real_vector set" + assumes "convex C" + shows "convex_on C f = + (\ x \ C. \ y \ C. \ \ :: real. \ \ 0 \ \ \ 1 + \ f (\ *\<^sub>R x + (1 - \) *\<^sub>R y) \ \ * f x + (1 - \) * f y)" +proof safe + fix x y fix \ :: real + assume asms: "convex_on C f" "x \ C" "y \ C" "0 \ \" "\ \ 1" + from this[unfolded convex_on_def, rule_format] + have "\ u v. \0 \ u; 0 \ v; u + v = 1\ \ f (u *\<^sub>R x + v *\<^sub>R y) \ u * f x + v * f y" by auto + from this[of "\" "1 - \", simplified] asms + show "f (\ *\<^sub>R x + (1 - \) *\<^sub>R y) + \ \ * f x + (1 - \) * f y" by auto +next + assume asm: "\x\C. \y\C. \\. 0 \ \ \ \ \ 1 \ f (\ *\<^sub>R x + (1 - \) *\<^sub>R y) \ \ * f x + (1 - \) * f y" + {fix x y fix u v :: real + assume lasm: "x \ C" "y \ C" "u \ 0" "v \ 0" "u + v = 1" + hence[simp]: "1 - u = v" by auto + from asm[rule_format, of x y u] + have "f (u *\<^sub>R x + v *\<^sub>R y) \ u * f x + v * f y" using lasm by auto } + thus "convex_on C f" unfolding convex_on_def by auto +qed + + +lemma pos_convex_function: + fixes f :: "real \ real" + assumes "convex C" + assumes leq: "\ x y. \x \ C ; y \ C\ \ f' x * (y - x) \ f y - f x" + shows "convex_on C f" +unfolding convex_on_alt[OF assms(1)] +using assms +proof safe + fix x y \ :: real + let ?x = "\ *\<^sub>R x + (1 - \) *\<^sub>R y" + assume asm: "convex C" "x \ C" "y \ C" "\ \ 0" "\ \ 1" + hence "1 - \ \ 0" by auto + hence xpos: "?x \ C" using asm unfolding convex_alt by fastsimp + have geq: "\ * (f x - f ?x) + (1 - \) * (f y - f ?x) + \ \ * f' ?x * (x - ?x) + (1 - \) * f' ?x * (y - ?x)" + using add_mono[OF mult_mono1[OF leq[OF xpos asm(2)] `\ \ 0`] + mult_mono1[OF leq[OF xpos asm(3)] `1 - \ \ 0`]] by auto + hence "\ * f x + (1 - \) * f y - f ?x \ 0" + by (auto simp add:field_simps) + thus "f (\ *\<^sub>R x + (1 - \) *\<^sub>R y) \ \ * f x + (1 - \) * f y" + using convex_on_alt by auto +qed + +lemma atMostAtLeast_subset_convex: + fixes C :: "real set" + assumes "convex C" + assumes "x \ C" "y \ C" "x < y" + shows "{x .. y} \ C" +proof safe + fix z assume zasm: "z \ {x .. 
y}" + { assume asm: "x < z" "z < y" + let "?\" = "(y - z) / (y - x)" + have "0 \ ?\" "?\ \ 1" using assms asm by (auto simp add:field_simps) + hence comb: "?\ * x + (1 - ?\) * y \ C" + using assms iffD1[OF convex_alt, rule_format, of C y x ?\] by (simp add:algebra_simps) + have "?\ * x + (1 - ?\) * y = (y - z) * x / (y - x) + (1 - (y - z) / (y - x)) * y" + by (auto simp add:field_simps) + also have "\ = ((y - z) * x + (y - x - (y - z)) * y) / (y - x)" + using assms unfolding add_divide_distrib by (auto simp:field_simps) + also have "\ = z" + using assms by (auto simp:field_simps) + finally have "z \ C" + using comb by auto } note less = this + show "z \ C" using zasm less assms + unfolding atLeastAtMost_iff le_less by auto +qed + +lemma f''_imp_f': + fixes f :: "real \ real" + assumes "convex C" + assumes f': "\ x. x \ C \ DERIV f x :> (f' x)" + assumes f'': "\ x. x \ C \ DERIV f' x :> (f'' x)" + assumes pos: "\ x. x \ C \ f'' x \ 0" + assumes "x \ C" "y \ C" + shows "f' x * (y - x) \ f y - f x" +using assms +proof - + { fix x y :: real assume asm: "x \ C" "y \ C" "y > x" + hence ge: "y - x > 0" "y - x \ 0" by auto + from asm have le: "x - y < 0" "x - y \ 0" by auto + then obtain z1 where z1: "z1 > x" "z1 < y" "f y - f x = (y - x) * f' z1" + using subsetD[OF atMostAtLeast_subset_convex[OF `convex C` `x \ C` `y \ C` `x < y`], + THEN f', THEN MVT2[OF `x < y`, rule_format, unfolded atLeastAtMost_iff[symmetric]]] + by auto + hence "z1 \ C" using atMostAtLeast_subset_convex + `convex C` `x \ C` `y \ C` `x < y` by fastsimp + from z1 have z1': "f x - f y = (x - y) * f' z1" + by (simp add:field_simps) + obtain z2 where z2: "z2 > x" "z2 < z1" "f' z1 - f' x = (z1 - x) * f'' z2" + using subsetD[OF atMostAtLeast_subset_convex[OF `convex C` `x \ C` `z1 \ C` `x < z1`], + THEN f'', THEN MVT2[OF `x < z1`, rule_format, unfolded atLeastAtMost_iff[symmetric]]] z1 + by auto + obtain z3 where z3: "z3 > z1" "z3 < y" "f' y - f' z1 = (y - z1) * f'' z3" + using subsetD[OF atMostAtLeast_subset_convex[OF `convex C` `z1 \ C` `y \ C` `z1 < y`], + THEN f'', THEN MVT2[OF `z1 < y`, rule_format, unfolded atLeastAtMost_iff[symmetric]]] z1 + by auto + have "f' y - (f x - f y) / (x - y) = f' y - f' z1" + using asm z1' by auto + also have "\ = (y - z1) * f'' z3" using z3 by auto + finally have cool': "f' y - (f x - f y) / (x - y) = (y - z1) * f'' z3" by simp + have A': "y - z1 \ 0" using z1 by auto + have "z3 \ C" using z3 asm atMostAtLeast_subset_convex + `convex C` `x \ C` `z1 \ C` `x < z1` by fastsimp + hence B': "f'' z3 \ 0" using assms by auto + from A' B' have "(y - z1) * f'' z3 \ 0" using mult_nonneg_nonneg by auto + from cool' this have "f' y - (f x - f y) / (x - y) \ 0" by auto + from mult_right_mono_neg[OF this le(2)] + have "f' y * (x - y) - (f x - f y) / (x - y) * (x - y) \ 0 * (x - y)" + unfolding diff_def using real_add_mult_distrib by auto + hence "f' y * (x - y) - (f x - f y) \ 0" using le by auto + hence res: "f' y * (x - y) \ f x - f y" by auto + have "(f y - f x) / (y - x) - f' x = f' z1 - f' x" + using asm z1 by auto + also have "\ = (z1 - x) * f'' z2" using z2 by auto + finally have cool: "(f y - f x) / (y - x) - f' x = (z1 - x) * f'' z2" by simp + have A: "z1 - x \ 0" using z1 by auto + have "z2 \ C" using z2 z1 asm atMostAtLeast_subset_convex + `convex C` `z1 \ C` `y \ C` `z1 < y` by fastsimp + hence B: "f'' z2 \ 0" using assms by auto + from A B have "(z1 - x) * f'' z2 \ 0" using mult_nonneg_nonneg by auto + from cool this have "(f y - f x) / (y - x) - f' x \ 0" by auto + from mult_right_mono[OF this 
ge(2)] + have "(f y - f x) / (y - x) * (y - x) - f' x * (y - x) \ 0 * (y - x)" + unfolding diff_def using real_add_mult_distrib by auto + hence "f y - f x - f' x * (y - x) \ 0" using ge by auto + hence "f y - f x \ f' x * (y - x)" "f' y * (x - y) \ f x - f y" + using res by auto } note less_imp = this + { fix x y :: real assume "x \ C" "y \ C" "x \ y" + hence"f y - f x \ f' x * (y - x)" + unfolding neq_iff using less_imp by auto } note neq_imp = this + moreover + { fix x y :: real assume asm: "x \ C" "y \ C" "x = y" + hence "f y - f x \ f' x * (y - x)" by auto } + ultimately show ?thesis using assms by blast +qed + +lemma f''_ge0_imp_convex: + fixes f :: "real \ real" + assumes conv: "convex C" + assumes f': "\ x. x \ C \ DERIV f x :> (f' x)" + assumes f'': "\ x. x \ C \ DERIV f' x :> (f'' x)" + assumes pos: "\ x. x \ C \ f'' x \ 0" + shows "convex_on C f" +using f''_imp_f'[OF conv f' f'' pos] assms pos_convex_function by fastsimp + +lemma minus_log_convex: + fixes b :: real + assumes "b > 1" + shows "convex_on {0 <..} (\ x. - log b x)" +proof - + have "\ z. z > 0 \ DERIV (log b) z :> 1 / (ln b * z)" using DERIV_log by auto + hence f': "\ z. z > 0 \ DERIV (\ z. - log b z) z :> - 1 / (ln b * z)" + using DERIV_minus by auto + have "\ z :: real. z > 0 \ DERIV inverse z :> - (inverse z ^ Suc (Suc 0))" + using less_imp_neq[THEN not_sym, THEN DERIV_inverse] by auto + from this[THEN DERIV_cmult, of _ "- 1 / ln b"] + have "\ z :: real. z > 0 \ DERIV (\ z. (- 1 / ln b) * inverse z) z :> (- 1 / ln b) * (- (inverse z ^ Suc (Suc 0)))" + by auto + hence f''0: "\ z :: real. z > 0 \ DERIV (\ z. - 1 / (ln b * z)) z :> 1 / (ln b * z * z)" + unfolding inverse_eq_divide by (auto simp add:real_mult_assoc) + have f''_ge0: "\ z :: real. z > 0 \ 1 / (ln b * z * z) \ 0" + using `b > 1` by (auto intro!:less_imp_le simp add:divide_pos_pos[of 1] real_mult_order) + from f''_ge0_imp_convex[OF pos_is_convex, + unfolded greaterThan_iff, OF f' f''0 f''_ge0] + show ?thesis by auto +qed + +end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/Efficient_Nat.thy --- a/src/HOL/Library/Efficient_Nat.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/Efficient_Nat.thy Tue May 04 20:30:22 2010 +0200 @@ -152,7 +152,8 @@ in case map_filter (fn th'' => SOME (th'', singleton - (Variable.trade (K (fn [th'''] => [th''' RS th'])) (Variable.thm_context th'')) th'') + (Variable.trade (K (fn [th'''] => [th''' RS th'])) + (Variable.global_thm_context th'')) th'') handle THM _ => NONE) thms of [] => NONE | thps => diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/Formal_Power_Series.thy --- a/src/HOL/Library/Formal_Power_Series.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/Formal_Power_Series.thy Tue May 04 20:30:22 2010 +0200 @@ -28,7 +28,7 @@ text{* Definition of the basic elements 0 and 1 and the basic operations of addition, negation and multiplication *} -instantiation fps :: (zero) zero +instantiation fps :: (zero) zero begin definition fps_zero_def: @@ -40,7 +40,7 @@ lemma fps_zero_nth [simp]: "0 $ n = 0" unfolding fps_zero_def by simp -instantiation fps :: ("{one,zero}") one +instantiation fps :: ("{one, zero}") one begin definition fps_one_def: @@ -588,7 +588,7 @@ from k have "real k > - log y x" by simp then have "ln y * real k > - ln x" unfolding log_def using ln_gt_zero_iff[OF yp] y1 - by (simp add: minus_divide_left field_simps field_eq_simps del:minus_divide_left[symmetric]) + by (simp add: minus_divide_left field_simps del:minus_divide_left[symmetric]) then have "ln y * real k + ln x > 0" by simp then 
have "exp (real k * ln y + ln x) > exp 0" by (simp add: mult_ac) @@ -596,7 +596,7 @@ unfolding exp_zero exp_add exp_real_of_nat_mult exp_ln[OF xp] exp_ln[OF yp] by simp then have "x > (1/y)^k" using yp - by (simp add: field_simps field_eq_simps nonzero_power_divide) + by (simp add: field_simps nonzero_power_divide) then show ?thesis using kp by blast qed lemma X_nth[simp]: "X$n = (if n = 1 then 1 else 0)" by (simp add: X_def) @@ -693,7 +693,7 @@ from th0[symmetric, unfolded nonzero_divide_eq_eq[OF f0]] have th1: "setsum (\i. f$i * natfun_inverse f (n - i)) {1..n} = - (f$0) * (inverse f)$n" - by (simp add: ring_simps) + by (simp add: field_simps) have "(f * inverse f) $ n = (\i = 0..n. f $i * natfun_inverse f (n - i))" unfolding fps_mult_nth ifn .. also have "\ = f$0 * natfun_inverse f n @@ -766,7 +766,7 @@ lemma fps_deriv_nth[simp]: "fps_deriv f $ n = of_nat (n +1) * f $ (n+1)" by (simp add: fps_deriv_def) lemma fps_deriv_linear[simp]: "fps_deriv (fps_const (a::'a::comm_semiring_1) * f + fps_const b * g) = fps_const a * fps_deriv f + fps_const b * fps_deriv g" - unfolding fps_eq_iff fps_add_nth fps_const_mult_left fps_deriv_nth by (simp add: ring_simps) + unfolding fps_eq_iff fps_add_nth fps_const_mult_left fps_deriv_nth by (simp add: field_simps) lemma fps_deriv_mult[simp]: fixes f :: "('a :: comm_ring_1) fps" @@ -817,7 +817,7 @@ unfolding s0 s1 unfolding setsum_addf[symmetric] setsum_right_distrib apply (rule setsum_cong2) - by (auto simp add: of_nat_diff ring_simps) + by (auto simp add: of_nat_diff field_simps) finally have "(f * ?D g + ?D f * g) $ n = ?D (f*g) $ n" .} then show ?thesis unfolding fps_eq_iff by auto qed @@ -878,7 +878,7 @@ proof- have "fps_deriv f = fps_deriv g \ fps_deriv (f - g) = 0" by simp also have "\ \ f - g = fps_const ((f-g)$0)" unfolding fps_deriv_eq_0_iff .. - finally show ?thesis by (simp add: ring_simps) + finally show ?thesis by (simp add: field_simps) qed lemma fps_deriv_eq_iff_ex: "(fps_deriv f = fps_deriv g) \ (\(c::'a::{idom,semiring_char_0}). f = fps_const c + g)" @@ -929,7 +929,7 @@ qed lemma fps_deriv_maclauren_0: "(fps_nth_deriv k (f:: ('a::comm_semiring_1) fps)) $ 0 = of_nat (fact k) * f$(k)" - by (induct k arbitrary: f) (auto simp add: ring_simps of_nat_mult) + by (induct k arbitrary: f) (auto simp add: field_simps of_nat_mult) subsection {* Powers*} @@ -943,7 +943,7 @@ case (Suc n) note h = Suc.hyps[OF `a$0 = 1`] show ?case unfolding power_Suc fps_mult_nth - using h `a$0 = 1` fps_power_zeroth_eq_one[OF `a$0=1`] by (simp add: ring_simps) + using h `a$0 = 1` fps_power_zeroth_eq_one[OF `a$0=1`] by (simp add: field_simps) qed lemma startsby_one_power:"a $ 0 = (1::'a::comm_ring_1) \ a^n $ 0 = 1" @@ -1005,7 +1005,7 @@ case 0 thus ?case by (simp add: power_0) next case (Suc n) - have "a ^ Suc n $ (Suc n) = (a^n * a)$(Suc n)" by (simp add: ring_simps power_Suc) + have "a ^ Suc n $ (Suc n) = (a^n * a)$(Suc n)" by (simp add: field_simps power_Suc) also have "\ = setsum (\i. a^n$i * a $ (Suc n - i)) {0.. Suc n}" by (simp add: fps_mult_nth) also have "\ = setsum (\i. a^n$i * a $ (Suc n - i)) {n .. 
Suc n}" apply (rule setsum_mono_zero_right) @@ -1045,8 +1045,8 @@ qed lemma fps_deriv_power: "fps_deriv (a ^ n) = fps_const (of_nat n :: 'a:: comm_ring_1) * fps_deriv a * a ^ (n - 1)" - apply (induct n, auto simp add: power_Suc ring_simps fps_const_add[symmetric] simp del: fps_const_add) - by (case_tac n, auto simp add: power_Suc ring_simps) + apply (induct n, auto simp add: power_Suc field_simps fps_const_add[symmetric] simp del: fps_const_add) + by (case_tac n, auto simp add: power_Suc field_simps) lemma fps_inverse_deriv: fixes a:: "('a :: field) fps" @@ -1060,11 +1060,11 @@ with inverse_mult_eq_1[OF a0] have "inverse a ^ 2 * fps_deriv a + fps_deriv (inverse a) = 0" unfolding power2_eq_square - apply (simp add: ring_simps) + apply (simp add: field_simps) by (simp add: mult_assoc[symmetric]) hence "inverse a ^ 2 * fps_deriv a + fps_deriv (inverse a) - fps_deriv a * inverse a ^ 2 = 0 - fps_deriv a * inverse a ^ 2" by simp - then show "fps_deriv (inverse a) = - fps_deriv a * inverse a ^ 2" by (simp add: ring_simps) + then show "fps_deriv (inverse a) = - fps_deriv a * inverse a ^ 2" by (simp add: field_simps) qed lemma fps_inverse_mult: @@ -1084,7 +1084,7 @@ from inverse_mult_eq_1[OF ab0] have "inverse (a*b) * (a*b) * inverse a * inverse b = 1 * inverse a * inverse b" by simp then have "inverse (a*b) * (inverse a * a) * (inverse b * b) = inverse a * inverse b" - by (simp add: ring_simps) + by (simp add: field_simps) then have ?thesis using inverse_mult_eq_1[OF a0] inverse_mult_eq_1[OF b0] by simp} ultimately show ?thesis by blast qed @@ -1105,7 +1105,7 @@ assumes a0: "b$0 \ 0" shows "fps_deriv (a / b) = (fps_deriv a * b - a * fps_deriv b) / b ^ 2" using fps_inverse_deriv[OF a0] - by (simp add: fps_divide_def ring_simps power2_eq_square fps_inverse_mult inverse_mult_eq_1'[OF a0]) + by (simp add: fps_divide_def field_simps power2_eq_square fps_inverse_mult inverse_mult_eq_1'[OF a0]) lemma fps_inverse_gp': "inverse (Abs_fps(\n. (1::'a::field))) @@ -1121,7 +1121,7 @@ proof- have eq: "(1 + X) * ?r = 1" unfolding minus_one_power_iff - by (auto simp add: ring_simps fps_eq_iff) + by (auto simp add: field_simps fps_eq_iff) show ?thesis by (auto simp add: eq intro: fps_inverse_unique) qed @@ -1185,7 +1185,7 @@ next case (Suc k) note th = Suc.hyps[symmetric] - have "(Abs_fps a - setsum (\i. fps_const (a i :: 'a) * X^i) {0 .. Suc k})$n = (Abs_fps a - setsum (\i. fps_const (a i :: 'a) * X^i) {0 .. k} - fps_const (a (Suc k)) * X^ Suc k) $ n" by (simp add: ring_simps) + have "(Abs_fps a - setsum (\i. fps_const (a i :: 'a) * X^i) {0 .. Suc k})$n = (Abs_fps a - setsum (\i. fps_const (a i :: 'a) * X^i) {0 .. k} - fps_const (a (Suc k)) * X^ Suc k) $ n" by (simp add: field_simps) also have "\ = (if n < Suc k then 0 else a n) - (fps_const (a (Suc k)) * X^ Suc k)$n" using th unfolding fps_sub_nth by simp @@ -1209,10 +1209,10 @@ definition "XD = op * X o fps_deriv" lemma XD_add[simp]:"XD (a + b) = XD a + XD (b :: ('a::comm_ring_1) fps)" - by (simp add: XD_def ring_simps) + by (simp add: XD_def field_simps) lemma XD_mult_const[simp]:"XD (fps_const (c::'a::comm_ring_1) * a) = fps_const c * XD a" - by (simp add: XD_def ring_simps) + by (simp add: XD_def field_simps) lemma XD_linear[simp]: "XD (fps_const c * a + fps_const d * b) = fps_const c * XD a + fps_const d * XD (b :: ('a::comm_ring_1) fps)" by simp @@ -1226,7 +1226,7 @@ lemma fps_mult_XD_shift: "(XD ^^ k) (a:: ('a::{comm_ring_1}) fps) = Abs_fps (\n. 
(of_nat n ^ k) * a$n)" - by (induct k arbitrary: a) (simp_all add: power_Suc XD_def fps_eq_iff ring_simps del: One_nat_def) + by (induct k arbitrary: a) (simp_all add: power_Suc XD_def fps_eq_iff field_simps del: One_nat_def) subsubsection{* Rule 3 is trivial and is given by @{text fps_times_def}*} subsubsection{* Rule 5 --- summation and "division" by (1 - X)*} @@ -1688,7 +1688,7 @@ then have "setsum ?f ?Pnkn = of_nat (k+1) * ?r $ n * r (Suc k) (a $ 0) ^ k" by (simp add: natpermute_max_card[OF nz, simplified]) also have "\ = a$n - setsum ?f ?Pnknn" - unfolding n1 using r00 a0 by (simp add: field_eq_simps fps_radical_def del: of_nat_Suc) + unfolding n1 using r00 a0 by (simp add: field_simps fps_radical_def del: of_nat_Suc) finally have fn: "setsum ?f ?Pnkn = a$n - setsum ?f ?Pnknn" . have "(?r ^ Suc k)$n = setsum ?f ?Pnkn + setsum ?f ?Pnknn" unfolding fps_power_nth_Suc setsum_Un_disjoint[OF f d, unfolded eq] .. @@ -1764,7 +1764,7 @@ shows "a = b / c" proof- from eq have "a * c * inverse c = b * inverse c" by simp - hence "a * (inverse c * c) = b/c" by (simp only: field_eq_simps divide_inverse) + hence "a * (inverse c * c) = b/c" by (simp only: field_simps divide_inverse) then show "a = b/c" unfolding field_inverse[OF c0] by simp qed @@ -1837,7 +1837,7 @@ show "a$(xs !i) = ?r$(xs!i)" . qed have th00: "\(x::'a). of_nat (Suc k) * (x * inverse (of_nat (Suc k))) = x" - by (simp add: field_eq_simps del: of_nat_Suc) + by (simp add: field_simps del: of_nat_Suc) from H have "b$n = a^Suc k $ n" by (simp add: fps_eq_iff) also have "a ^ Suc k$n = setsum ?g ?Pnkn + setsum ?g ?Pnknn" unfolding fps_power_nth_Suc @@ -1854,7 +1854,7 @@ then have "a$n = ?r $n" apply (simp del: of_nat_Suc) unfolding fps_radical_def n1 - by (simp add: field_eq_simps n1 th00 del: of_nat_Suc)} + by (simp add: field_simps n1 th00 del: of_nat_Suc)} ultimately show "a$n = ?r $ n" by (cases n, auto) qed} then have "a = ?r" by (simp add: fps_eq_iff)} @@ -2018,11 +2018,11 @@ proof- {fix n have "(fps_deriv (a oo b))$n = setsum (\i. a $ i * (fps_deriv (b^i))$n) {0.. Suc n}" - by (simp add: fps_compose_def ring_simps setsum_right_distrib del: of_nat_Suc) + by (simp add: fps_compose_def field_simps setsum_right_distrib del: of_nat_Suc) also have "\ = setsum (\i. a$i * ((fps_const (of_nat i)) * (fps_deriv b * (b^(i - 1))))$n) {0.. Suc n}" - by (simp add: ring_simps fps_deriv_power del: fps_mult_left_const_nth of_nat_Suc) + by (simp add: field_simps fps_deriv_power del: fps_mult_left_const_nth of_nat_Suc) also have "\ = setsum (\i. of_nat i * a$i * (((b^(i - 1)) * fps_deriv b))$n) {0.. Suc n}" - unfolding fps_mult_left_const_nth by (simp add: ring_simps) + unfolding fps_mult_left_const_nth by (simp add: field_simps) also have "\ = setsum (\i. of_nat i * a$i * (setsum (\j. (b^ (i - 1))$j * (fps_deriv b)$(n - j)) {0..n})) {0.. Suc n}" unfolding fps_mult_nth .. also have "\ = setsum (\i. of_nat i * a$i * (setsum (\j. (b^ (i - 1))$j * (fps_deriv b)$(n - j)) {0..n})) {1.. Suc n}" @@ -2170,7 +2170,7 @@ by (auto simp add: fps_eq_iff fps_compose_nth power_0_left setsum_0') lemma fps_compose_add_distrib: "(a + b) oo c = (a oo c) + (b oo c)" - by (simp add: fps_eq_iff fps_compose_nth ring_simps setsum_addf) + by (simp add: fps_eq_iff fps_compose_nth field_simps setsum_addf) lemma fps_compose_setsum_distrib: "(setsum f S) oo a = setsum (\i. 
f i oo a) S" proof- @@ -2212,7 +2212,7 @@ apply (simp add: fps_mult_nth setsum_right_distrib) apply (subst setsum_commute) apply (rule setsum_cong2) - by (auto simp add: ring_simps) + by (auto simp add: field_simps) also have "\ = ?l" apply (simp add: fps_mult_nth fps_compose_nth setsum_product) apply (rule setsum_cong2) @@ -2312,7 +2312,7 @@ qed lemma fps_compose_uminus: "- (a::'a::ring_1 fps) oo c = - (a oo c)" - by (simp add: fps_eq_iff fps_compose_nth ring_simps setsum_negf[symmetric]) + by (simp add: fps_eq_iff fps_compose_nth field_simps setsum_negf[symmetric]) lemma fps_compose_sub_distrib: shows "(a - b) oo (c::'a::ring_1 fps) = (a oo c) - (b oo c)" @@ -2469,7 +2469,7 @@ proof- let ?r = "fps_inv" have ra0: "?r a $ 0 = 0" by (simp add: fps_inv_def) - from a1 have ra1: "?r a $ 1 \ 0" by (simp add: fps_inv_def field_eq_simps) + from a1 have ra1: "?r a $ 1 \ 0" by (simp add: fps_inv_def field_simps) have X0: "X$0 = 0" by simp from fps_inv[OF ra0 ra1] have "?r (?r a) oo ?r a = X" . then have "?r (?r a) oo ?r a oo a = X oo a" by simp @@ -2486,7 +2486,7 @@ proof- let ?r = "fps_ginv" from c0 have rca0: "?r c a $0 = 0" by (simp add: fps_ginv_def) - from a1 c1 have rca1: "?r c a $ 1 \ 0" by (simp add: fps_ginv_def field_eq_simps) + from a1 c1 have rca1: "?r c a $ 1 \ 0" by (simp add: fps_ginv_def field_simps) from fps_ginv[OF rca0 rca1] have "?r b (?r c a) oo ?r c a = b" . then have "?r b (?r c a) oo ?r c a oo a = b oo a" by simp @@ -2534,8 +2534,8 @@ proof- {fix n have "?l$n = ?r $ n" - apply (auto simp add: E_def field_eq_simps power_Suc[symmetric]simp del: fact_Suc of_nat_Suc power_Suc) - by (simp add: of_nat_mult ring_simps)} + apply (auto simp add: E_def field_simps power_Suc[symmetric]simp del: fact_Suc of_nat_Suc power_Suc) + by (simp add: of_nat_mult field_simps)} then show ?thesis by (simp add: fps_eq_iff) qed @@ -2545,15 +2545,15 @@ proof- {assume d: ?lhs from d have th: "\n. a $ Suc n = c * a$n / of_nat (Suc n)" - by (simp add: fps_deriv_def fps_eq_iff field_eq_simps del: of_nat_Suc) + by (simp add: fps_deriv_def fps_eq_iff field_simps del: of_nat_Suc) {fix n have "a$n = a$0 * c ^ n/ (of_nat (fact n))" apply (induct n) apply simp unfolding th using fact_gt_zero_nat - apply (simp add: field_eq_simps del: of_nat_Suc fact_Suc) + apply (simp add: field_simps del: of_nat_Suc fact_Suc) apply (drule sym) - by (simp add: ring_simps of_nat_mult power_Suc)} + by (simp add: field_simps of_nat_mult power_Suc)} note th' = this have ?rhs by (auto simp add: fps_eq_iff fps_const_mult_left E_def intro : th')} @@ -2570,7 +2570,7 @@ lemma E_add_mult: "E (a + b) = E (a::'a::field_char_0) * E b" (is "?l = ?r") proof- have "fps_deriv (?r) = fps_const (a+b) * ?r" - by (simp add: fps_const_add[symmetric] ring_simps del: fps_const_add) + by (simp add: fps_const_add[symmetric] field_simps del: fps_const_add) then have "?r = ?l" apply (simp only: E_unique_ODE) by (simp add: fps_mult_nth E_def) then show ?thesis .. @@ -2618,13 +2618,13 @@ (is "inverse ?l = ?r") proof- have th: "?l * ?r = 1" - by (auto simp add: ring_simps fps_eq_iff minus_one_power_iff) + by (auto simp add: field_simps fps_eq_iff minus_one_power_iff) have th': "?l $ 0 \ 0" by (simp add: ) from fps_inverse_unique[OF th' th] show ?thesis . 
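The two identities rewritten in the hunks above -- E_add_mult (the formal exponential turns addition into multiplication) and the alternating-sign inverse of 1 + X -- can be spot-checked on truncated coefficient lists. The following Python sketch is illustrative only: the helpers E and mult are ad-hoc stand-ins for the fps operations, not part of the theory, and exact rationals keep the comparison honest.

    from fractions import Fraction
    from math import factorial

    N = 12  # truncation order: work with the first N coefficients only

    def E(a):
        # E a = Abs_fps (%n. a^n / fact n), the formal exponential series
        return [Fraction(a) ** n / factorial(n) for n in range(N)]

    def mult(f, g):
        # Cauchy product of two truncated series
        return [sum(f[i] * g[n - i] for i in range(n + 1)) for n in range(N)]

    one = [Fraction(1)] + [Fraction(0)] * (N - 1)
    one_plus_X = [Fraction(1), Fraction(1)] + [Fraction(0)] * (N - 2)
    alternating = [Fraction((-1) ** n) for n in range(N)]  # Abs_fps (%n. (- 1)^n)

    assert mult(E(2), E(3)) == E(5)              # E (a + b) = E a * E b
    assert mult(one_plus_X, alternating) == one  # inverse of 1 + X is the alternating series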
qed lemma E_power_mult: "(E (c::'a::field_char_0))^n = E (of_nat n * c)" - by (induct n, auto simp add: ring_simps E_add_mult power_Suc) + by (induct n, auto simp add: field_simps E_add_mult power_Suc) lemma radical_E: assumes r: "r (Suc k) 1 = 1" @@ -2649,18 +2649,18 @@ text{* The generalized binomial theorem as a consequence of @{thm E_add_mult} *} lemma gbinomial_theorem: - "((a::'a::{field_char_0, division_by_zero})+b) ^ n = (\k=0..n. of_nat (n choose k) * a^k * b^(n-k))" + "((a::'a::{field_char_0, field_inverse_zero})+b) ^ n = (\k=0..n. of_nat (n choose k) * a^k * b^(n-k))" proof- from E_add_mult[of a b] have "(E (a + b)) $ n = (E a * E b)$n" by simp then have "(a + b) ^ n = (\i\nat = 0\nat..n. a ^ i * b ^ (n - i) * (of_nat (fact n) / of_nat (fact i * fact (n - i))))" - by (simp add: field_eq_simps fps_mult_nth of_nat_mult[symmetric] setsum_right_distrib) + by (simp add: field_simps fps_mult_nth of_nat_mult[symmetric] setsum_right_distrib) then show ?thesis apply simp apply (rule setsum_cong2) apply simp apply (frule binomial_fact[where ?'a = 'a, symmetric]) - by (simp add: field_eq_simps of_nat_mult) + by (simp add: field_simps of_nat_mult) qed text{* And the nat-form -- also available from Binomial.thy *} @@ -2683,7 +2683,7 @@ by (simp add: L_def fps_eq_iff del: of_nat_Suc) lemma L_nth: "L c $ n = (if n=0 then 0 else 1/c * ((- 1) ^ (n - 1) / of_nat n))" - by (simp add: L_def field_eq_simps) + by (simp add: L_def field_simps) lemma L_0[simp]: "L c $ 0 = 0" by (simp add: L_def) lemma L_E_inv: @@ -2694,9 +2694,9 @@ have b0: "?b $ 0 = 0" by simp have b1: "?b $ 1 \ 0" by (simp add: a) have "fps_deriv (E a - 1) oo fps_inv (E a - 1) = (fps_const a * (E a - 1) + fps_const a) oo fps_inv (E a - 1)" - by (simp add: ring_simps) + by (simp add: field_simps) also have "\ = fps_const a * (X + 1)" apply (simp add: fps_compose_add_distrib fps_const_mult_apply_left[symmetric] fps_inv_right[OF b0 b1]) - by (simp add: ring_simps) + by (simp add: field_simps) finally have eq: "fps_deriv (E a - 1) oo fps_inv (E a - 1) = fps_const a * (X + 1)" . 
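The gbinomial_theorem hunk above only swaps the sort constraint (field_inverse_zero in place of division_by_zero); the identity itself, at natural-number choose coefficients, is the ordinary binomial theorem and is easy to sanity-check numerically. A minimal Python sketch, assuming Python 3.8+ for math.comb; the sample values of a and b are arbitrary rationals:

    from fractions import Fraction
    from math import comb

    a, b = Fraction(3, 7), Fraction(-5, 2)
    for n in range(12):
        lhs = (a + b) ** n
        rhs = sum(comb(n, k) * a ** k * b ** (n - k) for k in range(n + 1))
        assert lhs == rhs  # (a + b)^n = sum_k (n choose k) * a^k * b^(n - k)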
from fps_inv_deriv[OF b0 b1, unfolded eq] have "fps_deriv (fps_inv ?b) = fps_const (inverse a) / (X + 1)" @@ -2713,7 +2713,7 @@ shows "L c + L d = fps_const (c+d) * L (c*d)" (is "?r = ?l") proof- - from c0 d0 have eq: "1/c + 1/d = (c+d)/(c*d)" by (simp add: field_eq_simps) + from c0 d0 have eq: "1/c + 1/d = (c+d)/(c*d)" by (simp add: field_simps) have "fps_deriv ?r = fps_const (1/c + 1/d) * inverse (1 + X)" by (simp add: fps_deriv_L fps_const_add[symmetric] algebra_simps del: fps_const_add) also have "\ = fps_deriv ?l" @@ -2743,7 +2743,7 @@ have "?l = ?r \ inverse ?x1 * ?l = inverse ?x1 * ?r" by simp also have "\ \ ?da = (fps_const c * a) / ?x1" apply (simp only: fps_divide_def mult_assoc[symmetric] inverse_mult_eq_1[OF x10]) - by (simp add: ring_simps) + by (simp add: field_simps) finally have eq: "?l = ?r \ ?lhs" by simp moreover {assume h: "?l = ?r" @@ -2752,8 +2752,8 @@ from lrn have "a$ Suc n = ((c - of_nat n) / of_nat (Suc n)) * a $n" - apply (simp add: ring_simps del: of_nat_Suc) - by (cases n, simp_all add: field_eq_simps del: of_nat_Suc) + apply (simp add: field_simps del: of_nat_Suc) + by (cases n, simp_all add: field_simps del: of_nat_Suc) } note th0 = this {fix n have "a$n = (c gchoose n) * a$0" @@ -2762,24 +2762,24 @@ next case (Suc m) thus ?case unfolding th0 - apply (simp add: field_eq_simps del: of_nat_Suc) + apply (simp add: field_simps del: of_nat_Suc) unfolding mult_assoc[symmetric] gbinomial_mult_1 - by (simp add: ring_simps) + by (simp add: field_simps) qed} note th1 = this have ?rhs apply (simp add: fps_eq_iff) apply (subst th1) - by (simp add: ring_simps)} + by (simp add: field_simps)} moreover {assume h: ?rhs have th00:"\x y. x * (a$0 * y) = a$0 * (x*y)" by (simp add: mult_commute) have "?l = ?r" apply (subst h) apply (subst (2) h) - apply (clarsimp simp add: fps_eq_iff ring_simps) + apply (clarsimp simp add: fps_eq_iff field_simps) unfolding mult_assoc[symmetric] th00 gbinomial_mult_1 - by (simp add: ring_simps gbinomial_mult_1)} + by (simp add: field_simps gbinomial_mult_1)} ultimately show ?thesis by blast qed @@ -2798,9 +2798,9 @@ have "fps_deriv ?P = ?db c * ?b d + ?b c * ?db d - ?db (c + d)" by simp also have "\ = inverse (1 + X) * (fps_const c * ?b c * ?b d + fps_const d * ?b c * ?b d - fps_const (c+d) * ?b (c + d))" unfolding fps_binomial_deriv - by (simp add: fps_divide_def ring_simps) + by (simp add: fps_divide_def field_simps) also have "\ = (fps_const (c + d)/ (1 + X)) * ?P" - by (simp add: ring_simps fps_divide_def fps_const_add[symmetric] del: fps_const_add) + by (simp add: field_simps fps_divide_def fps_const_add[symmetric] del: fps_const_add) finally have th0: "fps_deriv ?P = fps_const (c+d) * ?P / (1 + X)" by (simp add: fps_divide_def) have "?P = fps_const (?P$0) * ?b (c + d)" @@ -2880,7 +2880,7 @@ using kn pochhammer_minus'[where k=k and n=n and b=b] apply (simp add: pochhammer_same) using bn0 - by (simp add: field_eq_simps power_add[symmetric])} + by (simp add: field_simps power_add[symmetric])} moreover {assume nk: "k \ n" have m1nk: "?m1 n = setprod (%i. - 1) {0..m}" @@ -2905,7 +2905,7 @@ unfolding m1nk unfolding m h pochhammer_Suc_setprod - apply (simp add: field_eq_simps del: fact_Suc id_def) + apply (simp add: field_simps del: fact_Suc id_def) unfolding fact_altdef_nat id_def unfolding of_nat_setprod unfolding setprod_timesf[symmetric] @@ -2942,10 +2942,10 @@ apply auto done then have th2: "(?m1 n * ?p b n)/pochhammer (b - of_nat n + 1) k = setprod (%i. b - of_nat i) {0.. 
n - k - 1}" - using nz' by (simp add: field_eq_simps) + using nz' by (simp add: field_simps) have "(?m1 n * ?p b n * ?m1 k * ?p (of_nat n) k) / (?f n * pochhammer (b - of_nat n + 1) k) = ((?m1 k * ?p (of_nat n) k) / ?f n) * ((?m1 n * ?p b n)/pochhammer (b - of_nat n + 1) k)" using bnz0 - by (simp add: field_eq_simps) + by (simp add: field_simps) also have "\ = b gchoose (n - k)" unfolding th1 th2 using kn' by (simp add: gbinomial_def) @@ -2959,15 +2959,15 @@ note th00 = this have "?r = ((a + b) gchoose n) * (of_nat (fact n)/ (?m1 n * pochhammer (- b) n))" unfolding gbinomial_pochhammer - using bn0 by (auto simp add: field_eq_simps) + using bn0 by (auto simp add: field_simps) also have "\ = ?l" unfolding gbinomial_Vandermonde[symmetric] apply (simp add: th00) unfolding gbinomial_pochhammer - using bn0 apply (simp add: setsum_left_distrib setsum_right_distrib field_eq_simps) + using bn0 apply (simp add: setsum_left_distrib setsum_right_distrib field_simps) apply (rule setsum_cong2) apply (drule th00(2)) - by (simp add: field_eq_simps power_add[symmetric]) + by (simp add: field_simps power_add[symmetric]) finally show ?thesis by simp qed @@ -2992,7 +2992,7 @@ have nz: "pochhammer c n \ 0" using c by (simp add: pochhammer_eq_0_iff) from Vandermonde_pochhammer_lemma[where a = "?a" and b="?b" and n=n, OF h, unfolded th0 th1] - show ?thesis using nz by (simp add: field_eq_simps setsum_right_distrib) + show ?thesis using nz by (simp add: field_simps setsum_right_distrib) qed subsubsection{* Formal trigonometric functions *} @@ -3014,11 +3014,11 @@ using en by (simp add: fps_sin_def) also have "\ = (- 1)^(n div 2) * c^Suc n * (of_nat (n+1) / (of_nat (Suc n) * of_nat (fact n)))" unfolding fact_Suc of_nat_mult - by (simp add: field_eq_simps del: of_nat_add of_nat_Suc) + by (simp add: field_simps del: of_nat_add of_nat_Suc) also have "\ = (- 1)^(n div 2) *c^Suc n / of_nat (fact n)" - by (simp add: field_eq_simps del: of_nat_add of_nat_Suc) + by (simp add: field_simps del: of_nat_add of_nat_Suc) finally have "?lhs $n = ?rhs$n" using en - by (simp add: fps_cos_def ring_simps power_Suc )} + by (simp add: fps_cos_def field_simps power_Suc )} then show "?lhs $ n = ?rhs $ n" by (cases "even n", simp_all add: fps_deriv_def fps_sin_def fps_cos_def) qed @@ -3038,13 +3038,13 @@ using en by (simp add: fps_cos_def) also have "\ = (- 1)^((n + 1) div 2)*c^Suc n * (of_nat (n+1) / (of_nat (Suc n) * of_nat (fact n)))" unfolding fact_Suc of_nat_mult - by (simp add: field_eq_simps del: of_nat_add of_nat_Suc) + by (simp add: field_simps del: of_nat_add of_nat_Suc) also have "\ = (- 1)^((n + 1) div 2) * c^Suc n / of_nat (fact n)" - by (simp add: field_eq_simps del: of_nat_add of_nat_Suc) + by (simp add: field_simps del: of_nat_add of_nat_Suc) also have "\ = (- ((- 1)^((n - 1) div 2))) * c^Suc n / of_nat (fact n)" unfolding th0 unfolding th1[OF en] by simp finally have "?lhs $n = ?rhs$n" using en - by (simp add: fps_sin_def ring_simps power_Suc)} + by (simp add: fps_sin_def field_simps power_Suc)} then show "?lhs $ n = ?rhs $ n" by (cases "even n", simp_all add: fps_deriv_def fps_sin_def fps_cos_def) @@ -3055,7 +3055,7 @@ proof- have "fps_deriv ?lhs = 0" apply (simp add: fps_deriv_power fps_sin_deriv fps_cos_deriv power_Suc) - by (simp add: ring_simps fps_const_neg[symmetric] del: fps_const_neg) + by (simp add: field_simps fps_const_neg[symmetric] del: fps_const_neg) then have "?lhs = fps_const (?lhs $ 0)" unfolding fps_deriv_eq_0_iff . 
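The derivative rules used in this proof -- fps_deriv (fps_sin c) = fps_const c * fps_cos c and its cosine counterpart -- together with the sum-of-squares identity being established can be checked on truncated coefficient lists. An illustrative Python sketch with ad-hoc helpers, exact rational arithmetic, and an arbitrary sample c = 2/3:

    from fractions import Fraction
    from math import factorial

    N = 14
    c = Fraction(2, 3)

    def fps_sin(c):
        # odd coefficients (- 1)^((n - 1) div 2) * c^n / fact n, even ones 0
        return [Fraction(0) if n % 2 == 0
                else Fraction(-1) ** ((n - 1) // 2) * c ** n / factorial(n)
                for n in range(N)]

    def fps_cos(c):
        # even coefficients (- 1)^(n div 2) * c^n / fact n, odd ones 0
        return [Fraction(-1) ** (n // 2) * c ** n / factorial(n) if n % 2 == 0
                else Fraction(0) for n in range(N)]

    def deriv(f):
        # fps_deriv f $ n = of_nat (n + 1) * f $ (n + 1)
        return [(n + 1) * f[n + 1] for n in range(N - 1)]

    def mult(f, g):
        return [sum(f[i] * g[n - i] for i in range(n + 1)) for n in range(N)]

    s, co = fps_sin(c), fps_cos(c)
    assert deriv(s) == [c * x for x in co[:N - 1]]    # fps_sin_deriv
    assert deriv(co) == [-c * x for x in s[:N - 1]]   # fps_cos_deriv
    sum_sq = [p + q for p, q in zip(mult(co, co), mult(s, s))]
    assert sum_sq == [Fraction(1)] + [Fraction(0)] * (N - 1)  # cos^2 + sin^2 = 1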
also have "\ = 1" @@ -3177,7 +3177,7 @@ have th0: "fps_cos c $ 0 \ 0" by (simp add: fps_cos_def) show ?thesis using fps_sin_cos_sum_of_squares[of c] - apply (simp add: fps_tan_def fps_divide_deriv[OF th0] fps_sin_deriv fps_cos_deriv add: fps_const_neg[symmetric] ring_simps power2_eq_square del: fps_const_neg) + apply (simp add: fps_tan_def fps_divide_deriv[OF th0] fps_sin_deriv fps_cos_deriv add: fps_const_neg[symmetric] field_simps power2_eq_square del: fps_const_neg) unfolding right_distrib[symmetric] by simp qed @@ -3252,7 +3252,7 @@ subsection {* Hypergeometric series *} -definition "F as bs (c::'a::{field_char_0, division_by_zero}) = Abs_fps (%n. (foldl (%r a. r* pochhammer a n) 1 as * c^n)/ (foldl (%r b. r * pochhammer b n) 1 bs * of_nat (fact n)))" +definition "F as bs (c::'a::{field_char_0, field_inverse_zero}) = Abs_fps (%n. (foldl (%r a. r* pochhammer a n) 1 as * c^n)/ (foldl (%r b. r * pochhammer b n) 1 bs * of_nat (fact n)))" lemma F_nth[simp]: "F as bs c $ n = (foldl (%r a. r* pochhammer a n) 1 as * c^n)/ (foldl (%r b. r * pochhammer b n) 1 bs * of_nat (fact n))" by (simp add: F_def) @@ -3321,9 +3321,9 @@ by (simp add: fps_eq_iff fps_integral_def) lemma F_minus_nat: - "F [- of_nat n] [- of_nat (n + m)] (c::'a::{field_char_0, division_by_zero}) $ k = (if k <= n then pochhammer (- of_nat n) k * c ^ k / + "F [- of_nat n] [- of_nat (n + m)] (c::'a::{field_char_0, field_inverse_zero}) $ k = (if k <= n then pochhammer (- of_nat n) k * c ^ k / (pochhammer (- of_nat (n + m)) k * of_nat (fact k)) else 0)" - "F [- of_nat m] [- of_nat (m + n)] (c::'a::{field_char_0, division_by_zero}) $ k = (if k <= m then pochhammer (- of_nat m) k * c ^ k / + "F [- of_nat m] [- of_nat (m + n)] (c::'a::{field_char_0, field_inverse_zero}) $ k = (if k <= m then pochhammer (- of_nat m) k * c ^ k / (pochhammer (- of_nat (m + n)) k * of_nat (fact k)) else 0)" by (auto simp add: pochhammer_eq_0_iff) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/Fraction_Field.thy --- a/src/HOL/Library/Fraction_Field.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/Fraction_Field.thy Tue May 04 20:30:22 2010 +0200 @@ -232,7 +232,7 @@ thm mult_eq_0_iff end -instantiation fract :: (idom) field +instantiation fract :: (idom) field_inverse_zero begin definition @@ -263,16 +263,13 @@ next fix q r :: "'a fract" show "q / r = q * inverse r" by (simp add: divide_fract_def) +next + show "inverse 0 = (0:: 'a fract)" by (simp add: fract_expand) + (simp add: fract_collapse) qed end -instance fract :: (idom) division_by_zero -proof - show "inverse 0 = (0:: 'a fract)" by (simp add: fract_expand) - (simp add: fract_collapse) -qed - subsubsection {* The ordered field of fractions over an ordered idom *} @@ -434,7 +431,7 @@ end -instance fract :: (linordered_idom) linordered_field +instance fract :: (linordered_idom) linordered_field_inverse_zero proof fix q r s :: "'a fract" show "q \ r ==> s + q \ s + r" @@ -450,7 +447,7 @@ by simp with F have "(a * d) * (b * d) * ?F * ?F \ (c * b) * (b * d) * ?F * ?F" by (simp add: mult_le_cancel_right) - with neq show ?thesis by (simp add: ring_simps) + with neq show ?thesis by (simp add: field_simps) qed qed show "q < r ==> 0 < s ==> s * q < s * r" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/FrechetDeriv.thy --- a/src/HOL/Library/FrechetDeriv.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/FrechetDeriv.thy Tue May 04 20:30:22 2010 +0200 @@ -54,11 +54,6 @@ subsection {* Addition *} -lemma add_diff_add: - fixes a b c d :: "'a::ab_group_add" - shows "(a + c) - (b + d) = (a - 
b) + (c - d)" -by simp - lemma bounded_linear_add: assumes "bounded_linear f" assumes "bounded_linear g" @@ -385,7 +380,7 @@ fixes x :: "'a::{real_normed_algebra,comm_ring_1}" shows "FDERIV (\x. x ^ Suc n) x :> (\h. (1 + of_nat n) * x ^ n * h)" apply (induct n) - apply (simp add: power_Suc FDERIV_ident) + apply (simp add: FDERIV_ident) apply (drule FDERIV_mult [OF FDERIV_ident]) apply (simp only: of_nat_Suc left_distrib mult_1_left) apply (simp only: power_Suc right_distrib add_ac mult_ac) @@ -402,11 +397,6 @@ subsection {* Inverse *} -lemma inverse_diff_inverse: - "\(a::'a::division_ring) \ 0; b \ 0\ - \ inverse a - inverse b = - (inverse a * (a - b) * inverse b)" -by (simp add: right_diff_distrib left_diff_distrib mult_assoc) - lemmas bounded_linear_mult_const = mult.bounded_linear_left [THEN bounded_linear_compose] diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/Library.thy --- a/src/HOL/Library/Library.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/Library.thy Tue May 04 20:30:22 2010 +0200 @@ -12,6 +12,7 @@ Code_Integer Continuity ContNotDenum + Convex Countable Diagonalize Dlist diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/Multiset.thy --- a/src/HOL/Library/Multiset.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/Multiset.thy Tue May 04 20:30:22 2010 +0200 @@ -1239,7 +1239,7 @@ qed have trans: "\K M N :: 'a multiset. K \# M \ M \# N \ K \# N" unfolding less_multiset_def mult_def by (blast intro: trancl_trans) - show "order (le_multiset :: 'a multiset \ _) less_multiset" proof + show "class.order (le_multiset :: 'a multiset \ _) less_multiset" proof qed (auto simp add: le_multiset_def irrefl dest: trans) qed diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/Numeral_Type.thy --- a/src/HOL/Library/Numeral_Type.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/Numeral_Type.thy Tue May 04 20:30:22 2010 +0200 @@ -213,7 +213,7 @@ lemma comm_ring_1: "OFCLASS('a, comm_ring_1_class)" apply (intro_classes, unfold definitions) -apply (simp_all add: Rep_simps zmod_simps ring_simps) +apply (simp_all add: Rep_simps zmod_simps field_simps) done end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/Permutations.thy --- a/src/HOL/Library/Permutations.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/Permutations.thy Tue May 04 20:30:22 2010 +0200 @@ -96,7 +96,7 @@ lemma permutes_superset: "p permutes S \ (\x \ S - T. p x = x) \ p permutes T" -by (simp add: Ball_def permutes_def Diff_iff) metis +by (simp add: Ball_def permutes_def) metis (* ------------------------------------------------------------------------- *) (* Group properties. *) @@ -125,7 +125,7 @@ apply (rule permutes_compose[OF pS]) apply (rule permutes_swap_id, simp) using permutes_in_image[OF pS, of a] apply simp - apply (auto simp add: Ball_def Diff_iff swap_def) + apply (auto simp add: Ball_def swap_def) done lemma permutes_insert: "{p. p permutes (insert a S)} = @@ -154,7 +154,7 @@ lemma card_permutations: assumes Sn: "card S = n" and fS: "finite S" shows "card {p. 
p permutes S} = fact n" using fS Sn proof (induct arbitrary: n) - case empty thus ?case by (simp add: permutes_empty) + case empty thus ?case by simp next case (insert x F) { fix n assume H0: "card (insert x F) = n" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/Polynomial.thy --- a/src/HOL/Library/Polynomial.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/Polynomial.thy Tue May 04 20:30:22 2010 +0200 @@ -1093,10 +1093,10 @@ apply (cases "r = 0") apply (cases "r' = 0") apply (simp add: pdivmod_rel_def) -apply (simp add: pdivmod_rel_def ring_simps degree_mult_eq) +apply (simp add: pdivmod_rel_def field_simps degree_mult_eq) apply (cases "r' = 0") apply (simp add: pdivmod_rel_def degree_mult_eq) -apply (simp add: pdivmod_rel_def ring_simps) +apply (simp add: pdivmod_rel_def field_simps) apply (simp add: degree_mult_eq degree_add_less) done diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/Product_plus.thy --- a/src/HOL/Library/Product_plus.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/Product_plus.thy Tue May 04 20:30:22 2010 +0200 @@ -112,4 +112,10 @@ instance "*" :: (ab_group_add, ab_group_add) ab_group_add by default (simp_all add: expand_prod_eq) +lemma fst_setsum: "fst (\x\A. f x) = (\x\A. fst (f x))" +by (cases "finite A", induct set: finite, simp_all) + +lemma snd_setsum: "snd (\x\A. f x) = (\x\A. snd (f x))" +by (cases "finite A", induct set: finite, simp_all) + end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/Sum_Of_Squares/sum_of_squares.ML --- a/src/HOL/Library/Sum_Of_Squares/sum_of_squares.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/Sum_Of_Squares/sum_of_squares.ML Tue May 04 20:30:22 2010 +0200 @@ -1282,9 +1282,9 @@ fun simple_cterm_ord t u = Term_Ord.fast_term_ord (term_of t, term_of u) = LESS val concl = Thm.dest_arg o cprop_of val shuffle1 = - fconv_rule (rewr_conv @{lemma "(a + x == y) == (x == y - (a::real))" by (atomize (full)) (simp add: ring_simps) }) + fconv_rule (rewr_conv @{lemma "(a + x == y) == (x == y - (a::real))" by (atomize (full)) (simp add: field_simps) }) val shuffle2 = - fconv_rule (rewr_conv @{lemma "(x + a == y) == (x == y - (a::real))" by (atomize (full)) (simp add: ring_simps)}) + fconv_rule (rewr_conv @{lemma "(x + a == y) == (x == y - (a::real))" by (atomize (full)) (simp add: field_simps)}) fun substitutable_monomial fvs tm = case term_of tm of Free(_,@{typ real}) => if not (member (op aconvc) fvs tm) then (Rat.one,tm) else raise Failure "substitutable_monomial" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Library/normarith.ML --- a/src/HOL/Library/normarith.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Library/normarith.ML Tue May 04 20:30:22 2010 +0200 @@ -395,7 +395,7 @@ fun init_conv ctxt = Simplifier.rewrite (Simplifier.context ctxt - (HOL_basic_ss addsimps ([(*@{thm vec_0}, @{thm vec_1},*) @{thm vector_dist_norm}, @{thm diff_0_right}, @{thm right_minus}, @{thm diff_self}, @{thm norm_zero}] @ @{thms arithmetic_simps} @ @{thms norm_pths}))) + (HOL_basic_ss addsimps ([(*@{thm vec_0}, @{thm vec_1},*) @{thm dist_norm}, @{thm diff_0_right}, @{thm right_minus}, @{thm diff_self}, @{thm norm_zero}] @ @{thms arithmetic_simps} @ @{thms norm_pths}))) then_conv field_comp_conv then_conv nnf_conv diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Limits.thy --- a/src/HOL/Limits.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Limits.thy Tue May 04 20:30:22 2010 +0200 @@ -11,62 +11,65 @@ subsection {* Nets *} text {* - A net is now defined as a filter base. - The definition also allows non-proper filter bases. 
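The rewrite below replaces the filter-base representation of nets by filters, i.e. predicate transformers satisfying the three is_filter laws (True, conj, mono), and later derives eventually_sequentially: a property of naturals holds eventually iff it holds from some bound onwards. A small Python sketch of that reading, illustrative only -- eventually_seq and the sample predicates are ad-hoc, not Isabelle code, and the universal quantifier over all naturals is approximated by a finite window:

    def eventually_seq(P, bound, window=1000):
        # claim "P n holds for all n >= bound", tested on a finite window
        return all(P(n) for n in range(bound, bound + window))

    def P(n): return n * n >= 50        # holds from 8 onwards
    def Q(n): return 2 ** n >= 100 * n  # holds from 10 onwards

    assert eventually_seq(lambda n: True, 0)               # is_filter.True
    assert eventually_seq(P, 8) and eventually_seq(Q, 10)
    # is_filter.conj: the conjunction is again eventual, witness max of the bounds
    assert eventually_seq(lambda n: P(n) and Q(n), max(8, 10))
    # is_filter.mono: a pointwise weaker predicate inherits the same witness
    assert eventually_seq(lambda n: n * n >= 25, 8)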
+ A net is now defined simply as a filter on a set. + The definition also allows non-proper filters. *} +locale is_filter = + fixes net :: "('a \ bool) \ bool" + assumes True: "net (\x. True)" + assumes conj: "net (\x. P x) \ net (\x. Q x) \ net (\x. P x \ Q x)" + assumes mono: "\x. P x \ Q x \ net (\x. P x) \ net (\x. Q x)" + typedef (open) 'a net = - "{net :: 'a set set. (\A. A \ net) - \ (\A\net. \B\net. \C\net. C \ A \ C \ B)}" + "{net :: ('a \ bool) \ bool. is_filter net}" proof - show "UNIV \ ?net" by auto + show "(\x. True) \ ?net" by (auto intro: is_filter.intro) qed -lemma Rep_net_nonempty: "\A. A \ Rep_net net" -using Rep_net [of net] by simp - -lemma Rep_net_directed: - "A \ Rep_net net \ B \ Rep_net net \ \C\Rep_net net. C \ A \ C \ B" +lemma is_filter_Rep_net: "is_filter (Rep_net net)" using Rep_net [of net] by simp lemma Abs_net_inverse': - assumes "\A. A \ net" - assumes "\A B. A \ net \ B \ net \ \C\net. C \ A \ C \ B" - shows "Rep_net (Abs_net net) = net" + assumes "is_filter net" shows "Rep_net (Abs_net net) = net" using assms by (simp add: Abs_net_inverse) -lemma image_nonempty: "\x. x \ A \ \x. x \ f ` A" -by auto - subsection {* Eventually *} definition eventually :: "('a \ bool) \ 'a net \ bool" where - [code del]: "eventually P net \ (\A\Rep_net net. \x\A. P x)" + [code del]: "eventually P net \ Rep_net net P" + +lemma eventually_Abs_net: + assumes "is_filter net" shows "eventually P (Abs_net net) = net P" +unfolding eventually_def using assms by (simp add: Abs_net_inverse) + +lemma expand_net_eq: + shows "net = net' \ (\P. eventually P net = eventually P net')" +unfolding Rep_net_inject [symmetric] expand_fun_eq eventually_def .. lemma eventually_True [simp]: "eventually (\x. True) net" -unfolding eventually_def using Rep_net_nonempty [of net] by fast +unfolding eventually_def +by (rule is_filter.True [OF is_filter_Rep_net]) + +lemma always_eventually: "\x. P x \ eventually P net" +proof - + assume "\x. P x" hence "P = (\x. True)" by (simp add: ext) + thus "eventually P net" by simp +qed lemma eventually_mono: "(\x. P x \ Q x) \ eventually P net \ eventually Q net" -unfolding eventually_def by blast +unfolding eventually_def +by (rule is_filter.mono [OF is_filter_Rep_net]) lemma eventually_conj: assumes P: "eventually (\x. P x) net" assumes Q: "eventually (\x. Q x) net" shows "eventually (\x. P x \ Q x) net" -proof - - obtain A where A: "A \ Rep_net net" "\x\A. P x" - using P unfolding eventually_def by fast - obtain B where B: "B \ Rep_net net" "\x\B. Q x" - using Q unfolding eventually_def by fast - obtain C where C: "C \ Rep_net net" "C \ A" "C \ B" - using Rep_net_directed [OF A(1) B(1)] by fast - then have "\x\C. P x \ Q x" "C \ Rep_net net" - using A(2) B(2) by auto - then show ?thesis unfolding eventually_def .. -qed +using assms unfolding eventually_def +by (rule is_filter.conj [OF is_filter_Rep_net]) lemma eventually_mp: assumes "eventually (\x. P x \ Q x) net" @@ -102,60 +105,196 @@ using assms by (auto elim!: eventually_rev_mp) +subsection {* Finer-than relation *} + +text {* @{term "net \ net'"} means that @{term net} is finer than +@{term net'}. *} + +instantiation net :: (type) complete_lattice +begin + +definition + le_net_def [code del]: + "net \ net' \ (\P. eventually P net' \ eventually P net)" + +definition + less_net_def [code del]: + "(net :: 'a net) < net' \ net \ net' \ \ net' \ net" + +definition + top_net_def [code del]: + "top = Abs_net (\P. \x. P x)" + +definition + bot_net_def [code del]: + "bot = Abs_net (\P. 
True)" + +definition + sup_net_def [code del]: + "sup net net' = Abs_net (\P. eventually P net \ eventually P net')" + +definition + inf_net_def [code del]: + "inf a b = Abs_net + (\P. \Q R. eventually Q a \ eventually R b \ (\x. Q x \ R x \ P x))" + +definition + Sup_net_def [code del]: + "Sup A = Abs_net (\P. \net\A. eventually P net)" + +definition + Inf_net_def [code del]: + "Inf A = Sup {x::'a net. \y\A. x \ y}" + +lemma eventually_top [simp]: "eventually P top \ (\x. P x)" +unfolding top_net_def +by (rule eventually_Abs_net, rule is_filter.intro, auto) + +lemma eventually_bot [simp]: "eventually P bot" +unfolding bot_net_def +by (subst eventually_Abs_net, rule is_filter.intro, auto) + +lemma eventually_sup: + "eventually P (sup net net') \ eventually P net \ eventually P net'" +unfolding sup_net_def +by (rule eventually_Abs_net, rule is_filter.intro) + (auto elim!: eventually_rev_mp) + +lemma eventually_inf: + "eventually P (inf a b) \ + (\Q R. eventually Q a \ eventually R b \ (\x. Q x \ R x \ P x))" +unfolding inf_net_def +apply (rule eventually_Abs_net, rule is_filter.intro) +apply (fast intro: eventually_True) +apply clarify +apply (intro exI conjI) +apply (erule (1) eventually_conj) +apply (erule (1) eventually_conj) +apply simp +apply auto +done + +lemma eventually_Sup: + "eventually P (Sup A) \ (\net\A. eventually P net)" +unfolding Sup_net_def +apply (rule eventually_Abs_net, rule is_filter.intro) +apply (auto intro: eventually_conj elim!: eventually_rev_mp) +done + +instance proof + fix x y :: "'a net" show "x < y \ x \ y \ \ y \ x" + by (rule less_net_def) +next + fix x :: "'a net" show "x \ x" + unfolding le_net_def by simp +next + fix x y z :: "'a net" assume "x \ y" and "y \ z" thus "x \ z" + unfolding le_net_def by simp +next + fix x y :: "'a net" assume "x \ y" and "y \ x" thus "x = y" + unfolding le_net_def expand_net_eq by fast +next + fix x :: "'a net" show "x \ top" + unfolding le_net_def eventually_top by (simp add: always_eventually) +next + fix x :: "'a net" show "bot \ x" + unfolding le_net_def by simp +next + fix x y :: "'a net" show "x \ sup x y" and "y \ sup x y" + unfolding le_net_def eventually_sup by simp_all +next + fix x y z :: "'a net" assume "x \ z" and "y \ z" thus "sup x y \ z" + unfolding le_net_def eventually_sup by simp +next + fix x y :: "'a net" show "inf x y \ x" and "inf x y \ y" + unfolding le_net_def eventually_inf by (auto intro: eventually_True) +next + fix x y z :: "'a net" assume "x \ y" and "x \ z" thus "x \ inf y z" + unfolding le_net_def eventually_inf + by (auto elim!: eventually_mono intro: eventually_conj) +next + fix x :: "'a net" and A assume "x \ A" thus "x \ Sup A" + unfolding le_net_def eventually_Sup by simp +next + fix A and y :: "'a net" assume "\x. x \ A \ x \ y" thus "Sup A \ y" + unfolding le_net_def eventually_Sup by simp +next + fix z :: "'a net" and A assume "z \ A" thus "Inf A \ z" + unfolding le_net_def Inf_net_def eventually_Sup Ball_def by simp +next + fix A and x :: "'a net" assume "\y. y \ A \ x \ y" thus "x \ Inf A" + unfolding le_net_def Inf_net_def eventually_Sup Ball_def by simp +qed + +end + +lemma net_leD: + "net \ net' \ eventually P net' \ eventually P net" +unfolding le_net_def by simp + +lemma net_leI: + "(\P. eventually P net' \ eventually P net) \ net \ net'" +unfolding le_net_def by simp + +lemma eventually_False: + "eventually (\x. 
False) net \ net = bot" +unfolding expand_net_eq by (auto elim: eventually_rev_mp) + + subsection {* Standard Nets *} definition - sequentially :: "nat net" where - [code del]: "sequentially = Abs_net (range (\n. {n..}))" - -definition - within :: "'a net \ 'a set \ 'a net" (infixr "within" 70) where - [code del]: "net within S = Abs_net ((\A. A \ S) ` Rep_net net)" + sequentially :: "nat net" +where [code del]: + "sequentially = Abs_net (\P. \k. \n\k. P n)" definition - at :: "'a::topological_space \ 'a net" where - [code del]: "at a = Abs_net ((\S. S - {a}) ` {S. open S \ a \ S})" - -lemma Rep_net_sequentially: - "Rep_net sequentially = range (\n. {n..})" -unfolding sequentially_def -apply (rule Abs_net_inverse') -apply (rule image_nonempty, simp) -apply (clarsimp, rename_tac m n) -apply (rule_tac x="max m n" in exI, auto) -done + within :: "'a net \ 'a set \ 'a net" (infixr "within" 70) +where [code del]: + "net within S = Abs_net (\P. eventually (\x. x \ S \ P x) net)" -lemma Rep_net_within: - "Rep_net (net within S) = (\A. A \ S) ` Rep_net net" -unfolding within_def -apply (rule Abs_net_inverse') -apply (rule image_nonempty, rule Rep_net_nonempty) -apply (clarsimp, rename_tac A B) -apply (drule (1) Rep_net_directed) -apply (clarify, rule_tac x=C in bexI, auto) -done - -lemma Rep_net_at: - "Rep_net (at a) = ((\S. S - {a}) ` {S. open S \ a \ S})" -unfolding at_def -apply (rule Abs_net_inverse') -apply (rule image_nonempty) -apply (rule_tac x="UNIV" in exI, simp) -apply (clarsimp, rename_tac S T) -apply (rule_tac x="S \ T" in exI, auto simp add: open_Int) -done +definition + at :: "'a::topological_space \ 'a net" +where [code del]: + "at a = Abs_net (\P. \S. open S \ a \ S \ (\x\S. x \ a \ P x))" lemma eventually_sequentially: "eventually P sequentially \ (\N. \n\N. P n)" -unfolding eventually_def Rep_net_sequentially by auto +unfolding sequentially_def +proof (rule eventually_Abs_net, rule is_filter.intro) + fix P Q :: "nat \ bool" + assume "\i. \n\i. P n" and "\j. \n\j. Q n" + then obtain i j where "\n\i. P n" and "\n\j. Q n" by auto + then have "\n\max i j. P n \ Q n" by simp + then show "\k. \n\k. P n \ Q n" .. +qed auto lemma eventually_within: "eventually P (net within S) = eventually (\x. x \ S \ P x) net" -unfolding eventually_def Rep_net_within by auto +unfolding within_def +by (rule eventually_Abs_net, rule is_filter.intro) + (auto elim!: eventually_rev_mp) + +lemma within_UNIV: "net within UNIV = net" + unfolding expand_net_eq eventually_within by simp lemma eventually_at_topological: "eventually P (at a) \ (\S. open S \ a \ S \ (\x\S. x \ a \ P x))" -unfolding eventually_def Rep_net_at by auto +unfolding at_def +proof (rule eventually_Abs_net, rule is_filter.intro) + have "open UNIV \ a \ UNIV \ (\x\UNIV. x \ a \ True)" by simp + thus "\S. open S \ a \ S \ (\x\S. x \ a \ True)" by - rule +next + fix P Q + assume "\S. open S \ a \ S \ (\x\S. x \ a \ P x)" + and "\T. open T \ a \ T \ (\x\T. x \ a \ Q x)" + then obtain S T where + "open S \ a \ S \ (\x\S. x \ a \ P x)" + "open T \ a \ T \ (\x\T. x \ a \ Q x)" by auto + hence "open (S \ T) \ a \ S \ T \ (\x\(S \ T). x \ a \ P x \ Q x)" + by (simp add: open_Int) + thus "\S. open S \ a \ S \ (\x\S. 
x \ a \ P x \ Q x)" by - rule +qed auto lemma eventually_at: fixes a :: "'a::metric_space" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/List.thy --- a/src/HOL/List.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/List.thy Tue May 04 20:30:22 2010 +0200 @@ -3039,6 +3039,9 @@ lemma length_replicate [simp]: "length (replicate n x) = n" by (induct n) auto +lemma Ex_list_of_length: "\xs. length xs = n" +by (rule exI[of _ "replicate n undefined"]) simp + lemma map_replicate [simp]: "map f (replicate n x) = replicate n (f x)" by (induct n) auto diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Log.thy --- a/src/HOL/Log.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Log.thy Tue May 04 20:30:22 2010 +0200 @@ -145,6 +145,21 @@ apply (drule_tac a = "log a x" in powr_less_mono, auto) done +lemma log_inj: assumes "1 < b" shows "inj_on (log b) {0 <..}" +proof (rule inj_onI, simp) + fix x y assume pos: "0 < x" "0 < y" and *: "log b x = log b y" + show "x = y" + proof (cases rule: linorder_cases) + assume "x < y" hence "log b x < log b y" + using log_less_cancel_iff[OF `1 < b`] pos by simp + thus ?thesis using * by simp + next + assume "y < x" hence "log b y < log b x" + using log_less_cancel_iff[OF `1 < b`] pos by simp + thus ?thesis using * by simp + qed simp +qed + lemma log_le_cancel_iff [simp]: "[| 1 < a; 0 < x; 0 < y |] ==> (log a x \ log a y) = (x \ y)" by (simp add: linorder_not_less [symmetric]) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Matrix/cplex/matrixlp.ML --- a/src/HOL/Matrix/cplex/matrixlp.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Matrix/cplex/matrixlp.ML Tue May 04 20:30:22 2010 +0200 @@ -81,7 +81,7 @@ fun matrix_simplify th = let val simp_th = matrix_compute (cprop_of th) - val th = strip_shyps (equal_elim simp_th th) + val th = Thm.strip_shyps (equal_elim simp_th th) fun removeTrue th = removeTrue (implies_elim th TrueI) handle _ => th (* FIXME avoid handle _ *) in removeTrue th diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Metis_Examples/Abstraction.thy --- a/src/HOL/Metis_Examples/Abstraction.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Metis_Examples/Abstraction.thy Tue May 04 20:30:22 2010 +0200 @@ -23,13 +23,11 @@ declare [[ atp_problem_prefix = "Abstraction__Collect_triv" ]] lemma (*Collect_triv:*) "a \ {x. P x} ==> P a" -proof (neg_clausify) -assume 0: "(a\'a\type) \ Collect (P\'a\type \ bool)" -assume 1: "\ (P\'a\type \ bool) (a\'a\type)" -have 2: "(P\'a\type \ bool) (a\'a\type)" - by (metis CollectD 0) -show "False" - by (metis 2 1) +proof - + assume "a \ {x. P x}" + hence "a \ P" by (metis Collect_def) + hence "P a" by (metis mem_def) + thus "P a" by metis qed lemma Collect_triv: "a \ {x. P x} ==> P a" @@ -38,76 +36,52 @@ declare [[ atp_problem_prefix = "Abstraction__Collect_mp" ]] lemma "a \ {x. P x --> Q x} ==> a \ {x. P x} ==> a \ {x. 
Q x}" - by (metis CollectI Collect_imp_eq ComplD UnE mem_Collect_eq); - --{*34 secs*} + by (metis Collect_imp_eq ComplD UnE) declare [[ atp_problem_prefix = "Abstraction__Sigma_triv" ]] lemma "(a,b) \ Sigma A B ==> a \ A & b \ B a" -proof (neg_clausify) -assume 0: "(a\'a\type, b\'b\type) \ Sigma (A\'a\type set) (B\'a\type \ 'b\type set)" -assume 1: "(a\'a\type) \ (A\'a\type set) \ (b\'b\type) \ (B\'a\type \ 'b\type set) a" -have 2: "(a\'a\type) \ (A\'a\type set)" - by (metis SigmaD1 0) -have 3: "(b\'b\type) \ (B\'a\type \ 'b\type set) (a\'a\type)" - by (metis SigmaD2 0) -have 4: "(b\'b\type) \ (B\'a\type \ 'b\type set) (a\'a\type)" - by (metis 1 2) -show "False" - by (metis 3 4) +proof - + assume A1: "(a, b) \ Sigma A B" + hence F1: "b \ B a" by (metis mem_Sigma_iff) + have F2: "a \ A" by (metis A1 mem_Sigma_iff) + have "b \ B a" by (metis F1) + thus "a \ A \ b \ B a" by (metis F2) qed lemma Sigma_triv: "(a,b) \ Sigma A B ==> a \ A & b \ B a" by (metis SigmaD1 SigmaD2) declare [[ atp_problem_prefix = "Abstraction__Sigma_Collect" ]] -lemma "(a,b) \ (SIGMA x: A. {y. x = f y}) ==> a \ A & a = f b" -(*???metis says this is satisfiable! +lemma "(a, b) \ (SIGMA x:A. {y. x = f y}) \ a \ A \ a = f b" +(* Metis says this is satisfiable! by (metis CollectD SigmaD1 SigmaD2) *) by (meson CollectD SigmaD1 SigmaD2) -(*single-step*) -lemma "(a,b) \ (SIGMA x: A. {y. x = f y}) ==> a \ A & a = f b" -by (metis SigmaD1 SigmaD2 insert_def singleton_conv2 Un_empty_right vimage_Collect_eq vimage_def vimage_singleton_eq) - +lemma "(a, b) \ (SIGMA x:A. {y. x = f y}) \ a \ A \ a = f b" +by (metis mem_Sigma_iff singleton_conv2 vimage_Collect_eq vimage_singleton_eq) -lemma "(a,b) \ (SIGMA x: A. {y. x = f y}) ==> a \ A & a = f b" -proof (neg_clausify) -assume 0: "(a\'a\type, b\'b\type) -\ Sigma (A\'a\type set) - (COMBB Collect (COMBC (COMBB COMBB op =) (f\'b\type \ 'a\type)))" -assume 1: "(a\'a\type) \ (A\'a\type set) \ a \ (f\'b\type \ 'a\type) (b\'b\type)" -have 2: "(a\'a\type) \ (A\'a\type set)" - by (metis 0 SigmaD1) -have 3: "(b\'b\type) -\ COMBB Collect (COMBC (COMBB COMBB op =) (f\'b\type \ 'a\type)) (a\'a\type)" - by (metis 0 SigmaD2) -have 4: "(b\'b\type) \ Collect (COMBB (op = (a\'a\type)) (f\'b\type \ 'a\type))" - by (metis 3) -have 5: "(f\'b\type \ 'a\type) (b\'b\type) \ (a\'a\type)" - by (metis 1 2) -have 6: "(f\'b\type \ 'a\type) (b\'b\type) = (a\'a\type)" - by (metis 4 vimage_singleton_eq insert_def singleton_conv2 Un_empty_right vimage_Collect_eq vimage_def) -show "False" - by (metis 5 6) +lemma "(a, b) \ (SIGMA x:A. {y. x = f y}) \ a \ A \ a = f b" +proof - + assume A1: "(a, b) \ (SIGMA x:A. {y. x = f y})" + have F1: "\u. {u} = op = u" by (metis singleton_conv2 Collect_def) + have F2: "\y w v. v \ w -` op = y \ w v = y" + by (metis F1 vimage_singleton_eq) + have F3: "\x w. (\R. w (x R)) = x -` w" + by (metis vimage_Collect_eq Collect_def) + show "a \ A \ a = f b" by (metis A1 F2 F3 mem_Sigma_iff Collect_def) qed -(*Alternative structured proof, untyped*) -lemma "(a,b) \ (SIGMA x: A. {y. x = f y}) ==> a \ A & a = f b" -proof (neg_clausify) -assume 0: "(a, b) \ Sigma A (COMBB Collect (COMBC (COMBB COMBB op =) f))" -have 1: "b \ Collect (COMBB (op = a) f)" - by (metis 0 SigmaD2) -have 2: "f b = a" - by (metis 1 vimage_Collect_eq singleton_conv2 insert_def Un_empty_right vimage_singleton_eq vimage_def) -assume 3: "a \ A \ a \ f b" -have 4: "a \ A" - by (metis 0 SigmaD1) -have 5: "f b \ a" - by (metis 4 3) -show "False" - by (metis 5 2) +(* Alternative structured proof *) +lemma "(a, b) \ (SIGMA x:A. {y. 
x = f y}) \ a \ A \ a = f b" +proof - + assume A1: "(a, b) \ (SIGMA x:A. {y. x = f y})" + hence F1: "a \ A" by (metis mem_Sigma_iff) + have "b \ {R. a = f R}" by (metis A1 mem_Sigma_iff) + hence F2: "b \ (\R. a = f R)" by (metis Collect_def) + hence "a = f b" by (unfold mem_def) + thus "a \ A \ a = f b" by (metis F1) qed @@ -116,56 +90,40 @@ by (metis Collect_mem_eq SigmaD2) lemma "(cl,f) \ CLF ==> CLF = (SIGMA cl: CL.{f. f \ pset cl}) ==> f \ pset cl" -proof (neg_clausify) -assume 0: "(cl, f) \ CLF" -assume 1: "CLF = Sigma CL (COMBB Collect (COMBB (COMBC op \) pset))" -assume 2: "f \ pset cl" -have 3: "\X1 X2. X2 \ COMBB Collect (COMBB (COMBC op \) pset) X1 \ (X1, X2) \ CLF" - by (metis SigmaD2 1) -have 4: "\X1 X2. X2 \ pset X1 \ (X1, X2) \ CLF" - by (metis 3 Collect_mem_eq) -have 5: "(cl, f) \ CLF" - by (metis 2 4) -show "False" - by (metis 5 0) +proof - + assume A1: "(cl, f) \ CLF" + assume A2: "CLF = (SIGMA cl:CL. {f. f \ pset cl})" + have F1: "\v. (\R. R \ v) = v" by (metis Collect_mem_eq Collect_def) + have "\v u. (u, v) \ CLF \ v \ {R. R \ pset u}" by (metis A2 mem_Sigma_iff) + hence "\v u. (u, v) \ CLF \ v \ pset u" by (metis F1 Collect_def) + hence "f \ pset cl" by (metis A1) + thus "f \ pset cl" by metis qed declare [[ atp_problem_prefix = "Abstraction__Sigma_Collect_Pi" ]] lemma "(cl,f) \ (SIGMA cl: CL. {f. f \ pset cl \ pset cl}) ==> f \ pset cl \ pset cl" -proof (neg_clausify) -assume 0: "f \ Pi (pset cl) (COMBK (pset cl))" -assume 1: "(cl, f) -\ Sigma CL - (COMBB Collect - (COMBB (COMBC op \) (COMBS (COMBB Pi pset) (COMBB COMBK pset))))" -show "False" -(* by (metis 0 Collect_mem_eq SigmaD2 1) ??doesn't terminate*) - by (insert 0 1, simp add: COMBB_def COMBS_def COMBC_def) +proof - + assume A1: "(cl, f) \ (SIGMA cl:CL. {f. f \ pset cl \ pset cl})" + have F1: "\v. (\R. R \ v) = v" by (metis Collect_mem_eq Collect_def) + have "f \ {R. R \ pset cl \ pset cl}" using A1 by simp + hence "f \ pset cl \ pset cl" by (metis F1 Collect_def) + thus "f \ pset cl \ pset cl" by metis qed - declare [[ atp_problem_prefix = "Abstraction__Sigma_Collect_Int" ]] lemma "(cl,f) \ (SIGMA cl: CL. {f. f \ pset cl \ cl}) ==> f \ pset cl \ cl" -proof (neg_clausify) -assume 0: "(cl, f) -\ Sigma CL - (COMBB Collect (COMBB (COMBC op \) (COMBS (COMBB op \ pset) COMBI)))" -assume 1: "f \ pset cl \ cl" -have 2: "f \ COMBB Collect (COMBB (COMBC op \) (COMBS (COMBB op \ pset) COMBI)) cl" - by (insert 0, simp add: COMBB_def) -(* by (metis SigmaD2 0) ??doesn't terminate*) -have 3: "f \ COMBS (COMBB op \ pset) COMBI cl" - by (metis 2 Collect_mem_eq) -have 4: "f \ cl \ pset cl" - by (metis 1 Int_commute) -have 5: "f \ cl \ pset cl" - by (metis 3 Int_commute) -show "False" - by (metis 5 4) +proof - + assume A1: "(cl, f) \ (SIGMA cl:CL. {f. f \ pset cl \ cl})" + have F1: "\v. (\R. R \ v) = v" by (metis Collect_mem_eq Collect_def) + have "f \ {R. R \ pset cl \ cl}" using A1 by simp + hence "f \ Id_on cl `` pset cl" by (metis F1 Int_commute Image_Id_on Collect_def) + hence "f \ Id_on cl `` pset cl" by metis + hence "f \ cl \ pset cl" by (metis Image_Id_on) + thus "f \ pset cl \ cl" by (metis Int_commute) qed @@ -181,19 +139,13 @@ f \ pset cl \ cl" by auto -(*??no longer terminates, with combinators -by (metis Collect_mem_eq Int_def SigmaD2 UnCI Un_absorb1) - --{*@{text Int_def} is redundant*} -*) declare [[ atp_problem_prefix = "Abstraction__CLF_eq_Collect_Int" ]] lemma "(cl,f) \ CLF ==> CLF = (SIGMA cl: CL. {f. 
f \ pset cl \ cl}) ==> f \ pset cl \ cl" by auto -(*??no longer terminates, with combinators -by (metis Collect_mem_eq Int_commute SigmaD2) -*) + declare [[ atp_problem_prefix = "Abstraction__CLF_subset_Collect_Pi" ]] lemma @@ -201,9 +153,7 @@ CLF \ (SIGMA cl': CL. {f. f \ pset cl' \ pset cl'}) ==> f \ pset cl \ pset cl" by fast -(*??no longer terminates, with combinators -by (metis Collect_mem_eq SigmaD2 subsetD) -*) + declare [[ atp_problem_prefix = "Abstraction__CLF_eq_Collect_Pi" ]] lemma @@ -211,9 +161,7 @@ CLF = (SIGMA cl: CL. {f. f \ pset cl \ pset cl}) ==> f \ pset cl \ pset cl" by auto -(*??no longer terminates, with combinators -by (metis Collect_mem_eq SigmaD2 contra_subsetD equalityE) -*) + declare [[ atp_problem_prefix = "Abstraction__CLF_eq_Collect_Pi_mono" ]] lemma @@ -225,37 +173,33 @@ declare [[ atp_problem_prefix = "Abstraction__map_eq_zipA" ]] lemma "map (%x. (f x, g x)) xs = zip (map f xs) (map g xs)" apply (induct xs) -(*sledgehammer*) -apply auto -done + apply (metis map_is_Nil_conv zip.simps(1)) +by auto declare [[ atp_problem_prefix = "Abstraction__map_eq_zipB" ]] lemma "map (%w. (w -> w, w \ w)) xs = zip (map (%w. w -> w) xs) (map (%w. w \ w) xs)" apply (induct xs) -(*sledgehammer*) -apply auto -done + apply (metis Nil_is_map_conv zip_Nil) +by auto declare [[ atp_problem_prefix = "Abstraction__image_evenA" ]] -lemma "(%x. Suc(f x)) ` {x. even x} <= A ==> (\x. even x --> Suc(f x) \ A)"; -(*sledgehammer*) -by auto +lemma "(%x. Suc(f x)) ` {x. even x} <= A ==> (\x. even x --> Suc(f x) \ A)" +by (metis Collect_def image_subset_iff mem_def) declare [[ atp_problem_prefix = "Abstraction__image_evenB" ]] lemma "(%x. f (f x)) ` ((%x. Suc(f x)) ` {x. even x}) <= A ==> (\x. even x --> f (f (Suc(f x))) \ A)"; -(*sledgehammer*) -by auto +by (metis Collect_def imageI image_image image_subset_iff mem_def) declare [[ atp_problem_prefix = "Abstraction__image_curry" ]] lemma "f \ (%u v. b \ u \ v) ` A ==> \u v. P (b \ u \ v) ==> P(f y)" -(*sledgehammer*) +(*sledgehammer*) by auto declare [[ atp_problem_prefix = "Abstraction__image_TimesA" ]] lemma image_TimesA: "(%(x,y). (f x, g y)) ` (A \ B) = (f`A) \ (g`B)" -(*sledgehammer*) +(*sledgehammer*) apply (rule equalityI) (***Even the two inclusions are far too difficult using [[ atp_problem_prefix = "Abstraction__image_TimesA_simpler"]] @@ -283,15 +227,15 @@ declare [[ atp_problem_prefix = "Abstraction__image_TimesB" ]] lemma image_TimesB: - "(%(x,y,z). (f x, g y, h z)) ` (A \ B \ C) = (f`A) \ (g`B) \ (h`C)" -(*sledgehammer*) + "(%(x,y,z). (f x, g y, h z)) ` (A \ B \ C) = (f`A) \ (g`B) \ (h`C)" +(*sledgehammer*) by force declare [[ atp_problem_prefix = "Abstraction__image_TimesC" ]] lemma image_TimesC: "(%(x,y). (x \ x, y \ y)) ` (A \ B) = ((%x. x \ x) ` A) \ ((%y. 
y \ y) ` B)" -(*sledgehammer*) +(*sledgehammer*) by auto end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Metis_Examples/BT.thy --- a/src/HOL/Metis_Examples/BT.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Metis_Examples/BT.thy Tue May 04 20:30:22 2010 +0200 @@ -1,5 +1,6 @@ (* Title: HOL/MetisTest/BT.thy Author: Lawrence C Paulson, Cambridge University Computer Laboratory + Author: Jasmin Blanchette, TU Muenchen Testing the metis method *) @@ -10,7 +11,6 @@ imports Main begin - datatype 'a bt = Lf | Br 'a "'a bt" "'a bt" @@ -66,178 +66,223 @@ text {* \medskip BT simplification *} declare [[ atp_problem_prefix = "BT__n_leaves_reflect" ]] + lemma n_leaves_reflect: "n_leaves (reflect t) = n_leaves t" - apply (induct t) - apply (metis add_right_cancel n_leaves.simps(1) reflect.simps(1)) - apply (metis add_commute n_leaves.simps(2) reflect.simps(2)) - done +proof (induct t) + case Lf thus ?case + proof - + let "?p\<^isub>1 x\<^isub>1" = "x\<^isub>1 \ n_leaves (reflect (Lf::'a bt))" + have "\ ?p\<^isub>1 (Suc 0)" by (metis reflect.simps(1) n_leaves.simps(1)) + hence "\ ?p\<^isub>1 (n_leaves (Lf::'a bt))" by (metis n_leaves.simps(1)) + thus "n_leaves (reflect (Lf::'a bt)) = n_leaves (Lf::'a bt)" by metis + qed +next + case (Br a t1 t2) thus ?case + by (metis n_leaves.simps(2) nat_add_commute reflect.simps(2)) +qed declare [[ atp_problem_prefix = "BT__n_nodes_reflect" ]] + lemma n_nodes_reflect: "n_nodes (reflect t) = n_nodes t" - apply (induct t) - apply (metis reflect.simps(1)) - apply (metis n_nodes.simps(2) nat_add_commute reflect.simps(2)) - done +proof (induct t) + case Lf thus ?case by (metis reflect.simps(1)) +next + case (Br a t1 t2) thus ?case + by (metis class_semiring.semiring_rules(24) n_nodes.simps(2) reflect.simps(2)) +qed declare [[ atp_problem_prefix = "BT__depth_reflect" ]] + lemma depth_reflect: "depth (reflect t) = depth t" - apply (induct t) - apply (metis depth.simps(1) reflect.simps(1)) - apply (metis depth.simps(2) min_max.sup_commute reflect.simps(2)) - done +apply (induct t) + apply (metis depth.simps(1) reflect.simps(1)) +by (metis depth.simps(2) min_max.inf_sup_aci(5) reflect.simps(2)) text {* - The famous relationship between the numbers of leaves and nodes. +The famous relationship between the numbers of leaves and nodes. *} declare [[ atp_problem_prefix = "BT__n_leaves_nodes" ]] + lemma n_leaves_nodes: "n_leaves t = Suc (n_nodes t)" - apply (induct t) - apply (metis n_leaves.simps(1) n_nodes.simps(1)) - apply auto - done +apply (induct t) + apply (metis n_leaves.simps(1) n_nodes.simps(1)) +by auto declare [[ atp_problem_prefix = "BT__reflect_reflect_ident" ]] + lemma reflect_reflect_ident: "reflect (reflect t) = t" - apply (induct t) - apply (metis add_right_cancel reflect.simps(1)); - apply (metis reflect.simps(2)) - done +apply (induct t) + apply (metis reflect.simps(1)) +proof - + fix a :: 'a and t1 :: "'a bt" and t2 :: "'a bt" + assume A1: "reflect (reflect t1) = t1" + assume A2: "reflect (reflect t2) = t2" + have "\V U. reflect (Br U V (reflect t1)) = Br U t1 (reflect V)" + using A1 by (metis reflect.simps(2)) + hence "\V U. Br U t1 (reflect (reflect V)) = reflect (reflect (Br U t1 V))" + by (metis reflect.simps(2)) + hence "\U. reflect (reflect (Br U t1 t2)) = Br U t1 t2" + using A2 by metis + thus "reflect (reflect (Br a t1 t2)) = Br a t1 t2" by blast +qed declare [[ atp_problem_prefix = "BT__bt_map_ident" ]] + lemma bt_map_ident: "bt_map (%x. x) = (%y. 
y)" apply (rule ext) apply (induct_tac y) - apply (metis bt_map.simps(1)) -txt{*BUG involving flex-flex pairs*} -(* apply (metis bt_map.simps(2)) *) -apply auto -done - + apply (metis bt_map.simps(1)) +by (metis bt_map.simps(2)) declare [[ atp_problem_prefix = "BT__bt_map_appnd" ]] + lemma bt_map_appnd: "bt_map f (appnd t u) = appnd (bt_map f t) (bt_map f u)" apply (induct t) - apply (metis appnd.simps(1) bt_map.simps(1)) - apply (metis appnd.simps(2) bt_map.simps(2)) (*slow!!*) -done - + apply (metis appnd.simps(1) bt_map.simps(1)) +by (metis appnd.simps(2) bt_map.simps(2)) declare [[ atp_problem_prefix = "BT__bt_map_compose" ]] + lemma bt_map_compose: "bt_map (f o g) t = bt_map f (bt_map g t)" -apply (induct t) - apply (metis bt_map.simps(1)) -txt{*Metis runs forever*} -(* apply (metis bt_map.simps(2) o_apply)*) -apply auto -done - +apply (induct t) + apply (metis bt_map.simps(1)) +by (metis bt_map.simps(2) o_eq_dest_lhs) declare [[ atp_problem_prefix = "BT__bt_map_reflect" ]] + lemma bt_map_reflect: "bt_map f (reflect t) = reflect (bt_map f t)" - apply (induct t) - apply (metis add_right_cancel bt_map.simps(1) reflect.simps(1)) - apply (metis add_right_cancel bt_map.simps(2) reflect.simps(2)) - done +apply (induct t) + apply (metis bt_map.simps(1) reflect.simps(1)) +by (metis bt_map.simps(2) reflect.simps(2)) declare [[ atp_problem_prefix = "BT__preorder_bt_map" ]] + lemma preorder_bt_map: "preorder (bt_map f t) = map f (preorder t)" - apply (induct t) - apply (metis bt_map.simps(1) map.simps(1) preorder.simps(1)) - apply simp - done +apply (induct t) + apply (metis bt_map.simps(1) map.simps(1) preorder.simps(1)) +by simp declare [[ atp_problem_prefix = "BT__inorder_bt_map" ]] + lemma inorder_bt_map: "inorder (bt_map f t) = map f (inorder t)" - apply (induct t) - apply (metis bt_map.simps(1) inorder.simps(1) map.simps(1)) - apply simp - done +proof (induct t) + case Lf thus ?case + proof - + have "map f [] = []" by (metis map.simps(1)) + hence "map f [] = inorder Lf" by (metis inorder.simps(1)) + hence "inorder (bt_map f Lf) = map f []" by (metis bt_map.simps(1)) + thus "inorder (bt_map f Lf) = map f (inorder Lf)" by (metis inorder.simps(1)) + qed +next + case (Br a t1 t2) thus ?case by simp +qed declare [[ atp_problem_prefix = "BT__postorder_bt_map" ]] + lemma postorder_bt_map: "postorder (bt_map f t) = map f (postorder t)" - apply (induct t) - apply (metis bt_map.simps(1) map.simps(1) postorder.simps(1)) - apply simp - done +apply (induct t) + apply (metis Nil_is_map_conv bt_map.simps(1) postorder.simps(1)) +by simp declare [[ atp_problem_prefix = "BT__depth_bt_map" ]] + lemma depth_bt_map [simp]: "depth (bt_map f t) = depth t" - apply (induct t) - apply (metis bt_map.simps(1) depth.simps(1)) - apply simp - done +apply (induct t) + apply (metis bt_map.simps(1) depth.simps(1)) +by simp declare [[ atp_problem_prefix = "BT__n_leaves_bt_map" ]] + lemma n_leaves_bt_map [simp]: "n_leaves (bt_map f t) = n_leaves t" - apply (induct t) - apply (metis One_nat_def Suc_eq_plus1 bt_map.simps(1) less_add_one less_antisym linorder_neq_iff n_leaves.simps(1)) - apply (metis bt_map.simps(2) n_leaves.simps(2)) - done - +apply (induct t) + apply (metis bt_map.simps(1) n_leaves.simps(1)) +proof - + fix a :: 'b and t1 :: "'b bt" and t2 :: "'b bt" + assume A1: "n_leaves (bt_map f t1) = n_leaves t1" + assume A2: "n_leaves (bt_map f t2) = n_leaves t2" + have "\V U. n_leaves (Br U (bt_map f t1) V) = n_leaves t1 + n_leaves V" + using A1 by (metis n_leaves.simps(2)) + hence "\V U. 
n_leaves (bt_map f (Br U t1 V)) = n_leaves t1 + n_leaves (bt_map f V)" + by (metis bt_map.simps(2)) + hence F1: "\U. n_leaves (bt_map f (Br U t1 t2)) = n_leaves t1 + n_leaves t2" + using A2 by metis + have "n_leaves t1 + n_leaves t2 = n_leaves (Br a t1 t2)" + by (metis n_leaves.simps(2)) + thus "n_leaves (bt_map f (Br a t1 t2)) = n_leaves (Br a t1 t2)" + using F1 by metis +qed declare [[ atp_problem_prefix = "BT__preorder_reflect" ]] + lemma preorder_reflect: "preorder (reflect t) = rev (postorder t)" - apply (induct t) - apply (metis postorder.simps(1) preorder.simps(1) reflect.simps(1) rev_is_Nil_conv) - apply (metis append_Nil Cons_eq_append_conv postorder.simps(2) preorder.simps(2) reflect.simps(2) rev.simps(2) rev_append rev_rev_ident) - done +apply (induct t) + apply (metis Nil_is_rev_conv postorder.simps(1) preorder.simps(1) + reflect.simps(1)) +by (metis append.simps(1) append.simps(2) postorder.simps(2) preorder.simps(2) + reflect.simps(2) rev.simps(2) rev_append rev_swap) declare [[ atp_problem_prefix = "BT__inorder_reflect" ]] + lemma inorder_reflect: "inorder (reflect t) = rev (inorder t)" - apply (induct t) - apply (metis inorder.simps(1) reflect.simps(1) rev.simps(1)) - apply simp - done +apply (induct t) + apply (metis Nil_is_rev_conv inorder.simps(1) reflect.simps(1)) +by simp +(* Slow: +by (metis append.simps(1) append_eq_append_conv2 inorder.simps(2) + reflect.simps(2) rev.simps(2) rev_append) +*) declare [[ atp_problem_prefix = "BT__postorder_reflect" ]] + lemma postorder_reflect: "postorder (reflect t) = rev (preorder t)" - apply (induct t) - apply (metis postorder.simps(1) preorder.simps(1) reflect.simps(1) rev.simps(1)) - apply (metis Cons_eq_appendI postorder.simps(2) preorder.simps(2) reflect.simps(2) rev.simps(2) rev_append self_append_conv2) - done +apply (induct t) + apply (metis Nil_is_rev_conv postorder.simps(1) preorder.simps(1) + reflect.simps(1)) +by (metis preorder_reflect reflect_reflect_ident rev_swap) text {* - Analogues of the standard properties of the append function for lists. +Analogues of the standard properties of the append function for lists. 
*} declare [[ atp_problem_prefix = "BT__appnd_assoc" ]] -lemma appnd_assoc [simp]: - "appnd (appnd t1 t2) t3 = appnd t1 (appnd t2 t3)" - apply (induct t1) - apply (metis appnd.simps(1)) - apply (metis appnd.simps(2)) - done + +lemma appnd_assoc [simp]: "appnd (appnd t1 t2) t3 = appnd t1 (appnd t2 t3)" +apply (induct t1) + apply (metis appnd.simps(1)) +by (metis appnd.simps(2)) declare [[ atp_problem_prefix = "BT__appnd_Lf2" ]] + lemma appnd_Lf2 [simp]: "appnd t Lf = t" - apply (induct t) - apply (metis appnd.simps(1)) - apply (metis appnd.simps(2)) - done +apply (induct t) + apply (metis appnd.simps(1)) +by (metis appnd.simps(2)) + +declare max_add_distrib_left [simp] declare [[ atp_problem_prefix = "BT__depth_appnd" ]] - declare max_add_distrib_left [simp] + lemma depth_appnd [simp]: "depth (appnd t1 t2) = depth t1 + depth t2" - apply (induct t1) - apply (metis add_0 appnd.simps(1) depth.simps(1)) -apply (simp add: ); - done +apply (induct t1) + apply (metis appnd.simps(1) depth.simps(1) plus_nat.simps(1)) +by simp declare [[ atp_problem_prefix = "BT__n_leaves_appnd" ]] + lemma n_leaves_appnd [simp]: "n_leaves (appnd t1 t2) = n_leaves t1 * n_leaves t2" - apply (induct t1) - apply (metis One_nat_def appnd.simps(1) less_irrefl less_linear n_leaves.simps(1) nat_mult_1) - apply (simp add: left_distrib) - done +apply (induct t1) + apply (metis appnd.simps(1) n_leaves.simps(1) nat_mult_1 plus_nat.simps(1) + semiring_norm(111)) +by (simp add: left_distrib) declare [[ atp_problem_prefix = "BT__bt_map_appnd" ]] + lemma (*bt_map_appnd:*) "bt_map f (appnd t1 t2) = appnd (bt_map f t1) (bt_map f t2)" - apply (induct t1) - apply (metis appnd.simps(1) bt_map_appnd) - apply (metis bt_map_appnd) - done +apply (induct t1) + apply (metis appnd.simps(1) bt_map.simps(1)) +by (metis bt_map_appnd) end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Metis_Examples/BigO.thy --- a/src/HOL/Metis_Examples/BigO.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Metis_Examples/BigO.thy Tue May 04 20:30:22 2010 +0200 @@ -23,12 +23,12 @@ apply (case_tac "c = 0", simp) apply (rule_tac x = "1" in exI, simp) apply (rule_tac x = "abs c" in exI, auto) - apply (metis abs_ge_minus_self abs_ge_zero abs_minus_cancel abs_of_nonneg equation_minus_iff Orderings.xt1(6) abs_mult) + apply (metis abs_ge_zero abs_of_nonneg Orderings.xt1(6) abs_mult) done -(*** Now various verions with an increasing modulus ***) +(*** Now various verions with an increasing shrink factor ***) -sledgehammer_params [modulus = 1] +sledgehammer_params [shrink_factor = 1] lemma (*bigo_pos_const:*) "(EX (c::'a::linordered_idom). ALL x. (abs (h x)) <= (c * (abs (f x)))) @@ -37,67 +37,28 @@ apply (case_tac "c = 0", simp) apply (rule_tac x = "1" in exI, simp) apply (rule_tac x = "abs c" in exI, auto) -proof (neg_clausify) -fix c x -have 0: "\(X1\'a\linordered_idom) X2\'a\linordered_idom. \X1 * X2\ = \X2 * X1\" - by (metis abs_mult mult_commute) -have 1: "\(X1\'a\linordered_idom) X2\'a\linordered_idom. - X1 \ (0\'a\linordered_idom) \ \X2\ * X1 = \X2 * X1\" - by (metis abs_mult_pos linorder_linear) -have 2: "\(X1\'a\linordered_idom) X2\'a\linordered_idom. - \ (0\'a\linordered_idom) < X1 * X2 \ - \ (0\'a\linordered_idom) \ X2 \ \ X1 \ (0\'a\linordered_idom)" - by (metis linorder_not_less mult_nonneg_nonpos2) -assume 3: "\x\'b\type. 
- \(h\'b\type \ 'a\linordered_idom) x\ - \ (c\'a\linordered_idom) * \(f\'b\type \ 'a\linordered_idom) x\" -assume 4: "\ \(h\'b\type \ 'a\linordered_idom) (x\'b\type)\ - \ \c\'a\linordered_idom\ * \(f\'b\type \ 'a\linordered_idom) x\" -have 5: "\ \(h\'b\type \ 'a\linordered_idom) (x\'b\type)\ - \ \(c\'a\linordered_idom) * (f\'b\type \ 'a\linordered_idom) x\" - by (metis 4 abs_mult) -have 6: "\(X1\'a\linordered_idom) X2\'a\linordered_idom. - \ X1 \ (0\'a\linordered_idom) \ X1 \ \X2\" - by (metis abs_ge_zero xt1(6)) -have 7: "\(X1\'a\linordered_idom) X2\'a\linordered_idom. - X1 \ \X2\ \ (0\'a\linordered_idom) < X1" - by (metis not_leE 6) -have 8: "(0\'a\linordered_idom) < \(h\'b\type \ 'a\linordered_idom) (x\'b\type)\" - by (metis 5 7) -have 9: "\X1\'a\linordered_idom. - \ \(h\'b\type \ 'a\linordered_idom) (x\'b\type)\ \ X1 \ - (0\'a\linordered_idom) < X1" - by (metis 8 order_less_le_trans) -have 10: "(0\'a\linordered_idom) -< (c\'a\linordered_idom) * \(f\'b\type \ 'a\linordered_idom) (x\'b\type)\" - by (metis 3 9) -have 11: "\ (c\'a\linordered_idom) \ (0\'a\linordered_idom)" - by (metis abs_ge_zero 2 10) -have 12: "\X1\'a\linordered_idom. (c\'a\linordered_idom) * \X1\ = \X1 * c\" - by (metis mult_commute 1 11) -have 13: "\X1\'b\type. - - (h\'b\type \ 'a\linordered_idom) X1 - \ (c\'a\linordered_idom) * \(f\'b\type \ 'a\linordered_idom) X1\" - by (metis 3 abs_le_D2) -have 14: "\X1\'b\type. - - (h\'b\type \ 'a\linordered_idom) X1 - \ \(c\'a\linordered_idom) * (f\'b\type \ 'a\linordered_idom) X1\" - by (metis 0 12 13) -have 15: "\(X1\'a\linordered_idom) X2\'a\linordered_idom. \X1 * \X2\\ = \X1 * X2\" - by (metis abs_mult abs_mult_pos abs_ge_zero) -have 16: "\(X1\'a\linordered_idom) X2\'a\linordered_idom. X1 \ \X2\ \ \ X1 \ X2" - by (metis xt1(6) abs_ge_self) -have 17: "\(X1\'a\linordered_idom) X2\'a\linordered_idom. \ \X1\ \ X2 \ X1 \ \X2\" - by (metis 16 abs_le_D1) -have 18: "\X1\'b\type. - (h\'b\type \ 'a\linordered_idom) X1 - \ \(c\'a\linordered_idom) * (f\'b\type \ 'a\linordered_idom) X1\" - by (metis 17 3 15) -show "False" - by (metis abs_le_iff 5 18 14) +proof - + fix c :: 'a and x :: 'b + assume A1: "\x. \h x\ \ c * \f x\" + have F1: "\x\<^isub>1\'a\linordered_idom. 0 \ \x\<^isub>1\" by (metis abs_ge_zero) + have F2: "\x\<^isub>1\'a\linordered_idom. 1 * x\<^isub>1 = x\<^isub>1" by (metis class_semiring.mul_1) + have F3: "\x\<^isub>1 x\<^isub>3. x\<^isub>3 \ \h x\<^isub>1\ \ x\<^isub>3 \ c * \f x\<^isub>1\" by (metis A1 order_trans) + have F4: "\x\<^isub>2 x\<^isub>3\'a\linordered_idom. \x\<^isub>3\ * \x\<^isub>2\ = \x\<^isub>3 * x\<^isub>2\" + by (metis abs_mult) + have F5: "\x\<^isub>3 x\<^isub>1\'a\linordered_idom. 0 \ x\<^isub>1 \ \x\<^isub>3 * x\<^isub>1\ = \x\<^isub>3\ * x\<^isub>1" + by (metis abs_mult_pos) + hence "\x\<^isub>1\0. \x\<^isub>1\'a\linordered_idom\ = \1\ * x\<^isub>1" by (metis F2) + hence "\x\<^isub>1\0. \x\<^isub>1\'a\linordered_idom\ = x\<^isub>1" by (metis F2 abs_one) + hence "\x\<^isub>3. 0 \ \h x\<^isub>3\ \ \c * \f x\<^isub>3\\ = c * \f x\<^isub>3\" by (metis F3) + hence "\x\<^isub>3. \c * \f x\<^isub>3\\ = c * \f x\<^isub>3\" by (metis F1) + hence "\x\<^isub>3. (0\'a) \ \f x\<^isub>3\ \ c * \f x\<^isub>3\ = \c\ * \f x\<^isub>3\" by (metis F5) + hence "\x\<^isub>3. (0\'a) \ \f x\<^isub>3\ \ c * \f x\<^isub>3\ = \c * f x\<^isub>3\" by (metis F4) + hence "\x\<^isub>3. 
c * \f x\<^isub>3\ = \c * f x\<^isub>3\" by (metis F1) + hence "\h x\ \ \c * f x\" by (metis A1) + thus "\h x\ \ \c\ * \f x\" by (metis F4) qed -sledgehammer_params [modulus = 2] +sledgehammer_params [shrink_factor = 2] lemma (*bigo_pos_const:*) "(EX (c::'a::linordered_idom). ALL x. (abs (h x)) <= (c * (abs (f x)))) @@ -106,39 +67,20 @@ apply (case_tac "c = 0", simp) apply (rule_tac x = "1" in exI, simp) apply (rule_tac x = "abs c" in exI, auto); -proof (neg_clausify) -fix c x -have 0: "\(X1\'a\linordered_idom) X2\'a\linordered_idom. \X1 * X2\ = \X2 * X1\" - by (metis abs_mult mult_commute) -assume 1: "\x\'b\type. - \(h\'b\type \ 'a\linordered_idom) x\ - \ (c\'a\linordered_idom) * \(f\'b\type \ 'a\linordered_idom) x\" -assume 2: "\ \(h\'b\type \ 'a\linordered_idom) (x\'b\type)\ - \ \c\'a\linordered_idom\ * \(f\'b\type \ 'a\linordered_idom) x\" -have 3: "\ \(h\'b\type \ 'a\linordered_idom) (x\'b\type)\ - \ \(c\'a\linordered_idom) * (f\'b\type \ 'a\linordered_idom) x\" - by (metis 2 abs_mult) -have 4: "\(X1\'a\linordered_idom) X2\'a\linordered_idom. - \ X1 \ (0\'a\linordered_idom) \ X1 \ \X2\" - by (metis abs_ge_zero xt1(6)) -have 5: "(0\'a\linordered_idom) < \(h\'b\type \ 'a\linordered_idom) (x\'b\type)\" - by (metis not_leE 4 3) -have 6: "(0\'a\linordered_idom) -< (c\'a\linordered_idom) * \(f\'b\type \ 'a\linordered_idom) (x\'b\type)\" - by (metis 1 order_less_le_trans 5) -have 7: "\X1\'a\linordered_idom. (c\'a\linordered_idom) * \X1\ = \X1 * c\" - by (metis abs_ge_zero linorder_not_less mult_nonneg_nonpos2 6 linorder_linear abs_mult_pos mult_commute) -have 8: "\X1\'b\type. - - (h\'b\type \ 'a\linordered_idom) X1 - \ \(c\'a\linordered_idom) * (f\'b\type \ 'a\linordered_idom) X1\" - by (metis 0 7 abs_le_D2 1) -have 9: "\(X1\'a\linordered_idom) X2\'a\linordered_idom. \ \X1\ \ X2 \ X1 \ \X2\" - by (metis abs_ge_self xt1(6) abs_le_D1) -show "False" - by (metis 8 abs_ge_zero abs_mult_pos abs_mult 1 9 3 abs_le_iff) +proof - + fix c :: 'a and x :: 'b + assume A1: "\x. \h x\ \ c * \f x\" + have F1: "\x\<^isub>1\'a\linordered_idom. 1 * x\<^isub>1 = x\<^isub>1" by (metis class_semiring.mul_1) + have F2: "\x\<^isub>2 x\<^isub>3\'a\linordered_idom. \x\<^isub>3\ * \x\<^isub>2\ = \x\<^isub>3 * x\<^isub>2\" + by (metis abs_mult) + have "\x\<^isub>1\0. \x\<^isub>1\'a\linordered_idom\ = x\<^isub>1" by (metis F1 abs_mult_pos abs_one) + hence "\x\<^isub>3. \c * \f x\<^isub>3\\ = c * \f x\<^isub>3\" by (metis A1 abs_ge_zero order_trans) + hence "\x\<^isub>3. 0 \ \f x\<^isub>3\ \ c * \f x\<^isub>3\ = \c * f x\<^isub>3\" by (metis F2 abs_mult_pos) + hence "\h x\ \ \c * f x\" by (metis A1 abs_ge_zero) + thus "\h x\ \ \c\ * \f x\" by (metis F2) qed -sledgehammer_params [modulus = 3] +sledgehammer_params [shrink_factor = 3] lemma (*bigo_pos_const:*) "(EX (c::'a::linordered_idom). ALL x. (abs (h x)) <= (c * (abs (f x)))) @@ -146,31 +88,18 @@ apply auto apply (case_tac "c = 0", simp) apply (rule_tac x = "1" in exI, simp) - apply (rule_tac x = "abs c" in exI, auto); -proof (neg_clausify) -fix c x -assume 0: "\x\'b\type. - \(h\'b\type \ 'a\linordered_idom) x\ - \ (c\'a\linordered_idom) * \(f\'b\type \ 'a\linordered_idom) x\" -assume 1: "\ \(h\'b\type \ 'a\linordered_idom) (x\'b\type)\ - \ \c\'a\linordered_idom\ * \(f\'b\type \ 'a\linordered_idom) x\" -have 2: "\(X1\'a\linordered_idom) X2\'a\linordered_idom. 
- X1 \ \X2\ \ (0\'a\linordered_idom) < X1" - by (metis abs_ge_zero xt1(6) not_leE) -have 3: "\ (c\'a\linordered_idom) \ (0\'a\linordered_idom)" - by (metis abs_ge_zero mult_nonneg_nonpos2 linorder_not_less order_less_le_trans 1 abs_mult 2 0) -have 4: "\(X1\'a\linordered_idom) X2\'a\linordered_idom. \X1 * \X2\\ = \X1 * X2\" - by (metis abs_ge_zero abs_mult_pos abs_mult) -have 5: "\X1\'b\type. - (h\'b\type \ 'a\linordered_idom) X1 - \ \(c\'a\linordered_idom) * (f\'b\type \ 'a\linordered_idom) X1\" - by (metis 4 0 xt1(6) abs_ge_self abs_le_D1) -show "False" - by (metis abs_mult mult_commute 3 abs_mult_pos linorder_linear 0 abs_le_D2 5 1 abs_le_iff) + apply (rule_tac x = "abs c" in exI, auto) +proof - + fix c :: 'a and x :: 'b + assume A1: "\x. \h x\ \ c * \f x\" + have F1: "\x\<^isub>1\'a\linordered_idom. 1 * x\<^isub>1 = x\<^isub>1" by (metis class_semiring.mul_1) + have F2: "\x\<^isub>3 x\<^isub>1\'a\linordered_idom. 0 \ x\<^isub>1 \ \x\<^isub>3 * x\<^isub>1\ = \x\<^isub>3\ * x\<^isub>1" by (metis abs_mult_pos) + hence "\x\<^isub>1\0. \x\<^isub>1\'a\linordered_idom\ = x\<^isub>1" by (metis F1 abs_one) + hence "\x\<^isub>3. 0 \ \f x\<^isub>3\ \ c * \f x\<^isub>3\ = \c\ * \f x\<^isub>3\" by (metis F2 A1 abs_ge_zero order_trans) + thus "\h x\ \ \c\ * \f x\" by (metis A1 abs_mult abs_ge_zero) qed - -sledgehammer_params [modulus = 4] +sledgehammer_params [shrink_factor = 4] lemma (*bigo_pos_const:*) "(EX (c::'a::linordered_idom). ALL x. (abs (h x)) <= (c * (abs (f x)))) @@ -178,35 +107,18 @@ apply auto apply (case_tac "c = 0", simp) apply (rule_tac x = "1" in exI, simp) - apply (rule_tac x = "abs c" in exI, auto); -proof (neg_clausify) -fix c x (*sort/type constraint inserted by hand!*) -have 0: "\(X1\'a\linordered_idom) X2. \X1 * \X2\\ = \X1 * X2\" - by (metis abs_ge_zero abs_mult_pos abs_mult) -assume 1: "\A. \h A\ \ c * \f A\" -have 2: "\X1 X2. \ \X1\ \ X2 \ (0\'a) \ X2" - by (metis abs_ge_zero order_trans) -have 3: "\X1. (0\'a) \ c * \f X1\" - by (metis 1 2) -have 4: "\X1. c * \f X1\ = \c * f X1\" - by (metis 0 abs_of_nonneg 3) -have 5: "\X1. - h X1 \ c * \f X1\" - by (metis 1 abs_le_D2) -have 6: "\X1. - h X1 \ \c * f X1\" - by (metis 4 5) -have 7: "\X1. h X1 \ c * \f X1\" - by (metis 1 abs_le_D1) -have 8: "\X1. h X1 \ \c * f X1\" - by (metis 4 7) -assume 9: "\ \h x\ \ \c\ * \f x\" -have 10: "\ \h x\ \ \c * f x\" - by (metis abs_mult 9) -show "False" - by (metis 6 8 10 abs_leI) + apply (rule_tac x = "abs c" in exI, auto) +proof - + fix c :: 'a and x :: 'b + assume A1: "\x. \h x\ \ c * \f x\" + have "\x\<^isub>1\'a\linordered_idom. 1 * x\<^isub>1 = x\<^isub>1" by (metis class_semiring.mul_1) + hence "\x\<^isub>3. \c * \f x\<^isub>3\\ = c * \f x\<^isub>3\" + by (metis A1 abs_ge_zero order_trans abs_mult_pos abs_one) + hence "\h x\ \ \c * f x\" by (metis A1 abs_ge_zero abs_mult_pos abs_mult) + thus "\h x\ \ \c\ * \f x\" by (metis abs_mult) qed - -sledgehammer_params [sorts] +sledgehammer_params [shrink_factor = 1] lemma bigo_alt_def: "O(f) = {h. EX c. (0 < c & (ALL x. abs (h x) <= c * abs (f x)))}" @@ -232,29 +144,13 @@ declare [[ atp_problem_prefix = "BigO__bigo_refl" ]] lemma bigo_refl [intro]: "f : O(f)" - apply(auto simp add: bigo_def) -proof (neg_clausify) -fix x -assume 0: "\xa. \ \f (x xa)\ \ xa * \f (x xa)\" -have 1: "\X2. X2 \ (1\'b) * X2 \ \ (1\'b) \ (1\'b)" - by (metis mult_le_cancel_right1 order_eq_iff) -have 2: "\X2. 
X2 \ (1\'b) * X2" - by (metis order_eq_iff 1) -show "False" - by (metis 0 2) -qed +apply (auto simp add: bigo_def) +by (metis class_semiring.mul_1 order_refl) declare [[ atp_problem_prefix = "BigO__bigo_zero" ]] lemma bigo_zero: "0 : O(g)" - apply (auto simp add: bigo_def func_zero) -proof (neg_clausify) -fix x -assume 0: "\xa. \ (0\'b) \ xa * \g (x xa)\" -have 1: "\ (0\'b) \ (0\'b)" - by (metis 0 mult_eq_0_iff) -show "False" - by (metis 1 linorder_neq_iff linorder_antisym_conv1) -qed +apply (auto simp add: bigo_def func_zero) +by (metis class_semiring.mul_0 order_refl) lemma bigo_zero2: "O(%x.0) = {%x.0}" apply (auto simp add: bigo_def) @@ -367,103 +263,36 @@ lemma bigo_bounded_alt: "ALL x. 0 <= f x ==> ALL x. f x <= c * g x ==> f : O(g)" apply (auto simp add: bigo_def) -(*Version 1: one-shot proof*) +(* Version 1: one-line proof *) apply (metis abs_le_D1 linorder_class.not_less order_less_le Orderings.xt1(12) abs_mult) done lemma (*bigo_bounded_alt:*) "ALL x. 0 <= f x ==> ALL x. f x <= c * g x ==> - f : O(g)" - apply (auto simp add: bigo_def) -(*Version 2: single-step proof*) -proof (neg_clausify) -fix x -assume 0: "\x. f x \ c * g x" -assume 1: "\xa. \ f (x xa) \ xa * \g (x xa)\" -have 2: "\X3. c * g X3 = f X3 \ \ c * g X3 \ f X3" - by (metis 0 order_antisym_conv) -have 3: "\X3. \ f (x \X3\) \ \X3 * g (x \X3\)\" - by (metis 1 abs_mult) -have 4: "\X1 X3\'b\linordered_idom. X3 \ X1 \ X1 \ \X3\" - by (metis linorder_linear abs_le_D1) -have 5: "\X3::'b. \X3\ * \X3\ = X3 * X3" - by (metis abs_mult_self) -have 6: "\X3. \ X3 * X3 < (0\'b\linordered_idom)" - by (metis not_square_less_zero) -have 7: "\X1 X3::'b. \X1\ * \X3\ = \X3 * X1\" - by (metis abs_mult mult_commute) -have 8: "\X3::'b. X3 * X3 = \X3 * X3\" - by (metis abs_mult 5) -have 9: "\X3. X3 * g (x \X3\) \ f (x \X3\)" - by (metis 3 4) -have 10: "c * g (x \c\) = f (x \c\)" - by (metis 2 9) -have 11: "\X3::'b. \X3\ * \\X3\\ = \X3\ * \X3\" - by (metis abs_idempotent abs_mult 8) -have 12: "\X3::'b. \X3 * \X3\\ = \X3\ * \X3\" - by (metis mult_commute 7 11) -have 13: "\X3::'b. \X3 * \X3\\ = X3 * X3" - by (metis 8 7 12) -have 14: "\X3. X3 \ \X3\ \ X3 < (0\'b)" - by (metis abs_ge_self abs_le_D1 abs_if) -have 15: "\X3. X3 \ \X3\ \ \X3\ < (0\'b)" - by (metis abs_ge_self abs_le_D1 abs_if) -have 16: "\X3. X3 * X3 < (0\'b) \ X3 * \X3\ \ X3 * X3" - by (metis 15 13) -have 17: "\X3::'b. X3 * \X3\ \ X3 * X3" - by (metis 16 6) -have 18: "\X3. X3 \ \X3\ \ \ X3 < (0\'b)" - by (metis mult_le_cancel_left 17) -have 19: "\X3::'b. X3 \ \X3\" - by (metis 18 14) -have 20: "\ f (x \c\) \ \f (x \c\)\" - by (metis 3 10) -show "False" - by (metis 20 19) + f : O(g)" +apply (auto simp add: bigo_def) +(* Version 2: structured proof *) +proof - + assume "\x. f x \ c * g x" + thus "\c. \x. f x \ c * \g x\" by (metis abs_mult abs_ge_self order_trans) qed +text{*So here is the easier (and more natural) problem using transitivity*} +declare [[ atp_problem_prefix = "BigO__bigo_bounded_alt_trans" ]] +lemma "ALL x. 0 <= f x ==> ALL x. f x <= c * g x ==> f : O(g)" +apply (auto simp add: bigo_def) +(* Version 1: one-line proof *) +by (metis abs_ge_self abs_mult order_trans) text{*So here is the easier (and more natural) problem using transitivity*} declare [[ atp_problem_prefix = "BigO__bigo_bounded_alt_trans" ]] lemma "ALL x. 0 <= f x ==> ALL x. 
f x <= c * g x ==> f : O(g)" apply (auto simp add: bigo_def) - (*Version 1: one-shot proof*) - apply (metis Orderings.leD Orderings.leI abs_ge_self abs_le_D1 abs_mult abs_of_nonneg order_le_less) - done - -text{*So here is the easier (and more natural) problem using transitivity*} -declare [[ atp_problem_prefix = "BigO__bigo_bounded_alt_trans" ]] -lemma "ALL x. 0 <= f x ==> ALL x. f x <= c * g x ==> f : O(g)" - apply (auto simp add: bigo_def) -(*Version 2: single-step proof*) -proof (neg_clausify) -fix x -assume 0: "\A\'a\type. - (f\'a\type \ 'b\linordered_idom) A - \ (c\'b\linordered_idom) * (g\'a\type \ 'b\linordered_idom) A" -assume 1: "\A\'b\linordered_idom. - \ (f\'a\type \ 'b\linordered_idom) ((x\'b\linordered_idom \ 'a\type) A) - \ A * \(g\'a\type \ 'b\linordered_idom) (x A)\" -have 2: "\X2\'a\type. - \ (c\'b\linordered_idom) * (g\'a\type \ 'b\linordered_idom) X2 - < (f\'a\type \ 'b\linordered_idom) X2" - by (metis 0 linorder_not_le) -have 3: "\X2\'b\linordered_idom. - \ (f\'a\type \ 'b\linordered_idom) ((x\'b\linordered_idom \ 'a\type) \X2\) - \ \X2 * (g\'a\type \ 'b\linordered_idom) (x \X2\)\" - by (metis abs_mult 1) -have 4: "\X2\'b\linordered_idom. - \X2 * (g\'a\type \ 'b\linordered_idom) ((x\'b\linordered_idom \ 'a\type) \X2\)\ - < (f\'a\type \ 'b\linordered_idom) (x \X2\)" - by (metis 3 linorder_not_less) -have 5: "\X2\'b\linordered_idom. - X2 * (g\'a\type \ 'b\linordered_idom) ((x\'b\linordered_idom \ 'a\type) \X2\) - < (f\'a\type \ 'b\linordered_idom) (x \X2\)" - by (metis abs_less_iff 4) -show "False" - by (metis 2 5) +(* Version 2: structured proof *) +proof - + assume "\x. f x \ c * g x" + thus "\c. \x. f x \ c * \g x\" by (metis abs_mult abs_ge_self order_trans) qed - lemma bigo_bounded: "ALL x. 0 <= f x ==> ALL x. f x <= g x ==> f : O(g)" apply (erule bigo_bounded_alt [of f 1 g]) @@ -473,63 +302,37 @@ declare [[ atp_problem_prefix = "BigO__bigo_bounded2" ]] lemma bigo_bounded2: "ALL x. lb x <= f x ==> ALL x. f x <= lb x + g x ==> f : lb +o O(g)" - apply (rule set_minus_imp_plus) - apply (rule bigo_bounded) - apply (auto simp add: diff_minus fun_Compl_def func_plus) - prefer 2 - apply (drule_tac x = x in spec)+ - apply arith (*not clear that it's provable otherwise*) -proof (neg_clausify) -fix x -assume 0: "\y. lb y \ f y" -assume 1: "\ (0\'b) \ f x + - lb x" -have 2: "\X3. (0\'b) + X3 = X3" - by (metis diff_eq_eq right_minus_eq) -have 3: "\ (0\'b) \ f x - lb x" - by (metis 1 diff_minus) -have 4: "\ (0\'b) + lb x \ f x" - by (metis 3 le_diff_eq) -show "False" - by (metis 4 2 0) +apply (rule set_minus_imp_plus) +apply (rule bigo_bounded) + apply (auto simp add: diff_minus fun_Compl_def func_plus) + prefer 2 + apply (drule_tac x = x in spec)+ + apply (metis add_right_mono class_semiring.semiring_rules(24) diff_add_cancel diff_minus_eq_add le_less order_trans) +proof - + fix x :: 'a + assume "\x. lb x \ f x" + thus "(0\'b) \ f x + - lb x" by (metis not_leE diff_minus less_iff_diff_less_0 less_le_not_le) qed declare [[ atp_problem_prefix = "BigO__bigo_abs" ]] lemma bigo_abs: "(%x. abs(f x)) =o O(f)" - apply (unfold bigo_def) - apply auto -proof (neg_clausify) -fix x -assume 0: "\xa. \ \f (x xa)\ \ xa * \f (x xa)\" -have 1: "\X2. X2 \ (1\'b) * X2 \ \ (1\'b) \ (1\'b)" - by (metis mult_le_cancel_right1 order_eq_iff) -have 2: "\X2. X2 \ (1\'b) * X2" - by (metis order_eq_iff 1) -show "False" - by (metis 0 2) -qed +apply (unfold bigo_def) +apply auto +by (metis class_semiring.mul_1 order_refl) declare [[ atp_problem_prefix = "BigO__bigo_abs2" ]] lemma bigo_abs2: "f =o O(%x. 
abs(f x))" - apply (unfold bigo_def) - apply auto -proof (neg_clausify) -fix x -assume 0: "\xa. \ \f (x xa)\ \ xa * \f (x xa)\" -have 1: "\X2. X2 \ (1\'b) * X2 \ \ (1\'b) \ (1\'b)" - by (metis mult_le_cancel_right1 order_eq_iff) -have 2: "\X2. X2 \ (1\'b) * X2" - by (metis order_eq_iff 1) -show "False" - by (metis 0 2) -qed +apply (unfold bigo_def) +apply auto +by (metis class_semiring.mul_1 order_refl) lemma bigo_abs3: "O(f) = O(%x. abs(f x))" - apply (rule equalityI) - apply (rule bigo_elt_subset) - apply (rule bigo_abs2) - apply (rule bigo_elt_subset) - apply (rule bigo_abs) -done +proof - + have F1: "\v u. u \ O(v) \ O(u) \ O(v)" by (metis bigo_elt_subset) + have F2: "\u. (\R. \u R\) \ O(u)" by (metis bigo_abs) + have "\u. u \ O(\R. \u R\)" by (metis bigo_abs2) + thus "O(f) = O(\x. \f x\)" using F1 F2 by auto +qed lemma bigo_abs4: "f =o g +o O(h) ==> (%x. abs (f x)) =o (%x. abs (g x)) +o O(h)" @@ -599,63 +402,9 @@ abs_mult mult_pos_pos) apply (erule ssubst) apply (subst abs_mult) -(*not qute BigO__bigo_mult_simpler_1 (a hard problem!) as abs_mult has - just been done*) -proof (neg_clausify) -fix a c b ca x -assume 0: "(0\'b\linordered_idom) < (c\'b\linordered_idom)" -assume 1: "\(a\'a \ 'b\linordered_idom) (x\'a)\ -\ (c\'b\linordered_idom) * \(f\'a \ 'b\linordered_idom) x\" -assume 2: "\(b\'a \ 'b\linordered_idom) (x\'a)\ -\ (ca\'b\linordered_idom) * \(g\'a \ 'b\linordered_idom) x\" -assume 3: "\ \(a\'a \ 'b\linordered_idom) (x\'a)\ * - \(b\'a \ 'b\linordered_idom) x\ - \ (c\'b\linordered_idom) * \(f\'a \ 'b\linordered_idom) x\ * - ((ca\'b\linordered_idom) * \(g\'a \ 'b\linordered_idom) x\)" -have 4: "\c\'b\linordered_idom\ = c" - by (metis abs_of_pos 0) -have 5: "\X1\'b\linordered_idom. (c\'b\linordered_idom) * \X1\ = \c * X1\" - by (metis abs_mult 4) -have 6: "(0\'b\linordered_idom) = (1\'b\linordered_idom) \ -(0\'b\linordered_idom) < (1\'b\linordered_idom)" - by (metis abs_not_less_zero abs_one linorder_neqE_linordered_idom) -have 7: "(0\'b\linordered_idom) < (1\'b\linordered_idom)" - by (metis 6 one_neq_zero) -have 8: "\1\'b\linordered_idom\ = (1\'b\linordered_idom)" - by (metis abs_of_pos 7) -have 9: "\X1\'b\linordered_idom. (0\'b\linordered_idom) \ (c\'b\linordered_idom) * \X1\" - by (metis abs_ge_zero 5) -have 10: "\X1\'b\linordered_idom. X1 * (1\'b\linordered_idom) = X1" - by (metis mult_cancel_right2 mult_commute) -have 11: "\X1\'b\linordered_idom. \\X1\\ = \X1\ * \1\'b\linordered_idom\" - by (metis abs_mult abs_idempotent 10) -have 12: "\X1\'b\linordered_idom. \\X1\\ = \X1\" - by (metis 11 8 10) -have 13: "\X1\'b\linordered_idom. 
(0\'b\linordered_idom) \ \X1\" - by (metis abs_ge_zero 12) -have 14: "\ (0\'b\linordered_idom) - \ (c\'b\linordered_idom) * \(f\'a \ 'b\linordered_idom) (x\'a)\ \ -\ (0\'b\linordered_idom) \ \(b\'a \ 'b\linordered_idom) x\ \ -\ \b x\ \ (ca\'b\linordered_idom) * \(g\'a \ 'b\linordered_idom) x\ \ -\ \(a\'a \ 'b\linordered_idom) x\ \ c * \f x\" - by (metis 3 mult_mono) -have 15: "\ (0\'b\linordered_idom) \ \(b\'a \ 'b\linordered_idom) (x\'a)\ \ -\ \b x\ \ (ca\'b\linordered_idom) * \(g\'a \ 'b\linordered_idom) x\ \ -\ \(a\'a \ 'b\linordered_idom) x\ - \ (c\'b\linordered_idom) * \(f\'a \ 'b\linordered_idom) x\" - by (metis 14 9) -have 16: "\ \(b\'a \ 'b\linordered_idom) (x\'a)\ - \ (ca\'b\linordered_idom) * \(g\'a \ 'b\linordered_idom) x\ \ -\ \(a\'a \ 'b\linordered_idom) x\ - \ (c\'b\linordered_idom) * \(f\'a \ 'b\linordered_idom) x\" - by (metis 15 13) -have 17: "\ \(a\'a \ 'b\linordered_idom) (x\'a)\ - \ (c\'b\linordered_idom) * \(f\'a \ 'b\linordered_idom) x\" - by (metis 16 2) -show 18: "False" - by (metis 17 1) -qed - +(* not quite as hard as BigO__bigo_mult_simpler_1 (a hard problem!) since + abs_mult has just been done *) +by (metis abs_ge_zero mult_mono') declare [[ atp_problem_prefix = "BigO__bigo_mult2" ]] lemma bigo_mult2 [intro]: "f *o O(g) <= O(f * g)" @@ -674,7 +423,7 @@ declare [[ atp_problem_prefix = "BigO__bigo_mult3" ]] lemma bigo_mult3: "f : O(h) ==> g : O(j) ==> f * g : O(h * j)" -by (metis bigo_mult set_times_intro subset_iff) +by (metis bigo_mult set_rev_mp set_times_intro) declare [[ atp_problem_prefix = "BigO__bigo_mult4" ]] lemma bigo_mult4 [intro]:"f : k +o O(h) ==> g * f : (g * k) +o O(g * h)" @@ -811,40 +560,16 @@ by (metis bigo_const1 bigo_elt_subset); lemma bigo_const2 [intro]: "O(%x. c::'b::linordered_idom) <= O(%x. 1)"; -(*??FAILS because the two occurrences of COMBK have different polymorphic types -proof (neg_clausify) -assume 0: "\ O(COMBK (c\'b\linordered_idom)) \ O(COMBK (1\'b\linordered_idom))" -have 1: "COMBK (c\'b\linordered_idom) \ O(COMBK (1\'b\linordered_idom))" -apply (rule notI) -apply (rule 0 [THEN notE]) -apply (rule bigo_elt_subset) -apply assumption; -sorry - by (metis 0 bigo_elt_subset) loops?? -show "False" - by (metis 1 bigo_const1) +(* "thus" had to be replaced by "show" with an explicit reference to "F1" *) +proof - + have F1: "\u. (\Q. u) \ O(\Q. 1)" by (metis bigo_const1) + show "O(\x. c) \ O(\x. 1)" by (metis F1 bigo_elt_subset) qed -*) - apply (rule bigo_elt_subset) - apply (rule bigo_const1) -done declare [[ atp_problem_prefix = "BigO__bigo_const3" ]] lemma bigo_const3: "(c::'a::linordered_field) ~= 0 ==> (%x. 1) : O(%x. c)" apply (simp add: bigo_def) -proof (neg_clausify) -assume 0: "(c\'a\linordered_field) \ (0\'a\linordered_field)" -assume 1: "\A\'a\linordered_field. \ (1\'a\linordered_field) \ A * \c\'a\linordered_field\" -have 2: "(0\'a\linordered_field) = \c\'a\linordered_field\ \ -\ (1\'a\linordered_field) \ (1\'a\linordered_field)" - by (metis 1 field_inverse) -have 3: "\c\'a\linordered_field\ = (0\'a\linordered_field)" - by (metis linorder_neq_iff linorder_antisym_conv1 2) -have 4: "(0\'a\linordered_field) = (c\'a\linordered_field)" - by (metis 3 abs_eq_0) -show "False" - by (metis 0 4) -qed +by (metis abs_eq_0 left_inverse order_refl) lemma bigo_const4: "(c::'a::linordered_field) ~= 0 ==> O(%x. 1) <= O(%x. c)" by (rule bigo_elt_subset, rule bigo_const3, assumption) @@ -856,15 +581,7 @@ declare [[ atp_problem_prefix = "BigO__bigo_const_mult1" ]] lemma bigo_const_mult1: "(%x. 
c * f x) : O(f)" apply (simp add: bigo_def abs_mult) -proof (neg_clausify) -fix x -assume 0: "\xa\'b\linordered_idom. - \ \c\'b\linordered_idom\ * - \(f\'a\type \ 'b\linordered_idom) ((x\'b\linordered_idom \ 'a\type) xa)\ - \ xa * \f (x xa)\" -show "False" - by (metis linorder_neq_iff linorder_antisym_conv1 0) -qed +by (metis le_less) lemma bigo_const_mult2: "O(%x. c * f x) <= O(f)" by (rule bigo_elt_subset, rule bigo_const_mult1) @@ -872,7 +589,7 @@ declare [[ atp_problem_prefix = "BigO__bigo_const_mult3" ]] lemma bigo_const_mult3: "(c::'a::linordered_field) ~= 0 ==> f : O(%x. c * f x)" apply (simp add: bigo_def) -(*sledgehammer [no luck]*); +(*sledgehammer [no luck]*) apply (rule_tac x = "abs(inverse c)" in exI) apply (simp only: abs_mult [symmetric] mult_assoc [symmetric]) apply (subst left_inverse) @@ -1134,15 +851,17 @@ declare [[ atp_problem_prefix = "BigO__bigo_lesso1" ]] lemma bigo_lesso1: "ALL x. f x <= g x ==> f x. max (f x - g x) 0) = 0" + thus "(\x. max (f x - g x) 0) \ O(h)" by (metis bigo_zero) +next + show "\x\'a. f x \ g x \ (\x\'a. max (f x - g x) (0\'b)) = (0\'a \ 'b)" apply (unfold func_zero) apply (rule ext) - apply (simp split: split_max) -done - + by (simp split: split_max) +qed declare [[ atp_problem_prefix = "BigO__bigo_lesso2" ]] lemma bigo_lesso2: "f =o g +o O(h) ==> @@ -1158,8 +877,9 @@ apply (erule thin_rl) (*sledgehammer*); apply (case_tac "0 <= k x - g x") - prefer 2 (*re-order subgoals because I don't know what to put after a structured proof*) - apply (metis abs_ge_zero abs_minus_commute linorder_linear min_max.sup_absorb1 min_max.sup_commute) +(* apply (metis abs_le_iff add_le_imp_le_right diff_minus le_less + le_max_iff_disj min_max.le_supE min_max.sup_absorb2 + min_max.sup_commute) *) proof (neg_clausify) fix x assume 0: "\A. k A \ f A" @@ -1179,6 +899,11 @@ by (metis 5 abs_minus_commute 7 min_max.sup_commute 6) show "False" by (metis min_max.sup_commute min_max.inf_commute min_max.sup_inf_absorb min_max.le_iff_inf 0 max_diff_distrib_left 1 linorder_not_le 8) +next + show "\x\'a. + \\x\'a. (0\'b) \ k x; \x\'a. k x \ f x; \ (0\'b) \ k x - g x\ + \ max (k x - g x) (0\'b) \ \f x - g x\" + by (metis abs_ge_zero le_cases min_max.sup_absorb2) qed declare [[ atp_problem_prefix = "BigO__bigo_lesso3" ]] diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Metis_Examples/Message.thy --- a/src/HOL/Metis_Examples/Message.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Metis_Examples/Message.thy Tue May 04 20:30:22 2010 +0200 @@ -4,11 +4,12 @@ Testing the metis method. *) -theory Message imports Main begin +theory Message +imports Main +begin -(*Needed occasionally with spy_analz_tac, e.g. 
in analz_insert_Key_newK*) lemma strange_Un_eq [simp]: "A \ (B \ A) = B \ A" -by blast +by (metis Un_ac(2) Un_ac(3)) types key = nat @@ -20,7 +21,7 @@ specification (invKey) invKey [simp]: "invKey (invKey K) = K" invKey_symmetric: "all_symmetric --> invKey = id" - by (rule exI [of _ id], auto) +by (metis id_apply) text{*The inverse of a symmetric key is itself; that of a public key @@ -74,33 +75,28 @@ | Snd: "{|X,Y|} \ parts H ==> Y \ parts H" | Body: "Crypt K X \ parts H ==> X \ parts H" - -declare [[ atp_problem_prefix = "Message__parts_mono" ]] lemma parts_mono: "G \ H ==> parts(G) \ parts(H)" apply auto -apply (erule parts.induct) -apply (metis Inj set_mp) -apply (metis Fst) -apply (metis Snd) -apply (metis Body) -done - +apply (erule parts.induct) + apply (metis parts.Inj set_rev_mp) + apply (metis parts.Fst) + apply (metis parts.Snd) +by (metis parts.Body) text{*Equations hold because constructors are injective.*} lemma Friend_image_eq [simp]: "(Friend x \ Friend`A) = (x:A)" -by auto +by (metis agent.inject imageI image_iff) -lemma Key_image_eq [simp]: "(Key x \ Key`A) = (x\A)" -by auto +lemma Key_image_eq [simp]: "(Key x \ Key`A) = (x \ A)" +by (metis image_iff msg.inject(4)) -lemma Nonce_Key_image_eq [simp]: "(Nonce x \ Key`A)" -by auto +lemma Nonce_Key_image_eq [simp]: "Nonce x \ Key`A" +by (metis image_iff msg.distinct(23)) subsubsection{*Inverse of keys *} -declare [[ atp_problem_prefix = "Message__invKey_eq" ]] -lemma invKey_eq [simp]: "(invKey K = invKey K') = (K=K')" +lemma invKey_eq [simp]: "(invKey K = invKey K') = (K = K')" by (metis invKey) @@ -155,7 +151,7 @@ [| X \ parts H; Y \ parts H |] ==> P |] ==> P" by (blast dest: parts.Fst parts.Snd) - declare MPair_parts [elim!] parts.Body [dest!] +declare MPair_parts [elim!] parts.Body [dest!] text{*NB These two rules are UNSAFE in the formal sense, as they discard the compound message. They work well on THIS FILE. @{text MPair_parts} is left as SAFE because it speeds up proofs. @@ -200,7 +196,6 @@ apply (simp only: parts_Un) done -declare [[ atp_problem_prefix = "Message__parts_insert_two" ]] lemma parts_insert2: "parts (insert X (insert Y H)) = parts {X} \ parts {Y} \ parts H" by (metis Un_commute Un_empty_left Un_empty_right Un_insert_left Un_insert_right parts_Un) @@ -237,7 +232,6 @@ lemma parts_idem [simp]: "parts (parts H) = parts H" by blast -declare [[ atp_problem_prefix = "Message__parts_subset_iff" ]] lemma parts_subset_iff [simp]: "(parts G \ parts H) = (G \ parts H)" apply (rule iffI) apply (metis Un_absorb1 Un_subset_iff parts_Un parts_increasing) @@ -247,13 +241,10 @@ lemma parts_trans: "[| X\ parts G; G \ parts H |] ==> X\ parts H" by (blast dest: parts_mono); - -declare [[ atp_problem_prefix = "Message__parts_cut" ]] lemma parts_cut: "[|Y\ parts(insert X G); X\ parts H|] ==> Y\ parts(G \ H)" by (metis Un_insert_left Un_insert_right insert_absorb mem_def parts_Un parts_idem sup1CI) - subsubsection{*Rewrite rules for pulling out atomic messages *} lemmas parts_insert_eq_I = equalityI [OF subsetI parts_insert_subset] @@ -312,8 +303,6 @@ apply (erule parts.induct, auto) done - -declare [[ atp_problem_prefix = "Message__msg_Nonce_supply" ]] lemma msg_Nonce_supply: "\N. \n. 
N\n --> Nonce n \ parts {msg}" apply (induct_tac "msg") apply (simp_all add: parts_insert2) @@ -364,8 +353,6 @@ lemmas not_parts_not_analz = analz_subset_parts [THEN contra_subsetD, standard] - -declare [[ atp_problem_prefix = "Message__parts_analz" ]] lemma parts_analz [simp]: "parts (analz H) = parts H" apply (rule equalityI) apply (metis analz_subset_parts parts_subset_iff) @@ -517,8 +504,8 @@ by (drule analz_mono, blast) -declare [[ atp_problem_prefix = "Message__analz_cut" ]] - declare analz_trans[intro] +declare analz_trans[intro] + lemma analz_cut: "[| Y\ analz (insert X H); X\ analz H |] ==> Y\ analz H" (*TOO SLOW by (metis analz_idem analz_increasing analz_mono insert_absorb insert_mono insert_subset) --{*317s*} @@ -535,7 +522,6 @@ text{*A congruence rule for "analz" *} -declare [[ atp_problem_prefix = "Message__analz_subset_cong" ]] lemma analz_subset_cong: "[| analz G \ analz G'; analz H \ analz H' |] ==> analz (G \ H) \ analz (G' \ H')" @@ -612,9 +598,6 @@ lemma synth_Un: "synth(G) \ synth(H) \ synth(G \ H)" by (intro Un_least synth_mono Un_upper1 Un_upper2) - -declare [[ atp_problem_prefix = "Message__synth_insert" ]] - lemma synth_insert: "insert X (synth H) \ synth(insert X H)" by (metis insert_iff insert_subset subset_insertI synth.Inj synth_mono) @@ -635,7 +618,6 @@ lemma synth_trans: "[| X\ synth G; G \ synth H |] ==> X\ synth H" by (drule synth_mono, blast) -declare [[ atp_problem_prefix = "Message__synth_cut" ]] lemma synth_cut: "[| Y\ synth (insert X H); X\ synth H |] ==> Y\ synth H" (*TOO SLOW by (metis insert_absorb insert_mono insert_subset synth_idem synth_increasing synth_mono) @@ -667,7 +649,6 @@ subsubsection{*Combinations of parts, analz and synth *} -declare [[ atp_problem_prefix = "Message__parts_synth" ]] lemma parts_synth [simp]: "parts (synth H) = parts H \ synth H" apply (rule equalityI) apply (rule subsetI) @@ -679,18 +660,14 @@ apply (metis Un_subset_iff parts_increasing parts_mono synth_increasing) done - - - -declare [[ atp_problem_prefix = "Message__analz_analz_Un" ]] lemma analz_analz_Un [simp]: "analz (analz G \ H) = analz (G \ H)" apply (rule equalityI); apply (metis analz_idem analz_subset_cong order_eq_refl) apply (metis analz_increasing analz_subset_cong order_eq_refl) done -declare [[ atp_problem_prefix = "Message__analz_synth_Un" ]] - declare analz_mono [intro] analz.Fst [intro] analz.Snd [intro] Un_least [intro] +declare analz_mono [intro] analz.Fst [intro] analz.Snd [intro] Un_least [intro] + lemma analz_synth_Un [simp]: "analz (synth G \ H) = analz (G \ H) \ synth G" apply (rule equalityI) apply (rule subsetI) @@ -702,102 +679,81 @@ apply blast done -declare [[ atp_problem_prefix = "Message__analz_synth" ]] lemma analz_synth [simp]: "analz (synth H) = analz H \ synth H" -proof (neg_clausify) -assume 0: "analz (synth H) \ analz H \ synth H" -have 1: "\X1 X3. sup (analz (sup X3 X1)) (synth X3) = analz (sup (synth X3) X1)" - by (metis analz_synth_Un) -have 2: "sup (analz H) (synth H) \ analz (synth H)" - by (metis 0) -have 3: "\X1 X3. sup (synth X3) (analz (sup X3 X1)) = analz (sup (synth X3) X1)" - by (metis 1 Un_commute) -have 4: "\X3. sup (synth X3) (analz X3) = analz (sup (synth X3) {})" - by (metis 3 Un_empty_right) -have 5: "\X3. sup (synth X3) (analz X3) = analz (synth X3)" - by (metis 4 Un_empty_right) -have 6: "\X3. sup (analz X3) (synth X3) = analz (synth X3)" - by (metis 5 Un_commute) -show "False" - by (metis 2 6) +proof - + have "\x\<^isub>2 x\<^isub>1. 
synth x\<^isub>1 \ analz (x\<^isub>1 \ x\<^isub>2) = analz (synth x\<^isub>1 \ x\<^isub>2)" + by (metis Un_commute analz_synth_Un) + hence "\x\<^isub>3 x\<^isub>1. synth x\<^isub>1 \ analz x\<^isub>1 = analz (synth x\<^isub>1 \ UNION {} x\<^isub>3)" + by (metis UN_extend_simps(3)) + hence "\x\<^isub>1. synth x\<^isub>1 \ analz x\<^isub>1 = analz (synth x\<^isub>1)" + by (metis UN_extend_simps(3)) + hence "\x\<^isub>1. analz x\<^isub>1 \ synth x\<^isub>1 = analz (synth x\<^isub>1)" + by (metis Un_commute) + thus "analz (synth H) = analz H \ synth H" by metis qed subsubsection{*For reasoning about the Fake rule in traces *} -declare [[ atp_problem_prefix = "Message__parts_insert_subset_Un" ]] lemma parts_insert_subset_Un: "X\ G ==> parts(insert X H) \ parts G \ parts H" -proof (neg_clausify) -assume 0: "X \ G" -assume 1: "\ parts (insert X H) \ parts G \ parts H" -have 2: "\ parts (insert X H) \ parts (G \ H)" - by (metis 1 parts_Un) -have 3: "\ insert X H \ G \ H" - by (metis 2 parts_mono) -have 4: "X \ G \ H \ \ H \ G \ H" - by (metis 3 insert_subset) -have 5: "X \ G \ H" - by (metis 4 Un_upper2) -have 6: "X \ G" - by (metis 5 UnCI) -show "False" - by (metis 6 0) +proof - + assume "X \ G" + hence "\u. X \ G \ u" by (metis Un_iff) + hence "X \ G \ H \ H \ G \ H" + by (metis Un_upper2) + hence "insert X H \ G \ H" by (metis insert_subset) + hence "parts (insert X H) \ parts (G \ H)" + by (metis parts_mono) + thus "parts (insert X H) \ parts G \ parts H" + by (metis parts_Un) qed -declare [[ atp_problem_prefix = "Message__Fake_parts_insert" ]] lemma Fake_parts_insert: "X \ synth (analz H) ==> parts (insert X H) \ synth (analz H) \ parts H" -proof (neg_clausify) -assume 0: "X \ synth (analz H)" -assume 1: "\ parts (insert X H) \ synth (analz H) \ parts H" -have 2: "\X3. parts X3 \ synth (analz X3) = parts (synth (analz X3))" - by (metis parts_synth parts_analz) -have 3: "\X3. analz X3 \ synth (analz X3) = analz (synth (analz X3))" - by (metis analz_synth analz_idem) -have 4: "\X3. analz X3 \ analz (synth X3)" - by (metis Un_upper1 analz_synth) -have 5: "\ parts (insert X H) \ parts H \ synth (analz H)" - by (metis 1 Un_commute) -have 6: "\ parts (insert X H) \ parts (synth (analz H))" - by (metis 5 2) -have 7: "\ insert X H \ synth (analz H)" - by (metis 6 parts_mono) -have 8: "X \ synth (analz H) \ \ H \ synth (analz H)" - by (metis 7 insert_subset) -have 9: "\ H \ synth (analz H)" - by (metis 8 0) -have 10: "\X3. X3 \ analz (synth X3)" - by (metis analz_subset_iff 4) -have 11: "\X3. X3 \ analz (synth (analz X3))" - by (metis analz_subset_iff 10) -have 12: "\X3. analz (synth (analz X3)) = synth (analz X3) \ - \ analz X3 \ synth (analz X3)" - by (metis Un_absorb1 3) -have 13: "\X3. analz (synth (analz X3)) = synth (analz X3)" - by (metis 12 synth_increasing) -have 14: "\X3. X3 \ synth (analz X3)" - by (metis 11 13) -show "False" - by (metis 9 14) +(*sledgehammer*) +proof - + assume A1: "X \ synth (analz H)" + have F1: "\x\<^isub>1. analz x\<^isub>1 \ synth (analz x\<^isub>1) = analz (synth (analz x\<^isub>1))" + by (metis analz_idem analz_synth) + have F2: "\x\<^isub>1. parts x\<^isub>1 \ synth (analz x\<^isub>1) = parts (synth (analz x\<^isub>1))" + by (metis parts_analz parts_synth) + have F3: "synth (analz H) X" using A1 by (metis mem_def) + have "\x\<^isub>2 x\<^isub>1\msg set. x\<^isub>1 \ sup x\<^isub>1 x\<^isub>2" by (metis inf_sup_ord(3)) + hence F4: "\x\<^isub>1. 
analz x\<^isub>1 \ analz (synth x\<^isub>1)" by (metis analz_synth) + have F5: "X \ synth (analz H)" using F3 by (metis mem_def) + have "\x\<^isub>1. analz x\<^isub>1 \ synth (analz x\<^isub>1) + \ analz (synth (analz x\<^isub>1)) = synth (analz x\<^isub>1)" + using F1 by (metis subset_Un_eq) + hence F6: "\x\<^isub>1. analz (synth (analz x\<^isub>1)) = synth (analz x\<^isub>1)" + by (metis synth_increasing) + have "\x\<^isub>1. x\<^isub>1 \ analz (synth x\<^isub>1)" using F4 by (metis analz_subset_iff) + hence "\x\<^isub>1. x\<^isub>1 \ analz (synth (analz x\<^isub>1))" by (metis analz_subset_iff) + hence "\x\<^isub>1. x\<^isub>1 \ synth (analz x\<^isub>1)" using F6 by metis + hence "H \ synth (analz H)" by metis + hence "H \ synth (analz H) \ X \ synth (analz H)" using F5 by metis + hence "insert X H \ synth (analz H)" by (metis insert_subset) + hence "parts (insert X H) \ parts (synth (analz H))" by (metis parts_mono) + hence "parts (insert X H) \ parts H \ synth (analz H)" using F2 by metis + thus "parts (insert X H) \ synth (analz H) \ parts H" by (metis Un_commute) qed lemma Fake_parts_insert_in_Un: "[|Z \ parts (insert X H); X: synth (analz H)|] ==> Z \ synth (analz H) \ parts H"; -by (blast dest: Fake_parts_insert [THEN subsetD, dest]) +by (blast dest: Fake_parts_insert [THEN subsetD, dest]) -declare [[ atp_problem_prefix = "Message__Fake_analz_insert" ]] - declare analz_mono [intro] synth_mono [intro] +declare analz_mono [intro] synth_mono [intro] + lemma Fake_analz_insert: - "X\ synth (analz G) ==> + "X \ synth (analz G) ==> analz (insert X H) \ synth (analz G) \ analz (G \ H)" -by (metis Un_commute Un_insert_left Un_insert_right Un_upper1 analz_analz_Un analz_mono analz_synth_Un equalityE insert_absorb order_le_less xt1(12)) +by (metis Un_commute Un_insert_left Un_insert_right Un_upper1 analz_analz_Un + analz_mono analz_synth_Un insert_absorb) -declare [[ atp_problem_prefix = "Message__Fake_analz_insert_simpler" ]] -(*simpler problems? BUT METIS CAN'T PROVE +(* Simpler problems? BUT METIS CAN'T PROVE THE LAST STEP lemma Fake_analz_insert_simpler: - "X\ synth (analz G) ==> + "X \ synth (analz G) ==> analz (insert X H) \ synth (analz G) \ analz (G \ H)" apply (rule subsetI) apply (subgoal_tac "x \ analz (synth (analz G) \ H) ") diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Metis_Examples/Tarski.thy --- a/src/HOL/Metis_Examples/Tarski.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Metis_Examples/Tarski.thy Tue May 04 20:30:22 2010 +0200 @@ -514,67 +514,44 @@ "H = {x. (x, f x) \ r & x \ A} ==> lub H cl \ fix f A" apply (simp add: fix_def) apply (rule conjI) -proof (neg_clausify) -assume 0: "H = -Collect - (COMBS (COMBB op \ (COMBC (COMBB op \ (COMBS Pair f)) r)) (COMBC op \ A))" -assume 1: "lub (Collect - (COMBS (COMBB op \ (COMBC (COMBB op \ (COMBS Pair f)) r)) - (COMBC op \ A))) - cl -\ A" -have 2: "lub H cl \ A" - by (metis 1 0) -have 3: "(lub H cl, f (lub H cl)) \ r" - by (metis lubH_le_flubH 0) -have 4: "(f (lub H cl), lub H cl) \ r" - by (metis flubH_le_lubH 0) -have 5: "lub H cl = f (lub H cl) \ (lub H cl, f (lub H cl)) \ r" - by (metis antisymE 4) -have 6: "lub H cl = f (lub H cl)" - by (metis 5 3) -have 7: "(lub H cl, lub H cl) \ r" - by (metis 6 4) -have 8: "\X1. lub H cl \ X1 \ \ refl_on X1 r" - by (metis 7 refl_onD2) -have 9: "\ refl_on A r" - by (metis 8 2) -show "False" - by (metis CO_refl_on 9); -next --{*apparently the way to insert a second structured proof*} - show "H = {x. (x, f x) \ r \ x \ A} \ - f (lub {x. (x, f x) \ r \ x \ A} cl) = lub {x. 
(x, f x) \ r \ x \ A} cl" - proof (neg_clausify) - assume 0: "H = - Collect - (COMBS (COMBB op \ (COMBC (COMBB op \ (COMBS Pair f)) r)) (COMBC op \ A))" - assume 1: "f (lub (Collect - (COMBS (COMBB op \ (COMBC (COMBB op \ (COMBS Pair f)) r)) - (COMBC op \ A))) - cl) \ - lub (Collect - (COMBS (COMBB op \ (COMBC (COMBB op \ (COMBS Pair f)) r)) - (COMBC op \ A))) - cl" - have 2: "f (lub H cl) \ - lub (Collect - (COMBS (COMBB op \ (COMBC (COMBB op \ (COMBS Pair f)) r)) - (COMBC op \ A))) - cl" - by (metis 1 0) - have 3: "f (lub H cl) \ lub H cl" - by (metis 2 0) - have 4: "(lub H cl, f (lub H cl)) \ r" - by (metis lubH_le_flubH 0) - have 5: "(f (lub H cl), lub H cl) \ r" - by (metis flubH_le_lubH 0) - have 6: "lub H cl = f (lub H cl) \ (lub H cl, f (lub H cl)) \ r" - by (metis antisymE 5) - have 7: "lub H cl = f (lub H cl)" - by (metis 6 4) - show "False" - by (metis 3 7) - qed +proof - + assume A1: "H = {x. (x, f x) \ r \ x \ A}" + have F1: "\x\<^isub>2. (\R. R \ x\<^isub>2) = x\<^isub>2" by (metis Collect_def Collect_mem_eq) + have F2: "\x\<^isub>1 x\<^isub>2. (\R. x\<^isub>2 (x\<^isub>1 R)) = x\<^isub>1 -` x\<^isub>2" + by (metis Collect_def vimage_Collect_eq) + have F3: "\x\<^isub>2 x\<^isub>1. (\R. x\<^isub>1 R \ x\<^isub>2) = x\<^isub>1 -` x\<^isub>2" + by (metis Collect_def vimage_def) + have F4: "\x\<^isub>3 x\<^isub>1. (\R. x\<^isub>1 R \ x\<^isub>3 R) = x\<^isub>1 \ x\<^isub>3" + by (metis Collect_def Collect_conj_eq) + have F5: "(\R. (R, f R) \ r \ R \ A) = H" using A1 by (metis Collect_def) + have F6: "\x\<^isub>1\A. glb x\<^isub>1 (dual cl) \ A" by (metis lub_dual_glb lub_in_lattice) + have F7: "\x\<^isub>2 x\<^isub>1. (\R. x\<^isub>1 R \ x\<^isub>2) = (\R. x\<^isub>2 (x\<^isub>1 R))" by (metis F2 F3) + have "(\R. (R, f R) \ r \ A R) = H" by (metis F1 F5) + hence "A \ (\R. r (R, f R)) = H" by (metis F4 F7 Int_commute) + hence "H \ A" by (metis Int_lower1) + hence "H \ A" by metis + hence "glb H (dual cl) \ A" using F6 by metis + hence "glb (\R. (R, f R) \ r \ R \ A) (dual cl) \ A" using F5 by metis + hence "lub (\R. (R, f R) \ r \ R \ A) cl \ A" by (metis lub_dual_glb) + thus "lub {x. (x, f x) \ r \ x \ A} cl \ A" by (metis Collect_def) +next + assume A1: "H = {x. (x, f x) \ r \ x \ A}" + have F1: "\v. (\R. R \ v) = v" by (metis Collect_mem_eq Collect_def) + have F2: "\w u. (\R. u R \ w R) = u \ w" + by (metis Collect_conj_eq Collect_def) + have F3: "\x v. (\R. v R \ x) = v -` x" by (metis vimage_def Collect_def) + hence F4: "A \ (\R. (R, f R)) -` r = H" using A1 by auto + hence F5: "(f (lub H cl), lub H cl) \ r" + by (metis F1 F3 F2 Int_commute flubH_le_lubH Collect_def) + have F6: "(lub H cl, f (lub H cl)) \ r" + by (metis F1 F3 F2 F4 Int_commute lubH_le_flubH Collect_def) + have "(lub H cl, f (lub H cl)) \ r \ f (lub H cl) = lub H cl" + using F5 by (metis antisymE) + hence "f (lub H cl) = lub H cl" using F6 by metis + thus "H = {x. (x, f x) \ r \ x \ A} + \ f (lub {x. (x, f x) \ r \ x \ A} cl) = + lub {x. 
(x, f x) \ r \ x \ A} cl" + by (metis F4 F2 F3 F1 Collect_def Int_commute) qed lemma (in CLF) (*lubH_is_fixp:*) @@ -744,18 +721,13 @@ "[| a \ A; b \ A; interval r a b \ {} |] ==> (| pset = interval r a b, order = induced (interval r a b) r |) \ PartialOrder" -proof (neg_clausify) -assume 0: "a \ A" -assume 1: "b \ A" -assume 2: "\pset = interval r a b, order = induced (interval r a b) r\ \ PartialOrder" -have 3: "\ interval r a b \ A" - by (metis 2 po_subset_po) -have 4: "b \ A \ a \ A" - by (metis 3 interval_subset) -have 5: "a \ A" - by (metis 4 1) -show "False" - by (metis 5 0) +proof - + assume A1: "a \ A" + assume "b \ A" + hence "\u. u \ A \ interval r u b \ A" by (metis interval_subset) + hence "interval r a b \ A" using A1 by metis + hence "interval r a b \ A" by metis + thus ?thesis by (metis po_subset_po) qed lemma (in CLF) intv_CL_lub: @@ -1096,9 +1068,9 @@ apply (blast intro!: Tarski.tarski_full_lemma [OF Tarski.intro, OF CLF.intro Tarski_axioms.intro, OF CL.intro CLF_axioms.intro, OF PO.intro CL_axioms.intro] cl_po cl_co f_cl) done - declare (in CLF) fixf_po[rule del] P_def [simp del] A_def [simp del] r_def [simp del] + +declare (in CLF) fixf_po [rule del] P_def [simp del] A_def [simp del] r_def [simp del] Tarski.tarski_full_lemma [rule del] cl_po [rule del] cl_co [rule del] CompleteLatticeI_simp [rule del] - end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Metis_Examples/TransClosure.thy --- a/src/HOL/Metis_Examples/TransClosure.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Metis_Examples/TransClosure.thy Tue May 04 20:30:22 2010 +0200 @@ -17,45 +17,45 @@ | Intg int -- "integer value" | Addr addr -- "addresses of objects in the heap" -consts R::"(addr \ addr) set" - -consts f:: "addr \ val" +consts R :: "(addr \ addr) set" -declare [[ atp_problem_prefix = "TransClosure__test" ]] -lemma "\ f c = Intg x; \ y. f b = Intg y \ y \ x; (a,b) \ R\<^sup>*; (b,c) \ R\<^sup>* \ - \ \ c. (b,c) \ R \ (a,c) \ R\<^sup>*" -by (metis Transitive_Closure.rtrancl_into_rtrancl converse_rtranclE trancl_reflcl) +consts f :: "addr \ val" -lemma "\ f c = Intg x; \ y. f b = Intg y \ y \ x; (a,b) \ R\<^sup>*; (b,c) \ R\<^sup>* \ - \ \ c. (b,c) \ R \ (a,c) \ R\<^sup>*" -proof (neg_clausify) -assume 0: "f c = Intg x" -assume 1: "(a, b) \ R\<^sup>*" -assume 2: "(b, c) \ R\<^sup>*" -assume 3: "f b \ Intg x" -assume 4: "\A. (b, A) \ R \ (a, A) \ R\<^sup>*" -have 5: "b = c \ b \ Domain R" - by (metis Not_Domain_rtrancl 2) -have 6: "\X1. (a, X1) \ R\<^sup>* \ (b, X1) \ R" - by (metis Transitive_Closure.rtrancl_into_rtrancl 1) -have 7: "\X1. (b, X1) \ R" - by (metis 6 4) -have 8: "b \ Domain R" - by (metis 7 DomainE) -have 9: "c = b" - by (metis 5 8) -have 10: "f b = Intg x" - by (metis 0 9) -show "False" - by (metis 10 3) +lemma "\f c = Intg x; \y. f b = Intg y \ y \ x; (a, b) \ R\<^sup>*; (b, c) \ R\<^sup>*\ + \ \c. (b, c) \ R \ (a, c) \ R\<^sup>*" +(* sledgehammer *) +proof - + assume A1: "f c = Intg x" + assume A2: "\y. f b = Intg y \ y \ x" + assume A3: "(a, b) \ R\<^sup>*" + assume A4: "(b, c) \ R\<^sup>*" + have F1: "f c \ f b" using A2 A1 by metis + have F2: "\u. (b, u) \ R \ (a, u) \ R\<^sup>*" using A3 by (metis transitive_closure_trans(6)) + have F3: "\x. (b, x R b c) \ R \ c = b" using A4 by (metis converse_rtranclE) + have "c \ b" using F1 by metis + hence "\u. (b, u) \ R" using F3 by metis + thus "\c. (b, c) \ R \ (a, c) \ R\<^sup>*" using F2 by metis qed -declare [[ atp_problem_prefix = "TransClosure__test_simpler" ]] -lemma "\ f c = Intg x; \ y. 
f b = Intg y \ y \ x; (a,b) \ R\<^sup>*; (b,c) \ R\<^sup>* \ - \ \ c. (b,c) \ R \ (a,c) \ R\<^sup>*" -apply (erule_tac x="b" in converse_rtranclE) -apply (metis rel_pow_0_E rel_pow_0_I) -apply (metis DomainE Domain_iff Transitive_Closure.rtrancl_into_rtrancl) -done +lemma "\f c = Intg x; \y. f b = Intg y \ y \ x; (a, b) \ R\<^sup>*; (b,c) \ R\<^sup>*\ + \ \c. (b, c) \ R \ (a, c) \ R\<^sup>*" +(* sledgehammer [isar_proof, shrink_factor = 2] *) +proof - + assume A1: "f c = Intg x" + assume A2: "\y. f b = Intg y \ y \ x" + assume A3: "(a, b) \ R\<^sup>*" + assume A4: "(b, c) \ R\<^sup>*" + have "(R\<^sup>*) (a, b)" using A3 by (metis mem_def) + hence F1: "(a, b) \ R\<^sup>*" by (metis mem_def) + have "b \ c" using A1 A2 by metis + hence "\x\<^isub>1. (b, x\<^isub>1) \ R" using A4 by (metis converse_rtranclE) + thus "\c. (b, c) \ R \ (a, c) \ R\<^sup>*" using F1 by (metis transitive_closure_trans(6)) +qed + +lemma "\f c = Intg x; \y. f b = Intg y \ y \ x; (a, b) \ R\<^sup>*; (b, c) \ R\<^sup>*\ + \ \c. (b, c) \ R \ (a, c) \ R\<^sup>*" +apply (erule_tac x = b in converse_rtranclE) + apply metis +by (metis transitive_closure_trans(6)) end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Metis_Examples/set.thy --- a/src/HOL/Metis_Examples/set.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Metis_Examples/set.thy Tue May 04 20:30:22 2010 +0200 @@ -12,20 +12,15 @@ (S(x,y) | ~S(y,z) | Q(Z,Z)) & (Q(X,y) | ~Q(y,Z) | S(X,X))" by metis -(*??But metis can't prove the single-step version...*) - - lemma "P(n::nat) ==> ~P(0) ==> n ~= 0" by metis -sledgehammer_params [modulus = 1] - +sledgehammer_params [shrink_factor = 1] (*multiple versions of this example*) lemma (*equal_union: *) - "(X = Y \ Z) = - (Y \ X \ Z \ X \ (\V. Y \ V \ Z \ V \ X \ V))" + "(X = Y \ Z) = (Y \ X \ Z \ X \ (\V. Y \ V \ Z \ V \ X \ V))" proof (neg_clausify) fix x assume 0: "Y \ X \ X = Y \ Z" @@ -90,7 +85,7 @@ by (metis 31 29) qed -sledgehammer_params [modulus = 2] +sledgehammer_params [shrink_factor = 2] lemma (*equal_union: *) "(X = Y \ Z) = @@ -133,7 +128,7 @@ by (metis 18 17) qed -sledgehammer_params [modulus = 3] +sledgehammer_params [shrink_factor = 3] lemma (*equal_union: *) "(X = Y \ Z) = @@ -168,7 +163,7 @@ (*Example included in TPHOLs paper*) -sledgehammer_params [modulus = 4] +sledgehammer_params [shrink_factor = 4] lemma (*equal_union: *) "(X = Y \ Z) = @@ -269,15 +264,14 @@ "P (f b) \ \s A. (\x \ A. P x) \ f s \ A" "P (f b) \ \s A. (\x \ A. P x) \ f s \ A" "\A. a \ A" - "(\C. (0, 0) \ C \ (\x y. (x, y) \ C \ (Suc x, Suc y) \ C) \ (n, m) \ C) \ Q n \ Q m" -apply (metis atMost_iff) -apply (metis emptyE) -apply (metis insert_iff singletonE) + "(\C. (0, 0) \ C \ (\x y. (x, y) \ C \ (Suc x, Suc y) \ C) \ (n, m) \ C) \ Q n \ Q m" +apply (metis all_not_in_conv)+ +apply (metis mem_def) apply (metis insertCI singletonE zless_le) apply (metis Collect_def Collect_mem_eq) apply (metis Collect_def Collect_mem_eq) apply (metis DiffE) -apply (metis pair_in_Id_conv) +apply (metis pair_in_Id_conv) done end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Mirabelle/Tools/mirabelle_sledgehammer.ML --- a/src/HOL/Mirabelle/Tools/mirabelle_sledgehammer.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Mirabelle/Tools/mirabelle_sledgehammer.ML Tue May 04 20:30:22 2010 +0200 @@ -283,10 +283,7 @@ fun default_atp_name () = hd (#atps (Sledgehammer_Isar.default_params thy [])) handle Empty => error "No ATP available." 
- fun get_prover name = - (case ATP_Manager.get_prover thy name of - SOME prover => (name, prover) - | NONE => error ("Bad ATP: " ^ quote name)) + fun get_prover name = (name, ATP_Manager.get_prover thy name) in (case AList.lookup (op =) args proverK of SOME name => get_prover name @@ -304,11 +301,11 @@ let val {context = ctxt, facts, goal} = Proof.goal st val thy = ProofContext.theory_of ctxt - val change_dir = (fn SOME d => Config.put ATP_Wrapper.destdir d | _ => I) + val change_dir = (fn SOME d => Config.put ATP_Systems.dest_dir d | _ => I) val ctxt' = ctxt |> change_dir dir - |> Config.put ATP_Wrapper.measure_runtime true + |> Config.put ATP_Systems.measure_runtime true val params = Sledgehammer_Isar.default_params thy [] val problem = {subgoal = 1, goal = (ctxt', (facts, goal)), @@ -318,13 +315,14 @@ (case hard_timeout of NONE => I | SOME secs => TimeLimit.timeLimit (Time.fromSeconds secs)) - val ({success, message, relevant_thm_names, + val ({outcome, message, relevant_thm_names, atp_run_time_in_msecs = time_atp, ...}: ATP_Manager.prover_result, time_isa) = time_limit (Mirabelle.cpu_time (prover params (K "") (Time.fromSeconds timeout))) problem in - if success then (message, SH_OK (time_isa, time_atp, relevant_thm_names)) - else (message, SH_FAIL (time_isa, time_atp)) + case outcome of + NONE => (message, SH_OK (time_isa, time_atp, relevant_thm_names)) + | SOME _ => (message, SH_FAIL (time_isa, time_atp)) end handle Sledgehammer_HOL_Clause.TRIVIAL => ("trivial", SH_OK (0, 0, [])) | ERROR msg => ("error: " ^ msg, SH_ERROR) @@ -379,19 +377,22 @@ end +val subgoal_count = Logic.count_prems o prop_of o #goal o Proof.goal + fun run_minimize args named_thms id ({pre=st, log, ...}: Mirabelle.run_args) = let - open ATP_Minimal + open Sledgehammer_Fact_Minimizer open Sledgehammer_Isar val thy = Proof.theory_of st val n0 = length (these (!named_thms)) - val (prover_name, prover) = get_atp thy args + val (prover_name, _) = get_atp thy args val timeout = AList.lookup (op =) args minimize_timeoutK |> Option.map (fst o read_int o explode) |> the_default 5 - val params = default_params thy [("minimize_timeout", Int.toString timeout)] - val minimize = minimize_theorems params prover prover_name 1 + val params = default_params thy + [("atps", prover_name), ("minimize_timeout", Int.toString timeout)] + val minimize = minimize_theorems params 1 (subgoal_count st) val _ = log separator in case minimize st (these (!named_thms)) of @@ -475,7 +476,7 @@ fun invoke args = let - val _ = ATP_Manager.full_types := AList.defined (op =) args full_typesK + val _ = Sledgehammer_Isar.full_types := AList.defined (op =) args full_typesK in Mirabelle.register (init, sledgehammer_action args, done) end end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Multivariate_Analysis/Brouwer_Fixpoint.thy --- a/src/HOL/Multivariate_Analysis/Brouwer_Fixpoint.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Multivariate_Analysis/Brouwer_Fixpoint.thy Tue May 04 20:30:22 2010 +0200 @@ -22,8 +22,6 @@ imports Convex_Euclidean_Space begin -declare norm_scaleR[simp] - lemma brouwer_compactness_lemma: assumes "compact s" "continuous_on s f" "\ (\x\s. (f x = (0::real^'n)))" obtains d where "0 < d" "\x\s. d \ norm(f x)" proof(cases "s={}") case False @@ -131,7 +129,7 @@ lemma image_lemma_2: assumes "finite s" "finite t" "card s = card t" "f ` s \ t" "f ` s \ t" "b \ t" shows "(card {s'. \a\s. (s' = s - {a}) \ f ` s' = t - {b}} = 0) \ (card {s'. \a\s. (s' = s - {a}) \ f ` s' = t - {b}} = 2)" proof(cases "{a\s. 
f ` (s - {a}) = t - {b}} = {}") - case True thus ?thesis apply-apply(rule disjI1, rule image_lemma_0) using assms(1) by(auto simp add:card_0_eq) + case True thus ?thesis apply-apply(rule disjI1, rule image_lemma_0) using assms(1) by auto next let ?M = "{a\s. f ` (s - {a}) = t - {b}}" case False then obtain a where "a\?M" by auto hence a:"a\s" "f ` (s - {a}) = t - {b}" by auto have "f a \ t - {b}" using a and assms by auto @@ -1192,7 +1190,7 @@ have d':"d / real n / 8 > 0" apply(rule divide_pos_pos)+ using d(1) unfolding n_def by auto have *:"uniformly_continuous_on {0..1} f" by(rule compact_uniformly_continuous[OF assms(1) compact_interval]) guess e using *[unfolded uniformly_continuous_on_def,rule_format,OF d'] apply-apply(erule exE,(erule conjE)+) . - note e=this[rule_format,unfolded vector_dist_norm] + note e=this[rule_format,unfolded dist_norm] show ?thesis apply(rule_tac x="min (e/2) (d/real n/8)" in exI) apply(rule) defer apply(rule,rule,rule,rule,rule) apply(erule conjE)+ proof- show "0 < min (e / 2) (d / real n / 8)" using d' e by auto @@ -1204,7 +1202,7 @@ show "\f x $ i - x $ i\ \ norm (f y -f x) + norm (y - x)" apply(rule lem1[rule_format]) using as by auto show "\f x $ i - f z $ i\ \ norm (f x - f z)" "\x $ i - z $ i\ \ norm (x - z)" unfolding vector_minus_component[THEN sym] by(rule component_le_norm)+ - have tria:"norm (y - x) \ norm (y - z) + norm (x - z)" using dist_triangle[of y x z,unfolded vector_dist_norm] + have tria:"norm (y - x) \ norm (y - z) + norm (x - z)" using dist_triangle[of y x z,unfolded dist_norm] unfolding norm_minus_commute by auto also have "\ < e / 2 + e / 2" apply(rule add_strict_mono) using as(4,5) by auto finally show "norm (f y - f x) < d / real n / 8" apply- apply(rule e(2)) using as by auto @@ -1357,14 +1355,14 @@ assumes "compact s" "convex s" "s \ {}" "continuous_on s f" "f ` s \ s" obtains x where "x \ s" "f x = x" proof- have "\e>0. s \ cball 0 e" using compact_imp_bounded[OF assms(1)] unfolding bounded_pos - apply(erule_tac exE,rule_tac x=b in exI) by(auto simp add: vector_dist_norm) + apply(erule_tac exE,rule_tac x=b in exI) by(auto simp add: dist_norm) then guess e apply-apply(erule exE,(erule conjE)+) . note e=this have "\x\ cball 0 e. (f \ closest_point s) x = x" apply(rule_tac brouwer_ball[OF e(1), of 0 "f \ closest_point s"]) apply(rule continuous_on_compose ) apply(rule continuous_on_closest_point[OF assms(2) compact_imp_closed[OF assms(1)] assms(3)]) apply(rule continuous_on_subset[OF assms(4)]) using closest_point_in_set[OF compact_imp_closed[OF assms(1)] assms(3)] apply - defer - using assms(5)[unfolded subset_eq] using e(2)[unfolded subset_eq mem_cball] by(auto simp add:vector_dist_norm) + using assms(5)[unfolded subset_eq] using e(2)[unfolded subset_eq mem_cball] by(auto simp add: dist_norm) then guess x .. note x=this have *:"closest_point s x = x" apply(rule closest_point_self) apply(rule assms(5)[unfolded subset_eq,THEN bspec[where x="x"],unfolded image_iff]) @@ -1382,8 +1380,8 @@ apply(rule,rule,erule conjE) apply(rule brouwer_ball[OF assms]) apply assumption+ apply(rule_tac x=x in bexI) apply assumption+ apply(rule continuous_on_intros)+ unfolding frontier_cball subset_eq Ball_def image_iff apply(rule,rule,erule bexE) - unfolding vector_dist_norm apply(simp add: * norm_minus_commute) . note x = this - hence "scaleR 2 a = scaleR 1 x + scaleR 1 x" by(auto simp add:group_simps) + unfolding dist_norm apply(simp add: * norm_minus_commute) . 
note x = this + hence "scaleR 2 a = scaleR 1 x + scaleR 1 x" by(auto simp add:algebra_simps) hence "a = x" unfolding scaleR_left_distrib[THEN sym] by auto thus False using x using assms by auto qed @@ -1396,7 +1394,7 @@ "interval_bij (a,b) (u,v) = (\x. (\ i. (v$i - u$i) / (b$i - a$i) * x$i) + (\ i. u$i - (v$i - u$i) / (b$i - a$i) * a$i))" apply rule unfolding Cart_eq interval_bij_def vector_component_simps - by(auto simp add:group_simps field_simps add_divide_distrib[THEN sym]) + by(auto simp add: field_simps add_divide_distrib[THEN sym]) lemma continuous_interval_bij: "continuous (at x) (interval_bij (a,b::real^'n) (u,v))" @@ -1432,550 +1430,4 @@ unfolding interval_bij_def split_conv Cart_eq Cart_lambda_beta apply(rule,insert assms,erule_tac x=i in allE) by auto -subsection {*Fashoda meet theorem. *} - -lemma infnorm_2: "infnorm (x::real^2) = max (abs(x$1)) (abs(x$2))" - unfolding infnorm_def UNIV_2 apply(rule Sup_eq) by auto - -lemma infnorm_eq_1_2: "infnorm (x::real^2) = 1 \ - (abs(x$1) \ 1 \ abs(x$2) \ 1 \ (x$1 = -1 \ x$1 = 1 \ x$2 = -1 \ x$2 = 1))" - unfolding infnorm_2 by auto - -lemma infnorm_eq_1_imp: assumes "infnorm (x::real^2) = 1" shows "abs(x$1) \ 1" "abs(x$2) \ 1" - using assms unfolding infnorm_eq_1_2 by auto - -lemma fashoda_unit: fixes f g::"real^1 \ real^2" - assumes "f ` {- 1..1} \ {- 1..1}" "g ` {- 1..1} \ {- 1..1}" - "continuous_on {- 1..1} f" "continuous_on {- 1..1} g" - "f (- 1)$1 = - 1" "f 1$1 = 1" "g (- 1) $2 = -1" "g 1 $2 = 1" - shows "\s\{- 1..1}. \t\{- 1..1}. f s = g t" proof(rule ccontr) - case goal1 note as = this[unfolded bex_simps,rule_format] - def sqprojection \ "\z::real^2. (inverse (infnorm z)) *\<^sub>R z" - def negatex \ "\x::real^2. (vector [-(x$1), x$2])::real^2" - have lem1:"\z::real^2. infnorm(negatex z) = infnorm z" - unfolding negatex_def infnorm_2 vector_2 by auto - have lem2:"\z. z\0 \ infnorm(sqprojection z) = 1" unfolding sqprojection_def - unfolding infnorm_mul[unfolded smult_conv_scaleR] unfolding abs_inverse real_abs_infnorm - unfolding infnorm_eq_0[THEN sym] by auto - let ?F = "(\w::real^2. (f \ vec1 \ (\x. x$1)) w - (g \ vec1 \ (\x. x$2)) w)" - have *:"\i. vec1 ` (\x::real^2. x $ i) ` {- 1..1} = {- 1..1::real^1}" - apply(rule set_ext) unfolding image_iff Bex_def mem_interval apply rule defer - apply(rule_tac x="dest_vec1 x" in exI) apply rule apply(rule_tac x="vec (dest_vec1 x)" in exI) by auto - { fix x assume "x \ (\w. (f \ vec1 \ (\x. x $ 1)) w - (g \ vec1 \ (\x. x $ 2)) w) ` {- 1..1::real^2}" - then guess w unfolding image_iff .. note w = this - hence "x \ 0" using as[of "vec1 (w$1)" "vec1 (w$2)"] unfolding mem_interval by auto} note x0=this - have 21:"\i::2. i\1 \ i=2" using UNIV_2 by auto - have 1:"{- 1<..<1::real^2} \ {}" unfolding interval_eq_empty by auto - have 2:"continuous_on {- 1..1} (negatex \ sqprojection \ ?F)" apply(rule continuous_on_intros continuous_on_component continuous_on_vec1)+ - prefer 2 apply(rule continuous_on_intros continuous_on_component continuous_on_vec1)+ unfolding * - apply(rule assms)+ apply(rule continuous_on_compose,subst sqprojection_def) - apply(rule continuous_on_mul ) apply(rule continuous_at_imp_continuous_on,rule) apply(rule continuous_at_inv[unfolded o_def]) - apply(rule continuous_at_infnorm) unfolding infnorm_eq_0 defer apply(rule continuous_on_id) apply(rule linear_continuous_on) proof- - show "bounded_linear negatex" apply(rule bounded_linearI') unfolding Cart_eq proof(rule_tac[!] 
allI) fix i::2 and x y::"real^2" and c::real - show "negatex (x + y) $ i = (negatex x + negatex y) $ i" "negatex (c *s x) $ i = (c *s negatex x) $ i" - apply-apply(case_tac[!] "i\1") prefer 3 apply(drule_tac[1-2] 21) - unfolding negatex_def by(auto simp add:vector_2 ) qed qed(insert x0, auto) - have 3:"(negatex \ sqprojection \ ?F) ` {- 1..1} \ {- 1..1}" unfolding subset_eq apply rule proof- - case goal1 then guess y unfolding image_iff .. note y=this have "?F y \ 0" apply(rule x0) using y(1) by auto - hence *:"infnorm (sqprojection (?F y)) = 1" unfolding y o_def apply- by(rule lem2[rule_format]) - have "infnorm x = 1" unfolding *[THEN sym] y o_def by(rule lem1[rule_format]) - thus "x\{- 1..1}" unfolding mem_interval infnorm_2 apply- apply rule - proof-case goal1 thus ?case apply(cases "i=1") defer apply(drule 21) by auto qed qed - guess x apply(rule brouwer_weak[of "{- 1..1::real^2}" "negatex \ sqprojection \ ?F"]) - apply(rule compact_interval convex_interval)+ unfolding interior_closed_interval - apply(rule 1 2 3)+ . note x=this - have "?F x \ 0" apply(rule x0) using x(1) by auto - hence *:"infnorm (sqprojection (?F x)) = 1" unfolding o_def by(rule lem2[rule_format]) - have nx:"infnorm x = 1" apply(subst x(2)[THEN sym]) unfolding *[THEN sym] o_def by(rule lem1[rule_format]) - have "\x i. x \ 0 \ (0 < (sqprojection x)$i \ 0 < x$i)" "\x i. x \ 0 \ ((sqprojection x)$i < 0 \ x$i < 0)" - apply- apply(rule_tac[!] allI impI)+ proof- fix x::"real^2" and i::2 assume x:"x\0" - have "inverse (infnorm x) > 0" using x[unfolded infnorm_pos_lt[THEN sym]] by auto - thus "(0 < sqprojection x $ i) = (0 < x $ i)" "(sqprojection x $ i < 0) = (x $ i < 0)" - unfolding sqprojection_def vector_component_simps Cart_nth.scaleR real_scaleR_def - unfolding zero_less_mult_iff mult_less_0_iff by(auto simp add:field_simps) qed - note lem3 = this[rule_format] - have x1:"vec1 (x $ 1) \ {- 1..1::real^1}" "vec1 (x $ 2) \ {- 1..1::real^1}" using x(1) unfolding mem_interval by auto - hence nz:"f (vec1 (x $ 1)) - g (vec1 (x $ 2)) \ 0" unfolding right_minus_eq apply-apply(rule as) by auto - have "x $ 1 = -1 \ x $ 1 = 1 \ x $ 2 = -1 \ x $ 2 = 1" using nx unfolding infnorm_eq_1_2 by auto - thus False proof- fix P Q R S - presume "P \ Q \ R \ S" "P\False" "Q\False" "R\False" "S\False" thus False by auto - next assume as:"x$1 = 1" hence "vec1 (x$1) = 1" unfolding Cart_eq by auto - hence *:"f (vec1 (x $ 1)) $ 1 = 1" using assms(6) by auto - have "sqprojection (f (vec1 (x$1)) - g (vec1 (x$2))) $ 1 < 0" - using x(2)[unfolded o_def Cart_eq,THEN spec[where x=1]] - unfolding as negatex_def vector_2 by auto moreover - from x1 have "g (vec1 (x $ 2)) \ {- 1..1}" apply-apply(rule assms(2)[unfolded subset_eq,rule_format]) by auto - ultimately show False unfolding lem3[OF nz] vector_component_simps * mem_interval - apply(erule_tac x=1 in allE) by auto - next assume as:"x$1 = -1" hence "vec1 (x$1) = - 1" unfolding Cart_eq by auto - hence *:"f (vec1 (x $ 1)) $ 1 = - 1" using assms(5) by auto - have "sqprojection (f (vec1 (x$1)) - g (vec1 (x$2))) $ 1 > 0" - using x(2)[unfolded o_def Cart_eq,THEN spec[where x=1]] - unfolding as negatex_def vector_2 by auto moreover - from x1 have "g (vec1 (x $ 2)) \ {- 1..1}" apply-apply(rule assms(2)[unfolded subset_eq,rule_format]) by auto - ultimately show False unfolding lem3[OF nz] vector_component_simps * mem_interval - apply(erule_tac x=1 in allE) by auto - next assume as:"x$2 = 1" hence "vec1 (x$2) = 1" unfolding Cart_eq by auto - hence *:"g (vec1 (x $ 2)) $ 2 = 1" using assms(8) by auto - have 
"sqprojection (f (vec1 (x$1)) - g (vec1 (x$2))) $ 2 > 0" - using x(2)[unfolded o_def Cart_eq,THEN spec[where x=2]] - unfolding as negatex_def vector_2 by auto moreover - from x1 have "f (vec1 (x $ 1)) \ {- 1..1}" apply-apply(rule assms(1)[unfolded subset_eq,rule_format]) by auto - ultimately show False unfolding lem3[OF nz] vector_component_simps * mem_interval - apply(erule_tac x=2 in allE) by auto - next assume as:"x$2 = -1" hence "vec1 (x$2) = - 1" unfolding Cart_eq by auto - hence *:"g (vec1 (x $ 2)) $ 2 = - 1" using assms(7) by auto - have "sqprojection (f (vec1 (x$1)) - g (vec1 (x$2))) $ 2 < 0" - using x(2)[unfolded o_def Cart_eq,THEN spec[where x=2]] - unfolding as negatex_def vector_2 by auto moreover - from x1 have "f (vec1 (x $ 1)) \ {- 1..1}" apply-apply(rule assms(1)[unfolded subset_eq,rule_format]) by auto - ultimately show False unfolding lem3[OF nz] vector_component_simps * mem_interval - apply(erule_tac x=2 in allE) by auto qed(auto) qed - -lemma fashoda_unit_path: fixes f ::"real^1 \ real^2" and g ::"real^1 \ real^2" - assumes "path f" "path g" "path_image f \ {- 1..1}" "path_image g \ {- 1..1}" - "(pathstart f)$1 = -1" "(pathfinish f)$1 = 1" "(pathstart g)$2 = -1" "(pathfinish g)$2 = 1" - obtains z where "z \ path_image f" "z \ path_image g" proof- - note assms=assms[unfolded path_def pathstart_def pathfinish_def path_image_def] - def iscale \ "\z::real^1. inverse 2 *\<^sub>R (z + 1)" - have isc:"iscale ` {- 1..1} \ {0..1}" unfolding iscale_def by(auto) - have "\s\{- 1..1}. \t\{- 1..1}. (f \ iscale) s = (g \ iscale) t" proof(rule fashoda_unit) - show "(f \ iscale) ` {- 1..1} \ {- 1..1}" "(g \ iscale) ` {- 1..1} \ {- 1..1}" - using isc and assms(3-4) unfolding image_compose by auto - have *:"continuous_on {- 1..1} iscale" unfolding iscale_def by(rule continuous_on_intros)+ - show "continuous_on {- 1..1} (f \ iscale)" "continuous_on {- 1..1} (g \ iscale)" - apply-apply(rule_tac[!] continuous_on_compose[OF *]) apply(rule_tac[!] continuous_on_subset[OF _ isc]) - by(rule assms)+ have *:"(1 / 2) *\<^sub>R (1 + (1::real^1)) = 1" unfolding Cart_eq by auto - show "(f \ iscale) (- 1) $ 1 = - 1" "(f \ iscale) 1 $ 1 = 1" "(g \ iscale) (- 1) $ 2 = -1" "(g \ iscale) 1 $ 2 = 1" - unfolding o_def iscale_def using assms by(auto simp add:*) qed - then guess s .. from this(2) guess t .. note st=this - show thesis apply(rule_tac z="f (iscale s)" in that) - using st `s\{- 1..1}` unfolding o_def path_image_def image_iff apply- - apply(rule_tac x="iscale s" in bexI) prefer 3 apply(rule_tac x="iscale t" in bexI) - using isc[unfolded subset_eq, rule_format] by auto qed - -lemma fashoda: fixes b::"real^2" - assumes "path f" "path g" "path_image f \ {a..b}" "path_image g \ {a..b}" - "(pathstart f)$1 = a$1" "(pathfinish f)$1 = b$1" - "(pathstart g)$2 = a$2" "(pathfinish g)$2 = b$2" - obtains z where "z \ path_image f" "z \ path_image g" proof- - fix P Q S presume "P \ Q \ S" "P \ thesis" "Q \ thesis" "S \ thesis" thus thesis by auto -next have "{a..b} \ {}" using assms(3) using path_image_nonempty by auto - hence "a \ b" unfolding interval_eq_empty vector_le_def by(auto simp add: not_less) - thus "a$1 = b$1 \ a$2 = b$2 \ (a$1 < b$1 \ a$2 < b$2)" unfolding vector_le_def forall_2 by auto -next assume as:"a$1 = b$1" have "\z\path_image g. 
z$2 = (pathstart f)$2" apply(rule connected_ivt_component) - apply(rule connected_path_image assms)+apply(rule pathstart_in_path_image,rule pathfinish_in_path_image) - unfolding assms using assms(3)[unfolded path_image_def subset_eq,rule_format,of "f 0"] - unfolding pathstart_def by(auto simp add: vector_le_def) then guess z .. note z=this - have "z \ {a..b}" using z(1) assms(4) unfolding path_image_def by blast - hence "z = f 0" unfolding Cart_eq forall_2 unfolding z(2) pathstart_def - using assms(3)[unfolded path_image_def subset_eq mem_interval,rule_format,of "f 0" 1] - unfolding mem_interval apply(erule_tac x=1 in allE) using as by auto - thus thesis apply-apply(rule that[OF _ z(1)]) unfolding path_image_def by auto -next assume as:"a$2 = b$2" have "\z\path_image f. z$1 = (pathstart g)$1" apply(rule connected_ivt_component) - apply(rule connected_path_image assms)+apply(rule pathstart_in_path_image,rule pathfinish_in_path_image) - unfolding assms using assms(4)[unfolded path_image_def subset_eq,rule_format,of "g 0"] - unfolding pathstart_def by(auto simp add: vector_le_def) then guess z .. note z=this - have "z \ {a..b}" using z(1) assms(3) unfolding path_image_def by blast - hence "z = g 0" unfolding Cart_eq forall_2 unfolding z(2) pathstart_def - using assms(4)[unfolded path_image_def subset_eq mem_interval,rule_format,of "g 0" 2] - unfolding mem_interval apply(erule_tac x=2 in allE) using as by auto - thus thesis apply-apply(rule that[OF z(1)]) unfolding path_image_def by auto -next assume as:"a $ 1 < b $ 1 \ a $ 2 < b $ 2" - have int_nem:"{- 1..1::real^2} \ {}" unfolding interval_eq_empty by auto - guess z apply(rule fashoda_unit_path[of "interval_bij (a,b) (- 1,1) \ f" "interval_bij (a,b) (- 1,1) \ g"]) - unfolding path_def path_image_def pathstart_def pathfinish_def - apply(rule_tac[1-2] continuous_on_compose) apply(rule assms[unfolded path_def] continuous_on_interval_bij)+ - unfolding subset_eq apply(rule_tac[1-2] ballI) - proof- fix x assume "x \ (interval_bij (a, b) (- 1, 1) \ f) ` {0..1}" - then guess y unfolding image_iff .. note y=this - show "x \ {- 1..1}" unfolding y o_def apply(rule in_interval_interval_bij) - using y(1) using assms(3)[unfolded path_image_def subset_eq] int_nem by auto - next fix x assume "x \ (interval_bij (a, b) (- 1, 1) \ g) ` {0..1}" - then guess y unfolding image_iff .. note y=this - show "x \ {- 1..1}" unfolding y o_def apply(rule in_interval_interval_bij) - using y(1) using assms(4)[unfolded path_image_def subset_eq] int_nem by auto - next show "(interval_bij (a, b) (- 1, 1) \ f) 0 $ 1 = -1" - "(interval_bij (a, b) (- 1, 1) \ f) 1 $ 1 = 1" - "(interval_bij (a, b) (- 1, 1) \ g) 0 $ 2 = -1" - "(interval_bij (a, b) (- 1, 1) \ g) 1 $ 2 = 1" unfolding interval_bij_def Cart_lambda_beta vector_component_simps o_def split_conv - unfolding assms[unfolded pathstart_def pathfinish_def] using as by auto qed note z=this - from z(1) guess zf unfolding image_iff .. note zf=this - from z(2) guess zg unfolding image_iff .. note zg=this - have *:"\i. 
(- 1) $ i < (1::real^2) $ i \ a $ i < b $ i" unfolding forall_2 using as by auto - show thesis apply(rule_tac z="interval_bij (- 1,1) (a,b) z" in that) - apply(subst zf) defer apply(subst zg) unfolding o_def interval_bij_bij[OF *] path_image_def - using zf(1) zg(1) by auto qed - -subsection {*Some slightly ad hoc lemmas I use below*} - -lemma segment_vertical: fixes a::"real^2" assumes "a$1 = b$1" - shows "x \ closed_segment a b \ (x$1 = a$1 \ x$1 = b$1 \ - (a$2 \ x$2 \ x$2 \ b$2 \ b$2 \ x$2 \ x$2 \ a$2))" (is "_ = ?R") -proof- - let ?L = "\u. (x $ 1 = (1 - u) * a $ 1 + u * b $ 1 \ x $ 2 = (1 - u) * a $ 2 + u * b $ 2) \ 0 \ u \ u \ 1" - { presume "?L \ ?R" "?R \ ?L" thus ?thesis unfolding closed_segment_def mem_Collect_eq - unfolding Cart_eq forall_2 smult_conv_scaleR[THEN sym] vector_component_simps by blast } - { assume ?L then guess u apply-apply(erule exE)apply(erule conjE)+ . note u=this - { fix b a assume "b + u * a > a + u * b" - hence "(1 - u) * b > (1 - u) * a" by(auto simp add:field_simps) - hence "b \ a" apply(drule_tac mult_less_imp_less_left) using u by auto - hence "u * a \ u * b" apply-apply(rule mult_left_mono[OF _ u(3)]) - using u(3-4) by(auto simp add:field_simps) } note * = this - { fix a b assume "u * b > u * a" hence "(1 - u) * a \ (1 - u) * b" apply-apply(rule mult_left_mono) - apply(drule mult_less_imp_less_left) using u by auto - hence "a + u * b \ b + u * a" by(auto simp add:field_simps) } note ** = this - thus ?R unfolding u assms using u by(auto simp add:field_simps not_le intro:* **) } - { assume ?R thus ?L proof(cases "x$2 = b$2") - case True thus ?L apply(rule_tac x="(x$2 - a$2) / (b$2 - a$2)" in exI) unfolding assms True - using `?R` by(auto simp add:field_simps) - next case False thus ?L apply(rule_tac x="1 - (x$2 - b$2) / (a$2 - b$2)" in exI) unfolding assms using `?R` - by(auto simp add:field_simps) - qed } qed - -lemma segment_horizontal: fixes a::"real^2" assumes "a$2 = b$2" - shows "x \ closed_segment a b \ (x$2 = a$2 \ x$2 = b$2 \ - (a$1 \ x$1 \ x$1 \ b$1 \ b$1 \ x$1 \ x$1 \ a$1))" (is "_ = ?R") -proof- - let ?L = "\u. (x $ 1 = (1 - u) * a $ 1 + u * b $ 1 \ x $ 2 = (1 - u) * a $ 2 + u * b $ 2) \ 0 \ u \ u \ 1" - { presume "?L \ ?R" "?R \ ?L" thus ?thesis unfolding closed_segment_def mem_Collect_eq - unfolding Cart_eq forall_2 smult_conv_scaleR[THEN sym] vector_component_simps by blast } - { assume ?L then guess u apply-apply(erule exE)apply(erule conjE)+ . note u=this - { fix b a assume "b + u * a > a + u * b" - hence "(1 - u) * b > (1 - u) * a" by(auto simp add:field_simps) - hence "b \ a" apply(drule_tac mult_less_imp_less_left) using u by auto - hence "u * a \ u * b" apply-apply(rule mult_left_mono[OF _ u(3)]) - using u(3-4) by(auto simp add:field_simps) } note * = this - { fix a b assume "u * b > u * a" hence "(1 - u) * a \ (1 - u) * b" apply-apply(rule mult_left_mono) - apply(drule mult_less_imp_less_left) using u by auto - hence "a + u * b \ b + u * a" by(auto simp add:field_simps) } note ** = this - thus ?R unfolding u assms using u by(auto simp add:field_simps not_le intro:* **) } - { assume ?R thus ?L proof(cases "x$1 = b$1") - case True thus ?L apply(rule_tac x="(x$1 - a$1) / (b$1 - a$1)" in exI) unfolding assms True - using `?R` by(auto simp add:field_simps) - next case False thus ?L apply(rule_tac x="1 - (x$1 - b$1) / (a$1 - b$1)" in exI) unfolding assms using `?R` - by(auto simp add:field_simps) - qed } qed - -subsection {*useful Fashoda corollary pointed out to me by Tom Hales. 
*} - -lemma fashoda_interlace: fixes a::"real^2" - assumes "path f" "path g" - "path_image f \ {a..b}" "path_image g \ {a..b}" - "(pathstart f)$2 = a$2" "(pathfinish f)$2 = a$2" - "(pathstart g)$2 = a$2" "(pathfinish g)$2 = a$2" - "(pathstart f)$1 < (pathstart g)$1" "(pathstart g)$1 < (pathfinish f)$1" - "(pathfinish f)$1 < (pathfinish g)$1" - obtains z where "z \ path_image f" "z \ path_image g" -proof- - have "{a..b} \ {}" using path_image_nonempty using assms(3) by auto - note ab=this[unfolded interval_eq_empty not_ex forall_2 not_less] - have "pathstart f \ {a..b}" "pathfinish f \ {a..b}" "pathstart g \ {a..b}" "pathfinish g \ {a..b}" - using pathstart_in_path_image pathfinish_in_path_image using assms(3-4) by auto - note startfin = this[unfolded mem_interval forall_2] - let ?P1 = "linepath (vector[a$1 - 2, a$2 - 2]) (vector[(pathstart f)$1,a$2 - 2]) +++ - linepath(vector[(pathstart f)$1,a$2 - 2])(pathstart f) +++ f +++ - linepath(pathfinish f)(vector[(pathfinish f)$1,a$2 - 2]) +++ - linepath(vector[(pathfinish f)$1,a$2 - 2])(vector[b$1 + 2,a$2 - 2])" - let ?P2 = "linepath(vector[(pathstart g)$1, (pathstart g)$2 - 3])(pathstart g) +++ g +++ - linepath(pathfinish g)(vector[(pathfinish g)$1,a$2 - 1]) +++ - linepath(vector[(pathfinish g)$1,a$2 - 1])(vector[b$1 + 1,a$2 - 1]) +++ - linepath(vector[b$1 + 1,a$2 - 1])(vector[b$1 + 1,b$2 + 3])" - let ?a = "vector[a$1 - 2, a$2 - 3]" - let ?b = "vector[b$1 + 2, b$2 + 3]" - have P1P2:"path_image ?P1 = path_image (linepath (vector[a$1 - 2, a$2 - 2]) (vector[(pathstart f)$1,a$2 - 2])) \ - path_image (linepath(vector[(pathstart f)$1,a$2 - 2])(pathstart f)) \ path_image f \ - path_image (linepath(pathfinish f)(vector[(pathfinish f)$1,a$2 - 2])) \ - path_image (linepath(vector[(pathfinish f)$1,a$2 - 2])(vector[b$1 + 2,a$2 - 2]))" - "path_image ?P2 = path_image(linepath(vector[(pathstart g)$1, (pathstart g)$2 - 3])(pathstart g)) \ path_image g \ - path_image(linepath(pathfinish g)(vector[(pathfinish g)$1,a$2 - 1])) \ - path_image(linepath(vector[(pathfinish g)$1,a$2 - 1])(vector[b$1 + 1,a$2 - 1])) \ - path_image(linepath(vector[b$1 + 1,a$2 - 1])(vector[b$1 + 1,b$2 + 3]))" using assms(1-2) - by(auto simp add: pathstart_join pathfinish_join path_image_join path_image_linepath path_join path_linepath) - have abab: "{a..b} \ {?a..?b}" by(auto simp add:vector_le_def forall_2 vector_2) - guess z apply(rule fashoda[of ?P1 ?P2 ?a ?b]) - unfolding pathstart_join pathfinish_join pathstart_linepath pathfinish_linepath vector_2 proof- - show "path ?P1" "path ?P2" using assms by(auto simp add: pathstart_join pathfinish_join path_join) - have "path_image ?P1 \ {?a .. ?b}" unfolding P1P2 path_image_linepath apply(rule Un_least)+ defer 3 - apply(rule_tac[1-4] convex_interval(1)[unfolded convex_contains_segment,rule_format]) - unfolding mem_interval forall_2 vector_2 using ab startfin abab assms(3) - using assms(9-) unfolding assms by(auto simp add:field_simps) - thus "path_image ?P1 \ {?a .. ?b}" . - have "path_image ?P2 \ {?a .. ?b}" unfolding P1P2 path_image_linepath apply(rule Un_least)+ defer 2 - apply(rule_tac[1-4] convex_interval(1)[unfolded convex_contains_segment,rule_format]) - unfolding mem_interval forall_2 vector_2 using ab startfin abab assms(4) - using assms(9-) unfolding assms by(auto simp add:field_simps) - thus "path_image ?P2 \ {?a .. ?b}" . 
- show "a $ 1 - 2 = a $ 1 - 2" "b $ 1 + 2 = b $ 1 + 2" "pathstart g $ 2 - 3 = a $ 2 - 3" "b $ 2 + 3 = b $ 2 + 3" - by(auto simp add: assms) - qed note z=this[unfolded P1P2 path_image_linepath] - show thesis apply(rule that[of z]) proof- - have "(z \ closed_segment (vector [a $ 1 - 2, a $ 2 - 2]) (vector [pathstart f $ 1, a $ 2 - 2]) \ - z \ closed_segment (vector [pathstart f $ 1, a $ 2 - 2]) (pathstart f)) \ - z \ closed_segment (pathfinish f) (vector [pathfinish f $ 1, a $ 2 - 2]) \ - z \ closed_segment (vector [pathfinish f $ 1, a $ 2 - 2]) (vector [b $ 1 + 2, a $ 2 - 2]) \ - (((z \ closed_segment (vector [pathstart g $ 1, pathstart g $ 2 - 3]) (pathstart g)) \ - z \ closed_segment (pathfinish g) (vector [pathfinish g $ 1, a $ 2 - 1])) \ - z \ closed_segment (vector [pathfinish g $ 1, a $ 2 - 1]) (vector [b $ 1 + 1, a $ 2 - 1])) \ - z \ closed_segment (vector [b $ 1 + 1, a $ 2 - 1]) (vector [b $ 1 + 1, b $ 2 + 3]) \ False" - apply(simp only: segment_vertical segment_horizontal vector_2) proof- case goal1 note as=this - have "pathfinish f \ {a..b}" using assms(3) pathfinish_in_path_image[of f] by auto - hence "1 + b $ 1 \ pathfinish f $ 1 \ False" unfolding mem_interval forall_2 by auto - hence "z$1 \ pathfinish f$1" using as(2) using assms ab by(auto simp add:field_simps) - moreover have "pathstart f \ {a..b}" using assms(3) pathstart_in_path_image[of f] by auto - hence "1 + b $ 1 \ pathstart f $ 1 \ False" unfolding mem_interval forall_2 by auto - hence "z$1 \ pathstart f$1" using as(2) using assms ab by(auto simp add:field_simps) - ultimately have *:"z$2 = a$2 - 2" using goal1(1) by auto - have "z$1 \ pathfinish g$1" using as(2) using assms ab by(auto simp add:field_simps *) - moreover have "pathstart g \ {a..b}" using assms(4) pathstart_in_path_image[of g] by auto - note this[unfolded mem_interval forall_2] - hence "z$1 \ pathstart g$1" using as(1) using assms ab by(auto simp add:field_simps *) - ultimately have "a $ 2 - 1 \ z $ 2 \ z $ 2 \ b $ 2 + 3 \ b $ 2 + 3 \ z $ 2 \ z $ 2 \ a $ 2 - 1" - using as(2) unfolding * assms by(auto simp add:field_simps) - thus False unfolding * using ab by auto - qed hence "z \ path_image f \ z \ path_image g" using z unfolding Un_iff by blast - hence z':"z\{a..b}" using assms(3-4) by auto - have "a $ 2 = z $ 2 \ (z $ 1 = pathstart f $ 1 \ z $ 1 = pathfinish f $ 1) \ (z = pathstart f \ z = pathfinish f)" - unfolding Cart_eq forall_2 assms by auto - with z' show "z\path_image f" using z(1) unfolding Un_iff mem_interval forall_2 apply- - apply(simp only: segment_vertical segment_horizontal vector_2) unfolding assms by auto - have "a $ 2 = z $ 2 \ (z $ 1 = pathstart g $ 1 \ z $ 1 = pathfinish g $ 1) \ (z = pathstart g \ z = pathfinish g)" - unfolding Cart_eq forall_2 assms by auto - with z' show "z\path_image g" using z(2) unfolding Un_iff mem_interval forall_2 apply- - apply(simp only: segment_vertical segment_horizontal vector_2) unfolding assms by auto - qed qed - -(** The Following still needs to be translated. Maybe I will do that later. - -(* ------------------------------------------------------------------------- *) -(* Complement in dimension N >= 2 of set homeomorphic to any interval in *) -(* any dimension is (path-)connected. This naively generalizes the argument *) -(* in Ryuji Maehara's paper "The Jordan curve theorem via the Brouwer *) -(* fixed point theorem", American Mathematical Monthly 1984. 
*) -(* ------------------------------------------------------------------------- *) - -let RETRACTION_INJECTIVE_IMAGE_INTERVAL = prove - (`!p:real^M->real^N a b. - ~(interval[a,b] = {}) /\ - p continuous_on interval[a,b] /\ - (!x y. x IN interval[a,b] /\ y IN interval[a,b] /\ p x = p y ==> x = y) - ==> ?f. f continuous_on (:real^N) /\ - IMAGE f (:real^N) SUBSET (IMAGE p (interval[a,b])) /\ - (!x. x IN (IMAGE p (interval[a,b])) ==> f x = x)`, - REPEAT STRIP_TAC THEN - FIRST_ASSUM(MP_TAC o GEN_REWRITE_RULE I [INJECTIVE_ON_LEFT_INVERSE]) THEN - DISCH_THEN(X_CHOOSE_TAC `q:real^N->real^M`) THEN - SUBGOAL_THEN `(q:real^N->real^M) continuous_on - (IMAGE p (interval[a:real^M,b]))` - ASSUME_TAC THENL - [MATCH_MP_TAC CONTINUOUS_ON_INVERSE THEN ASM_REWRITE_TAC[COMPACT_INTERVAL]; - ALL_TAC] THEN - MP_TAC(ISPECL [`q:real^N->real^M`; - `IMAGE (p:real^M->real^N) - (interval[a,b])`; - `a:real^M`; `b:real^M`] - TIETZE_CLOSED_INTERVAL) THEN - ASM_SIMP_TAC[COMPACT_INTERVAL; COMPACT_CONTINUOUS_IMAGE; - COMPACT_IMP_CLOSED] THEN - ANTS_TAC THENL [ASM SET_TAC[]; ALL_TAC] THEN - DISCH_THEN(X_CHOOSE_THEN `r:real^N->real^M` STRIP_ASSUME_TAC) THEN - EXISTS_TAC `(p:real^M->real^N) o (r:real^N->real^M)` THEN - REWRITE_TAC[SUBSET; FORALL_IN_IMAGE; o_THM; IN_UNIV] THEN - CONJ_TAC THENL [ALL_TAC; ASM SET_TAC[]] THEN - MATCH_MP_TAC CONTINUOUS_ON_COMPOSE THEN ASM_REWRITE_TAC[] THEN - FIRST_X_ASSUM(MATCH_MP_TAC o MATCH_MP(REWRITE_RULE[IMP_CONJ] - CONTINUOUS_ON_SUBSET)) THEN ASM SET_TAC[]);; - -let UNBOUNDED_PATH_COMPONENTS_COMPLEMENT_HOMEOMORPHIC_INTERVAL = prove - (`!s:real^N->bool a b:real^M. - s homeomorphic (interval[a,b]) - ==> !x. ~(x IN s) ==> ~bounded(path_component((:real^N) DIFF s) x)`, - REPEAT GEN_TAC THEN REWRITE_TAC[homeomorphic; homeomorphism] THEN - REWRITE_TAC[LEFT_IMP_EXISTS_THM] THEN - MAP_EVERY X_GEN_TAC [`p':real^N->real^M`; `p:real^M->real^N`] THEN - DISCH_TAC THEN - SUBGOAL_THEN - `!x y. x IN interval[a,b] /\ y IN interval[a,b] /\ - (p:real^M->real^N) x = p y ==> x = y` - ASSUME_TAC THENL [ASM_MESON_TAC[]; ALL_TAC] THEN - FIRST_X_ASSUM(MP_TAC o funpow 4 CONJUNCT2) THEN - DISCH_THEN(CONJUNCTS_THEN2 (SUBST1_TAC o SYM) ASSUME_TAC) THEN - ASM_CASES_TAC `interval[a:real^M,b] = {}` THEN - ASM_REWRITE_TAC[IMAGE_CLAUSES; DIFF_EMPTY; PATH_COMPONENT_UNIV; - NOT_BOUNDED_UNIV] THEN - ABBREV_TAC `s = (:real^N) DIFF (IMAGE p (interval[a:real^M,b]))` THEN - X_GEN_TAC `c:real^N` THEN REPEAT STRIP_TAC THEN - SUBGOAL_THEN `(c:real^N) IN s` ASSUME_TAC THENL [ASM SET_TAC[]; ALL_TAC] THEN - SUBGOAL_THEN `bounded((path_component s c) UNION - (IMAGE (p:real^M->real^N) (interval[a,b])))` - MP_TAC THENL - [ASM_SIMP_TAC[BOUNDED_UNION; COMPACT_IMP_BOUNDED; COMPACT_IMP_BOUNDED; - COMPACT_CONTINUOUS_IMAGE; COMPACT_INTERVAL]; - ALL_TAC] THEN - DISCH_THEN(MP_TAC o SPEC `c:real^N` o MATCH_MP BOUNDED_SUBSET_BALL) THEN - REWRITE_TAC[UNION_SUBSET] THEN - DISCH_THEN(X_CHOOSE_THEN `B:real` STRIP_ASSUME_TAC) THEN - MP_TAC(ISPECL [`p:real^M->real^N`; `a:real^M`; `b:real^M`] - RETRACTION_INJECTIVE_IMAGE_INTERVAL) THEN - ASM_REWRITE_TAC[SUBSET; IN_UNIV] THEN - DISCH_THEN(X_CHOOSE_THEN `r:real^N->real^N` MP_TAC) THEN - DISCH_THEN(CONJUNCTS_THEN2 ASSUME_TAC - (CONJUNCTS_THEN2 MP_TAC ASSUME_TAC)) THEN - REWRITE_TAC[FORALL_IN_IMAGE; IN_UNIV] THEN DISCH_TAC THEN - ABBREV_TAC `q = \z:real^N. 
if z IN path_component s c then r(z) else z` THEN - SUBGOAL_THEN - `(q:real^N->real^N) continuous_on - (closure(path_component s c) UNION ((:real^N) DIFF (path_component s c)))` - MP_TAC THENL - [EXPAND_TAC "q" THEN MATCH_MP_TAC CONTINUOUS_ON_CASES THEN - REWRITE_TAC[CLOSED_CLOSURE; CONTINUOUS_ON_ID; GSYM OPEN_CLOSED] THEN - REPEAT CONJ_TAC THENL - [MATCH_MP_TAC OPEN_PATH_COMPONENT THEN EXPAND_TAC "s" THEN - ASM_SIMP_TAC[GSYM CLOSED_OPEN; COMPACT_IMP_CLOSED; - COMPACT_CONTINUOUS_IMAGE; COMPACT_INTERVAL]; - ASM_MESON_TAC[CONTINUOUS_ON_SUBSET; SUBSET_UNIV]; - ALL_TAC] THEN - X_GEN_TAC `z:real^N` THEN - REWRITE_TAC[SET_RULE `~(z IN (s DIFF t) /\ z IN t)`] THEN - STRIP_TAC THEN FIRST_X_ASSUM MATCH_MP_TAC THEN - MP_TAC(ISPECL - [`path_component s (z:real^N)`; `path_component s (c:real^N)`] - OPEN_INTER_CLOSURE_EQ_EMPTY) THEN - ASM_REWRITE_TAC[GSYM DISJOINT; PATH_COMPONENT_DISJOINT] THEN ANTS_TAC THENL - [MATCH_MP_TAC OPEN_PATH_COMPONENT THEN EXPAND_TAC "s" THEN - ASM_SIMP_TAC[GSYM CLOSED_OPEN; COMPACT_IMP_CLOSED; - COMPACT_CONTINUOUS_IMAGE; COMPACT_INTERVAL]; - REWRITE_TAC[DISJOINT; EXTENSION; IN_INTER; NOT_IN_EMPTY] THEN - DISCH_THEN(MP_TAC o SPEC `z:real^N`) THEN ASM_REWRITE_TAC[] THEN - GEN_REWRITE_TAC (LAND_CONV o ONCE_DEPTH_CONV) [IN] THEN - REWRITE_TAC[PATH_COMPONENT_REFL_EQ] THEN ASM SET_TAC[]]; - ALL_TAC] THEN - SUBGOAL_THEN - `closure(path_component s c) UNION ((:real^N) DIFF (path_component s c)) = - (:real^N)` - SUBST1_TAC THENL - [MATCH_MP_TAC(SET_RULE `s SUBSET t ==> t UNION (UNIV DIFF s) = UNIV`) THEN - REWRITE_TAC[CLOSURE_SUBSET]; - DISCH_TAC] THEN - MP_TAC(ISPECL - [`(\x. &2 % c - x) o - (\x. c + B / norm(x - c) % (x - c)) o (q:real^N->real^N)`; - `cball(c:real^N,B)`] - BROUWER) THEN - REWRITE_TAC[NOT_IMP; GSYM CONJ_ASSOC; COMPACT_CBALL; CONVEX_CBALL] THEN - ASM_SIMP_TAC[CBALL_EQ_EMPTY; REAL_LT_IMP_LE; REAL_NOT_LT] THEN - SUBGOAL_THEN `!x. ~((q:real^N->real^N) x = c)` ASSUME_TAC THENL - [X_GEN_TAC `x:real^N` THEN EXPAND_TAC "q" THEN - REWRITE_TAC[NORM_EQ_0; VECTOR_SUB_EQ] THEN COND_CASES_TAC THEN - ASM SET_TAC[PATH_COMPONENT_REFL_EQ]; - ALL_TAC] THEN - REPEAT CONJ_TAC THENL - [MATCH_MP_TAC CONTINUOUS_ON_COMPOSE THEN - SIMP_TAC[CONTINUOUS_ON_SUB; CONTINUOUS_ON_ID; CONTINUOUS_ON_CONST] THEN - MATCH_MP_TAC CONTINUOUS_ON_COMPOSE THEN CONJ_TAC THENL - [ASM_MESON_TAC[CONTINUOUS_ON_SUBSET; SUBSET_UNIV]; ALL_TAC] THEN - MATCH_MP_TAC CONTINUOUS_ON_ADD THEN REWRITE_TAC[CONTINUOUS_ON_CONST] THEN - MATCH_MP_TAC CONTINUOUS_ON_MUL THEN - SIMP_TAC[CONTINUOUS_ON_SUB; CONTINUOUS_ON_ID; CONTINUOUS_ON_CONST] THEN - REWRITE_TAC[o_DEF; real_div; LIFT_CMUL] THEN - MATCH_MP_TAC CONTINUOUS_ON_CMUL THEN - MATCH_MP_TAC(REWRITE_RULE[o_DEF] CONTINUOUS_ON_INV) THEN - ASM_REWRITE_TAC[FORALL_IN_IMAGE; NORM_EQ_0; VECTOR_SUB_EQ] THEN - SUBGOAL_THEN - `(\x:real^N. lift(norm(x - c))) = (lift o norm) o (\x. 
x - c)` - SUBST1_TAC THENL [REWRITE_TAC[o_DEF]; ALL_TAC] THEN - MATCH_MP_TAC CONTINUOUS_ON_COMPOSE THEN - ASM_SIMP_TAC[CONTINUOUS_ON_SUB; CONTINUOUS_ON_ID; CONTINUOUS_ON_CONST; - CONTINUOUS_ON_LIFT_NORM]; - REWRITE_TAC[SUBSET; FORALL_IN_IMAGE; IN_CBALL; o_THM; dist] THEN - X_GEN_TAC `x:real^N` THEN DISCH_TAC THEN - REWRITE_TAC[VECTOR_ARITH `c - (&2 % c - (c + x)) = x`] THEN - REWRITE_TAC[NORM_MUL; REAL_ABS_DIV; REAL_ABS_NORM] THEN - ASM_SIMP_TAC[REAL_DIV_RMUL; NORM_EQ_0; VECTOR_SUB_EQ] THEN - ASM_REAL_ARITH_TAC; - REWRITE_TAC[NOT_EXISTS_THM; TAUT `~(c /\ b) <=> c ==> ~b`] THEN - REWRITE_TAC[IN_CBALL; o_THM; dist] THEN - X_GEN_TAC `x:real^N` THEN DISCH_TAC THEN - REWRITE_TAC[VECTOR_ARITH `&2 % c - (c + x') = x <=> --x' = x - c`] THEN - ASM_CASES_TAC `(x:real^N) IN path_component s c` THENL - [MATCH_MP_TAC(NORM_ARITH `norm(y) < B /\ norm(x) = B ==> ~(--x = y)`) THEN - REWRITE_TAC[NORM_MUL; REAL_ABS_DIV; REAL_ABS_NORM] THEN - ASM_SIMP_TAC[REAL_DIV_RMUL; NORM_EQ_0; VECTOR_SUB_EQ] THEN - ASM_SIMP_TAC[REAL_ARITH `&0 < B ==> abs B = B`] THEN - UNDISCH_TAC `path_component s c SUBSET ball(c:real^N,B)` THEN - REWRITE_TAC[SUBSET; IN_BALL] THEN ASM_MESON_TAC[dist; NORM_SUB]; - EXPAND_TAC "q" THEN REWRITE_TAC[] THEN ASM_REWRITE_TAC[] THEN - REWRITE_TAC[VECTOR_ARITH `--(c % x) = x <=> (&1 + c) % x = vec 0`] THEN - ASM_REWRITE_TAC[DE_MORGAN_THM; VECTOR_SUB_EQ; VECTOR_MUL_EQ_0] THEN - SUBGOAL_THEN `~(x:real^N = c)` ASSUME_TAC THENL - [ASM_MESON_TAC[PATH_COMPONENT_REFL; IN]; ALL_TAC] THEN - ASM_REWRITE_TAC[] THEN - MATCH_MP_TAC(REAL_ARITH `&0 < x ==> ~(&1 + x = &0)`) THEN - ASM_SIMP_TAC[REAL_LT_DIV; NORM_POS_LT; VECTOR_SUB_EQ]]]);; - -let PATH_CONNECTED_COMPLEMENT_HOMEOMORPHIC_INTERVAL = prove - (`!s:real^N->bool a b:real^M. - 2 <= dimindex(:N) /\ s homeomorphic interval[a,b] - ==> path_connected((:real^N) DIFF s)`, - REPEAT STRIP_TAC THEN REWRITE_TAC[PATH_CONNECTED_IFF_PATH_COMPONENT] THEN - FIRST_ASSUM(MP_TAC o MATCH_MP - UNBOUNDED_PATH_COMPONENTS_COMPLEMENT_HOMEOMORPHIC_INTERVAL) THEN - ASM_REWRITE_TAC[SET_RULE `~(x IN s) <=> x IN (UNIV DIFF s)`] THEN - ABBREV_TAC `t = (:real^N) DIFF s` THEN - DISCH_TAC THEN MAP_EVERY X_GEN_TAC [`x:real^N`; `y:real^N`] THEN - STRIP_TAC THEN FIRST_ASSUM(MP_TAC o MATCH_MP HOMEOMORPHIC_COMPACTNESS) THEN - REWRITE_TAC[COMPACT_INTERVAL] THEN - DISCH_THEN(MP_TAC o MATCH_MP COMPACT_IMP_BOUNDED) THEN - REWRITE_TAC[BOUNDED_POS; LEFT_IMP_EXISTS_THM] THEN - X_GEN_TAC `B:real` THEN STRIP_TAC THEN - SUBGOAL_THEN `(?u:real^N. u IN path_component t x /\ B < norm(u)) /\ - (?v:real^N. 
v IN path_component t y /\ B < norm(v))` - STRIP_ASSUME_TAC THENL - [ASM_MESON_TAC[BOUNDED_POS; REAL_NOT_LE]; ALL_TAC] THEN - MATCH_MP_TAC PATH_COMPONENT_TRANS THEN EXISTS_TAC `u:real^N` THEN - CONJ_TAC THENL [ASM_MESON_TAC[IN]; ALL_TAC] THEN - MATCH_MP_TAC PATH_COMPONENT_SYM THEN - MATCH_MP_TAC PATH_COMPONENT_TRANS THEN EXISTS_TAC `v:real^N` THEN - CONJ_TAC THENL [ASM_MESON_TAC[IN]; ALL_TAC] THEN - MATCH_MP_TAC PATH_COMPONENT_OF_SUBSET THEN - EXISTS_TAC `(:real^N) DIFF cball(vec 0,B)` THEN CONJ_TAC THENL - [EXPAND_TAC "t" THEN MATCH_MP_TAC(SET_RULE - `s SUBSET t ==> (u DIFF t) SUBSET (u DIFF s)`) THEN - ASM_REWRITE_TAC[SUBSET; IN_CBALL_0]; - MP_TAC(ISPEC `cball(vec 0:real^N,B)` - PATH_CONNECTED_COMPLEMENT_BOUNDED_CONVEX) THEN - ASM_REWRITE_TAC[BOUNDED_CBALL; CONVEX_CBALL] THEN - REWRITE_TAC[PATH_CONNECTED_IFF_PATH_COMPONENT] THEN - DISCH_THEN MATCH_MP_TAC THEN - ASM_REWRITE_TAC[IN_DIFF; IN_UNIV; IN_CBALL_0; REAL_NOT_LE]]);; - -(* ------------------------------------------------------------------------- *) -(* In particular, apply all these to the special case of an arc. *) -(* ------------------------------------------------------------------------- *) - -let RETRACTION_ARC = prove - (`!p. arc p - ==> ?f. f continuous_on (:real^N) /\ - IMAGE f (:real^N) SUBSET path_image p /\ - (!x. x IN path_image p ==> f x = x)`, - REWRITE_TAC[arc; path; path_image] THEN REPEAT STRIP_TAC THEN - MATCH_MP_TAC RETRACTION_INJECTIVE_IMAGE_INTERVAL THEN - ASM_REWRITE_TAC[INTERVAL_EQ_EMPTY_1; DROP_VEC; REAL_NOT_LT; REAL_POS]);; - -let PATH_CONNECTED_ARC_COMPLEMENT = prove - (`!p. 2 <= dimindex(:N) /\ arc p - ==> path_connected((:real^N) DIFF path_image p)`, - REWRITE_TAC[arc; path] THEN REPEAT STRIP_TAC THEN SIMP_TAC[path_image] THEN - MP_TAC(ISPECL [`path_image p:real^N->bool`; `vec 0:real^1`; `vec 1:real^1`] - PATH_CONNECTED_COMPLEMENT_HOMEOMORPHIC_INTERVAL) THEN - ASM_REWRITE_TAC[path_image] THEN DISCH_THEN MATCH_MP_TAC THEN - ONCE_REWRITE_TAC[HOMEOMORPHIC_SYM] THEN - MATCH_MP_TAC HOMEOMORPHIC_COMPACT THEN - EXISTS_TAC `p:real^1->real^N` THEN ASM_REWRITE_TAC[COMPACT_INTERVAL]);; - -let CONNECTED_ARC_COMPLEMENT = prove - (`!p. 2 <= dimindex(:N) /\ arc p - ==> connected((:real^N) DIFF path_image p)`, - SIMP_TAC[PATH_CONNECTED_ARC_COMPLEMENT; PATH_CONNECTED_IMP_CONNECTED]);; *) - end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Multivariate_Analysis/Convex_Euclidean_Space.thy --- a/src/HOL/Multivariate_Analysis/Convex_Euclidean_Space.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Multivariate_Analysis/Convex_Euclidean_Space.thy Tue May 04 20:30:22 2010 +0200 @@ -5,7 +5,7 @@ header {* Convex sets, functions and related things. *} theory Convex_Euclidean_Space -imports Topology_Euclidean_Space +imports Topology_Euclidean_Space Convex begin @@ -15,17 +15,11 @@ declare vector_add_ldistrib[simp] vector_ssub_ldistrib[simp] vector_smult_assoc[simp] vector_smult_rneg[simp] declare vector_sadd_rdistrib[simp] vector_sub_rdistrib[simp] -declare UNIV_1[simp] (*lemma dim1in[intro]:"Suc 0 \ {1::nat .. 
CARD(1)}" by auto*) lemmas vector_component_simps = vector_minus_component vector_smult_component vector_add_component vector_le_def Cart_lambda_beta basis_component vector_uminus_component -lemma dest_vec1_simps[simp]: fixes a::"real^1" - shows "a$1 = 0 \ a = 0" (*"a \ 1 \ dest_vec1 a \ 1" "0 \ a \ 0 \ dest_vec1 a"*) - "a \ b \ dest_vec1 a \ dest_vec1 b" "dest_vec1 (1::real^1) = 1" - by(auto simp add:vector_component_simps forall_1 Cart_eq) - lemma norm_not_0:"(x::real^'n)\0 \ norm x \ 0" by auto lemma setsum_delta_notmem: assumes "x\s" @@ -47,32 +41,10 @@ lemma if_smult:"(if P then x else (y::real)) *\<^sub>R v = (if P then x *\<^sub>R v else y *\<^sub>R v)" by auto -lemma mem_interval_1: fixes x :: "real^1" shows - "(x \ {a .. b} \ dest_vec1 a \ dest_vec1 x \ dest_vec1 x \ dest_vec1 b)" - "(x \ {a<.. dest_vec1 a < dest_vec1 x \ dest_vec1 x < dest_vec1 b)" -by(simp_all add: Cart_eq vector_less_def vector_le_def forall_1) - lemma image_smult_interval:"(\x. m *\<^sub>R (x::real^'n)) ` {a..b} = (if {a..b} = {} then {} else if 0 \ m then {m *\<^sub>R a..m *\<^sub>R b} else {m *\<^sub>R b..m *\<^sub>R a})" using image_affinity_interval[of m 0 a b] by auto -lemma dest_vec1_inverval: - "dest_vec1 ` {a .. b} = {dest_vec1 a .. dest_vec1 b}" - "dest_vec1 ` {a<.. b} = {dest_vec1 a<.. dest_vec1 b}" - "dest_vec1 ` {a ..x. dest_vec1 (f x)) S" - using dest_vec1_sum[OF assms] by auto - lemma dist_triangle_eq: fixes x y z :: "real ^ _" shows "dist x z = dist x y + dist y z \ norm (x - y) *\<^sub>R (y - z) = norm (y - z) *\<^sub>R (x - y)" @@ -90,7 +62,7 @@ using one_le_card_finite by auto lemma real_dimindex_ge_1:"real (CARD('n::finite)) \ 1" - by(metis dimindex_ge_1 linorder_not_less real_eq_of_nat real_le_trans real_of_nat_1 real_of_nat_le_iff) + by(metis dimindex_ge_1 real_eq_of_nat real_of_nat_1 real_of_nat_le_iff) lemma real_dimindex_gt_0:"real (CARD('n::finite)) > 0" apply(rule less_le_trans[OF _ real_dimindex_ge_1]) by auto @@ -343,150 +315,6 @@ shows "affine hull s = {a + v | v. v \ span {x - a | x. x \ s - {a}}}" using affine_hull_insert_span[of a "s - {a}", unfolded insert_Diff[OF assms]] by auto -subsection {* Convexity. *} - -definition - convex :: "'a::real_vector set \ bool" where - "convex s \ (\x\s. \y\s. \u\0. \v\0. u + v = 1 \ u *\<^sub>R x + v *\<^sub>R y \ s)" - -lemma convex_alt: "convex s \ (\x\s. \y\s. \u. 0 \ u \ u \ 1 \ ((1 - u) *\<^sub>R x + u *\<^sub>R y) \ s)" -proof- have *:"\u v::real. u + v = 1 \ u = 1 - v" by auto - show ?thesis unfolding convex_def apply auto - apply(erule_tac x=x in ballE) apply(erule_tac x=y in ballE) apply(erule_tac x="1 - u" in allE) - by (auto simp add: *) qed - -lemma mem_convex: - assumes "convex s" "a \ s" "b \ s" "0 \ u" "u \ 1" - shows "((1 - u) *\<^sub>R a + u *\<^sub>R b) \ s" - using assms unfolding convex_alt by auto - -lemma convex_vec1:"convex (vec1 ` s) = convex (s::real set)" - unfolding convex_def Ball_def forall_vec1 unfolding vec1_dest_vec1_simps image_iff by auto - -lemma convex_empty[intro]: "convex {}" - unfolding convex_def by simp - -lemma convex_singleton[intro]: "convex {a}" - unfolding convex_def by (auto simp add:scaleR_left_distrib[THEN sym]) - -lemma convex_UNIV[intro]: "convex UNIV" - unfolding convex_def by auto - -lemma convex_Inter: "(\s\f. convex s) ==> convex(\ f)" - unfolding convex_def by auto - -lemma convex_Int: "convex s \ convex t \ convex (s \ t)" - unfolding convex_def by auto - -lemma convex_halfspace_le: "convex {x. 
inner a x \ b}" - unfolding convex_def apply auto - unfolding inner_add inner_scaleR - by (metis real_convex_bound_le) - -lemma convex_halfspace_ge: "convex {x. inner a x \ b}" -proof- have *:"{x. inner a x \ b} = {x. inner (-a) x \ -b}" by auto - show ?thesis apply(unfold *) using convex_halfspace_le[of "-a" "-b"] by auto qed - -lemma convex_hyperplane: "convex {x. inner a x = b}" -proof- - have *:"{x. inner a x = b} = {x. inner a x \ b} \ {x. inner a x \ b}" by auto - show ?thesis unfolding * apply(rule convex_Int) - using convex_halfspace_le convex_halfspace_ge by auto -qed - -lemma convex_halfspace_lt: "convex {x. inner a x < b}" - unfolding convex_def - by(auto simp add: real_convex_bound_lt inner_add) - -lemma convex_halfspace_gt: "convex {x. inner a x > b}" - using convex_halfspace_lt[of "-a" "-b"] by auto - -lemma convex_positive_orthant: "convex {x::real^'n. (\i. 0 \ x$i)}" - unfolding convex_def apply auto apply(erule_tac x=i in allE)+ - apply(rule add_nonneg_nonneg) by(auto simp add: mult_nonneg_nonneg) - -subsection {* Explicit expressions for convexity in terms of arbitrary sums. *} - -lemma convex: "convex s \ - (\(k::nat) u x. (\i. 1\i \ i\k \ 0 \ u i \ x i \s) \ (setsum u {1..k} = 1) - \ setsum (\i. u i *\<^sub>R x i) {1..k} \ s)" - unfolding convex_def apply rule apply(rule allI)+ defer apply(rule ballI)+ apply(rule allI)+ proof(rule,rule,rule,rule) - fix x y u v assume as:"\(k::nat) u x. (\i. 1 \ i \ i \ k \ 0 \ u i \ x i \ s) \ setsum u {1..k} = 1 \ (\i = 1..k. u i *\<^sub>R x i) \ s" - "x \ s" "y \ s" "0 \ u" "0 \ v" "u + v = (1::real)" - show "u *\<^sub>R x + v *\<^sub>R y \ s" using as(1)[THEN spec[where x=2], THEN spec[where x="\n. if n=1 then u else v"], THEN spec[where x="\n. if n=1 then x else y"]] and as(2-) - by (auto simp add: setsum_head_Suc) -next - fix k u x assume as:"\x\s. \y\s. \u\0. \v\0. u + v = 1 \ u *\<^sub>R x + v *\<^sub>R y \ s" - show "(\i::nat. 1 \ i \ i \ k \ 0 \ u i \ x i \ s) \ setsum u {1..k} = 1 \ (\i = 1..k. u i *\<^sub>R x i) \ s" apply(rule,erule conjE) proof(induct k arbitrary: u) - case (Suc k) show ?case proof(cases "u (Suc k) = 1") - case True hence "(\i = Suc 0..k. u i *\<^sub>R x i) = 0" apply(rule_tac setsum_0') apply(rule ccontr) unfolding ball_simps apply(erule bexE) proof- - fix i assume i:"i \ {Suc 0..k}" "u i *\<^sub>R x i \ 0" - hence ui:"u i \ 0" by auto - hence "setsum (\k. if k=i then u i else 0) {1 .. k} \ setsum u {1 .. k}" apply(rule_tac setsum_mono) using Suc(2) by auto - hence "setsum u {1 .. k} \ u i" using i(1) by(auto simp add: setsum_delta) - hence "setsum u {1 .. k} > 0" using ui apply(rule_tac less_le_trans[of _ "u i"]) using Suc(2)[THEN spec[where x=i]] and i(1) by auto - thus False using Suc(3) unfolding setsum_cl_ivl_Suc and True by simp qed - thus ?thesis unfolding setsum_cl_ivl_Suc using True and Suc(2) by auto - next - have *:"setsum u {1..k} = 1 - u (Suc k)" using Suc(3)[unfolded setsum_cl_ivl_Suc] by auto - have **:"u (Suc k) \ 1" unfolding not_le using Suc(3) using setsum_nonneg[of "{1..k}" u] using Suc(2) by auto - have ***:"\i k. (u i / (1 - u (Suc k))) *\<^sub>R x i = (inverse (1 - u (Suc k))) *\<^sub>R (u i *\<^sub>R x i)" unfolding real_divide_def by (auto simp add: algebra_simps) - case False hence nn:"1 - u (Suc k) \ 0" by auto - have "(\i = 1..k. 
(u i / (1 - u (Suc k))) *\<^sub>R x i) \ s" apply(rule Suc(1)) unfolding setsum_divide_distrib[THEN sym] and * - apply(rule_tac allI) apply(rule,rule) apply(rule divide_nonneg_pos) using nn Suc(2) ** by auto - hence "(1 - u (Suc k)) *\<^sub>R (\i = 1..k. (u i / (1 - u (Suc k))) *\<^sub>R x i) + u (Suc k) *\<^sub>R x (Suc k) \ s" - apply(rule as[THEN bspec, THEN bspec, THEN spec, THEN mp, THEN spec, THEN mp, THEN mp]) using Suc(2)[THEN spec[where x="Suc k"]] and ** by auto - thus ?thesis unfolding setsum_cl_ivl_Suc and *** and scaleR_right.setsum [symmetric] using nn by auto qed qed auto qed - - -lemma convex_explicit: - fixes s :: "'a::real_vector set" - shows "convex s \ - (\t u. finite t \ t \ s \ (\x\t. 0 \ u x) \ setsum u t = 1 \ setsum (\x. u x *\<^sub>R x) t \ s)" - unfolding convex_def apply(rule,rule,rule) apply(subst imp_conjL,rule) defer apply(rule,rule,rule,rule,rule,rule,rule) proof- - fix x y u v assume as:"\t u. finite t \ t \ s \ (\x\t. 0 \ u x) \ setsum u t = 1 \ (\x\t. u x *\<^sub>R x) \ s" "x \ s" "y \ s" "0 \ u" "0 \ v" "u + v = (1::real)" - show "u *\<^sub>R x + v *\<^sub>R y \ s" proof(cases "x=y") - case True show ?thesis unfolding True and scaleR_left_distrib[THEN sym] using as(3,6) by auto next - case False thus ?thesis using as(1)[THEN spec[where x="{x,y}"], THEN spec[where x="\z. if z=x then u else v"]] and as(2-) by auto qed -next - fix t u assume asm:"\x\s. \y\s. \u\0. \v\0. u + v = 1 \ u *\<^sub>R x + v *\<^sub>R y \ s" "finite (t::'a set)" - (*"finite t" "t \ s" "\x\t. (0::real) \ u x" "setsum u t = 1"*) - from this(2) have "\u. t \ s \ (\x\t. 0 \ u x) \ setsum u t = 1 \ (\x\t. u x *\<^sub>R x) \ s" apply(induct_tac t rule:finite_induct) - prefer 3 apply (rule,rule) apply(erule conjE)+ proof- - fix x f u assume ind:"\u. f \ s \ (\x\f. 0 \ u x) \ setsum u f = 1 \ (\x\f. u x *\<^sub>R x) \ s" - assume as:"finite f" "x \ f" "insert x f \ s" "\x\insert x f. 0 \ u x" "setsum u (insert x f) = (1::real)" - show "(\x\insert x f. u x *\<^sub>R x) \ s" proof(cases "u x = 1") - case True hence "setsum (\x. u x *\<^sub>R x) f = 0" apply(rule_tac setsum_0') apply(rule ccontr) unfolding ball_simps apply(erule bexE) proof- - fix y assume y:"y \ f" "u y *\<^sub>R y \ 0" - hence uy:"u y \ 0" by auto - hence "setsum (\k. if k=y then u y else 0) f \ setsum u f" apply(rule_tac setsum_mono) using as(4) by auto - hence "setsum u f \ u y" using y(1) and as(1) by(auto simp add: setsum_delta) - hence "setsum u f > 0" using uy apply(rule_tac less_le_trans[of _ "u y"]) using as(4) and y(1) by auto - thus False using as(2,5) unfolding setsum_clauses(2)[OF as(1)] and True by auto qed - thus ?thesis unfolding setsum_clauses(2)[OF as(1)] using as(2,3) unfolding True by auto - next - have *:"setsum u f = setsum u (insert x f) - u x" using as(2) unfolding setsum_clauses(2)[OF as(1)] by auto - have **:"u x \ 1" unfolding not_le using as(5)[unfolded setsum_clauses(2)[OF as(1)]] and as(2) - using setsum_nonneg[of f u] and as(4) by auto - case False hence "inverse (1 - u x) *\<^sub>R (\x\f. u x *\<^sub>R x) \ s" unfolding scaleR_right.setsum and scaleR_scaleR - apply(rule_tac ind[THEN spec, THEN mp]) apply rule defer apply rule apply rule apply(rule mult_nonneg_nonneg) - unfolding setsum_right_distrib[THEN sym] and * using as and ** by auto - hence "u x *\<^sub>R x + (1 - u x) *\<^sub>R ((inverse (1 - u x)) *\<^sub>R setsum (\x. 
u x *\<^sub>R x) f) \s" - apply(rule_tac asm(1)[THEN bspec, THEN bspec, THEN spec, THEN mp, THEN spec, THEN mp, THEN mp]) using as and ** False by auto - thus ?thesis unfolding setsum_clauses(2)[OF as(1)] using as(2) and False by auto qed - qed auto thus "t \ s \ (\x\t. 0 \ u x) \ setsum u t = 1 \ (\x\t. u x *\<^sub>R x) \ s" by auto -qed - -lemma convex_finite: assumes "finite s" - shows "convex s \ (\u. (\x\s. 0 \ u x) \ setsum u s = 1 - \ setsum (\x. u x *\<^sub>R x) s \ s)" - unfolding convex_explicit apply(rule, rule, rule) defer apply(rule,rule,rule)apply(erule conjE)+ proof- - fix t u assume as:"\u. (\x\s. 0 \ u x) \ setsum u s = 1 \ (\x\s. u x *\<^sub>R x) \ s" " finite t" "t \ s" "\x\t. 0 \ u x" "setsum u t = (1::real)" - have *:"s \ t = t" using as(3) by auto - show "(\x\t. u x *\<^sub>R x) \ s" using as(1)[THEN spec[where x="\x. if x\t then u x else 0"]] - unfolding if_smult and setsum_cases[OF assms] using as(2-) * by auto -qed (erule_tac x=s in allE, erule_tac x=u in allE, auto) - subsection {* Cones. *} definition @@ -597,49 +425,15 @@ lemma connected_UNIV[intro]: "connected (UNIV :: 'a::real_normed_vector set)" by(simp add: convex_connected convex_UNIV) -subsection {* Convex functions into the reals. *} - -definition - convex_on :: "'a::real_vector set \ ('a \ real) \ bool" where - "convex_on s f \ - (\x\s. \y\s. \u\0. \v\0. u + v = 1 \ f (u *\<^sub>R x + v *\<^sub>R y) \ u * f x + v * f y)" - -lemma convex_on_subset: "convex_on t f \ s \ t \ convex_on s f" - unfolding convex_on_def by auto +subsection {* Balls, being convex, are connected. *} -lemma convex_add[intro]: - assumes "convex_on s f" "convex_on s g" - shows "convex_on s (\x. f x + g x)" -proof- - { fix x y assume "x\s" "y\s" moreover - fix u v ::real assume "0 \ u" "0 \ v" "u + v = 1" - ultimately have "f (u *\<^sub>R x + v *\<^sub>R y) + g (u *\<^sub>R x + v *\<^sub>R y) \ (u * f x + v * f y) + (u * g x + v * g y)" - using assms(1)[unfolded convex_on_def, THEN bspec[where x=x], THEN bspec[where x=y], THEN spec[where x=u]] - using assms(2)[unfolded convex_on_def, THEN bspec[where x=x], THEN bspec[where x=y], THEN spec[where x=u]] - apply - apply(rule add_mono) by auto - hence "f (u *\<^sub>R x + v *\<^sub>R y) + g (u *\<^sub>R x + v *\<^sub>R y) \ u * (f x + g x) + v * (f y + g y)" by (simp add: ring_simps) } - thus ?thesis unfolding convex_on_def by auto -qed +lemma convex_box: + assumes "\i. convex {x. P i x}" + shows "convex {x. \i. P i (x$i)}" +using assms unfolding convex_def by auto -lemma convex_cmul[intro]: - assumes "0 \ (c::real)" "convex_on s f" - shows "convex_on s (\x. c * f x)" -proof- - have *:"\u c fx v fy ::real. u * (c * fx) + v * (c * fy) = c * (u * fx + v * fy)" by (simp add: ring_simps) - show ?thesis using assms(2) and mult_mono1[OF _ assms(1)] unfolding convex_on_def and * by auto -qed - -lemma convex_lower: - assumes "convex_on s f" "x\s" "y \ s" "0 \ u" "0 \ v" "u + v = 1" - shows "f (u *\<^sub>R x + v *\<^sub>R y) \ max (f x) (f y)" -proof- - let ?m = "max (f x) (f y)" - have "u * f x + v * f y \ u * max (f x) (f y) + v * max (f x) (f y)" apply(rule add_mono) - using assms(4,5) by(auto simp add: mult_mono1) - also have "\ = max (f x) (f y)" using assms(6) unfolding distrib[THEN sym] by auto - finally show ?thesis using assms(1)[unfolded convex_on_def, THEN bspec[where x=x], THEN bspec[where x=y], THEN spec[where x=u]] - using assms(2-6) by auto -qed +lemma convex_positive_orthant: "convex {x::real^'n. (\i. 
0 \ x$i)}" + by (rule convex_box) (simp add: atLeast_def[symmetric] convex_real_interval) lemma convex_local_global_minimum: fixes s :: "'a::real_normed_vector set" @@ -663,76 +457,6 @@ ultimately show False using mult_strict_left_mono[OF y `u>0`] unfolding left_diff_distrib by auto qed -lemma convex_distance[intro]: - fixes s :: "'a::real_normed_vector set" - shows "convex_on s (\x. dist a x)" -proof(auto simp add: convex_on_def dist_norm) - fix x y assume "x\s" "y\s" - fix u v ::real assume "0 \ u" "0 \ v" "u + v = 1" - have "a = u *\<^sub>R a + v *\<^sub>R a" unfolding scaleR_left_distrib[THEN sym] and `u+v=1` by simp - hence *:"a - (u *\<^sub>R x + v *\<^sub>R y) = (u *\<^sub>R (a - x)) + (v *\<^sub>R (a - y))" - by (auto simp add: algebra_simps) - show "norm (a - (u *\<^sub>R x + v *\<^sub>R y)) \ u * norm (a - x) + v * norm (a - y)" - unfolding * using norm_triangle_ineq[of "u *\<^sub>R (a - x)" "v *\<^sub>R (a - y)"] - using `0 \ u` `0 \ v` by auto -qed - -subsection {* Arithmetic operations on sets preserve convexity. *} - -lemma convex_scaling: "convex s \ convex ((\x. c *\<^sub>R x) ` s)" - unfolding convex_def and image_iff apply auto - apply (rule_tac x="u *\<^sub>R x+v *\<^sub>R y" in bexI) by (auto simp add: algebra_simps) - -lemma convex_negations: "convex s \ convex ((\x. -x)` s)" - unfolding convex_def and image_iff apply auto - apply (rule_tac x="u *\<^sub>R x+v *\<^sub>R y" in bexI) by auto - -lemma convex_sums: - assumes "convex s" "convex t" - shows "convex {x + y| x y. x \ s \ y \ t}" -proof(auto simp add: convex_def image_iff scaleR_right_distrib) - fix xa xb ya yb assume xy:"xa\s" "xb\s" "ya\t" "yb\t" - fix u v ::real assume uv:"0 \ u" "0 \ v" "u + v = 1" - show "\x y. u *\<^sub>R xa + u *\<^sub>R ya + (v *\<^sub>R xb + v *\<^sub>R yb) = x + y \ x \ s \ y \ t" - apply(rule_tac x="u *\<^sub>R xa + v *\<^sub>R xb" in exI) apply(rule_tac x="u *\<^sub>R ya + v *\<^sub>R yb" in exI) - using assms(1)[unfolded convex_def, THEN bspec[where x=xa], THEN bspec[where x=xb]] - using assms(2)[unfolded convex_def, THEN bspec[where x=ya], THEN bspec[where x=yb]] - using uv xy by auto -qed - -lemma convex_differences: - assumes "convex s" "convex t" - shows "convex {x - y| x y. x \ s \ y \ t}" -proof- - have "{x - y| x y. x \ s \ y \ t} = {x + y |x y. x \ s \ y \ uminus ` t}" unfolding image_iff apply auto - apply(rule_tac x=xa in exI) apply(rule_tac x="-y" in exI) apply simp - apply(rule_tac x=xa in exI) apply(rule_tac x=xb in exI) by simp - thus ?thesis using convex_sums[OF assms(1) convex_negations[OF assms(2)]] by auto -qed - -lemma convex_translation: assumes "convex s" shows "convex ((\x. a + x) ` s)" -proof- have "{a + y |y. y \ s} = (\x. a + x) ` s" by auto - thus ?thesis using convex_sums[OF convex_singleton[of a] assms] by auto qed - -lemma convex_affinity: assumes "convex s" shows "convex ((\x. a + c *\<^sub>R x) ` s)" -proof- have "(\x. 
a + c *\<^sub>R x) ` s = op + a ` op *\<^sub>R c ` s" by auto - thus ?thesis using convex_translation[OF convex_scaling[OF assms], of a c] by auto qed - -lemma convex_linear_image: - assumes c:"convex s" and l:"bounded_linear f" - shows "convex(f ` s)" -proof(auto simp add: convex_def) - interpret f: bounded_linear f by fact - fix x y assume xy:"x \ s" "y \ s" - fix u v ::real assume uv:"0 \ u" "0 \ v" "u + v = 1" - show "u *\<^sub>R f x + v *\<^sub>R f y \ f ` s" unfolding image_iff - apply(rule_tac x="u *\<^sub>R x + v *\<^sub>R y" in bexI) - unfolding f.add f.scaleR - using c[unfolded convex_def] xy uv by auto -qed - -subsection {* Balls, being convex, are connected. *} - lemma convex_ball: fixes x :: "'a::real_normed_vector" shows "convex (ball x e)" @@ -741,18 +465,18 @@ fix u v ::real assume uv:"0 \ u" "0 \ v" "u + v = 1" have "dist x (u *\<^sub>R y + v *\<^sub>R z) \ u * dist x y + v * dist x z" using uv yz using convex_distance[of "ball x e" x, unfolded convex_on_def, THEN bspec[where x=y], THEN bspec[where x=z]] by auto - thus "dist x (u *\<^sub>R y + v *\<^sub>R z) < e" using real_convex_bound_lt[OF yz uv] by auto + thus "dist x (u *\<^sub>R y + v *\<^sub>R z) < e" using convex_bound_lt[OF yz uv] by auto qed lemma convex_cball: fixes x :: "'a::real_normed_vector" shows "convex(cball x e)" -proof(auto simp add: convex_def Ball_def mem_cball) +proof(auto simp add: convex_def Ball_def) fix y z assume yz:"dist x y \ e" "dist x z \ e" fix u v ::real assume uv:" 0 \ u" "0 \ v" "u + v = 1" have "dist x (u *\<^sub>R y + v *\<^sub>R z) \ u * dist x y + v * dist x z" using uv yz using convex_distance[of "cball x e" x, unfolded convex_on_def, THEN bspec[where x=y], THEN bspec[where x=z]] by auto - thus "dist x (u *\<^sub>R y + v *\<^sub>R z) \ e" using real_convex_bound_le[OF yz uv] by auto + thus "dist x (u *\<^sub>R y + v *\<^sub>R z) \ e" using convex_bound_le[OF yz uv] by auto qed lemma connected_ball: @@ -1031,7 +755,7 @@ proof- have fin:"finite {a,b,c}" "finite {b,c}" "finite {c}" by auto have *:"\x y z ::real. x + y + z = 1 \ x = 1 - y - z" - "\x y z ::real^_. x + y + z = 1 \ x = 1 - y - z" by (auto simp add: ring_simps) + "\x y z ::real^_. x + y + z = 1 \ x = 1 - y - z" by (auto simp add: field_simps) show ?thesis unfolding convex_hull_finite[OF fin(1)] and Collect_def and convex_hull_finite_step[OF fin(2)] and * unfolding convex_hull_finite_step[OF fin(3)] apply(rule ext) apply simp apply auto apply(rule_tac x=va in exI) apply (rule_tac x="u c" in exI) apply simp @@ -1115,7 +839,7 @@ hence "x + y \ s" using `?lhs`[unfolded convex_def, THEN conjunct1] apply(erule_tac x="2*\<^sub>R x" in ballE) apply(erule_tac x="2*\<^sub>R y" in ballE) apply(erule_tac x="1/2" in allE) apply simp apply(erule_tac x="1/2" in allE) by auto } - thus ?thesis unfolding convex_def cone_def by auto + thus ?thesis unfolding convex_def cone_def by blast qed lemma affine_dependent_biggerset: fixes s::"(real^'n) set" @@ -1230,7 +954,7 @@ fixes s :: "'a::real_normed_vector set" assumes "open s" shows "open(convex hull s)" - unfolding open_contains_cball convex_hull_explicit unfolding mem_Collect_eq ball_simps(10) + unfolding open_contains_cball convex_hull_explicit unfolding mem_Collect_eq ball_simps(10) proof(rule, rule) fix a assume "\sa u. finite sa \ sa \ s \ (\x\sa. 0 \ u x) \ setsum u sa = 1 \ (\v\sa. u v *\<^sub>R v) = a" then obtain t u where obt:"finite t" "t\s" "\x\t. 0 \ u x" "setsum u t = 1" "(\v\t. 
u v *\<^sub>R v) = a" by auto @@ -1250,7 +974,7 @@ hence "Min i \ b x" unfolding i_def apply(rule_tac Min_le) using obt(1) by auto hence "x + (y - a) \ cball x (b x)" using y unfolding mem_cball dist_norm by auto moreover from `x\t` have "x\s" using obt(2) by auto - ultimately have "x + (y - a) \ s" using y and b[THEN bspec[where x=x]] unfolding subset_eq by auto } + ultimately have "x + (y - a) \ s" using y and b[THEN bspec[where x=x]] unfolding subset_eq by fast } moreover have *:"inj_on (\v. v + (y - a)) t" unfolding inj_on_def by auto have "(\v\(\v. v + (y - a)) ` t. u (v - (y - a))) = 1" @@ -1264,29 +988,14 @@ qed qed -lemma open_dest_vec1_vimage: "open S \ open (dest_vec1 -` S)" -unfolding open_vector_def forall_1 by auto - -lemma tendsto_dest_vec1 [tendsto_intros]: - "(f ---> l) net \ ((\x. dest_vec1 (f x)) ---> dest_vec1 l) net" -by(rule tendsto_Cart_nth) - -lemma continuous_dest_vec1: "continuous net f \ continuous net (\x. dest_vec1 (f x))" - unfolding continuous_def by (rule tendsto_dest_vec1) - (* TODO: move *) lemma compact_real_interval: fixes a b :: real shows "compact {a..b}" -proof - - have "continuous_on {vec1 a .. vec1 b} dest_vec1" - unfolding continuous_on - by (simp add: tendsto_dest_vec1 Lim_at_within Lim_ident_at) - moreover have "compact {vec1 a .. vec1 b}" by (rule compact_interval) - ultimately have "compact (dest_vec1 ` {vec1 a .. vec1 b})" - by (rule compact_continuous_image) - also have "dest_vec1 ` {vec1 a .. vec1 b} = {a..b}" - by (auto simp add: image_def Bex_def exists_vec1) - finally show ?thesis . +proof (rule bounded_closed_imp_compact) + have "\y\{a..b}. dist a y \ dist a b" + unfolding dist_real_def by auto + thus "bounded {a..b}" unfolding bounded_def by fast + show "closed {a..b}" by (rule closed_real_atLeastAtMost) qed lemma compact_convex_combinations: @@ -1320,7 +1029,7 @@ show ?thesis unfolding caratheodory[of s] proof(induct ("CARD('n) + 1")) have *:"{x.\sa. finite sa \ sa \ s \ card sa \ 0 \ x \ convex hull sa} = {}" - using compact_empty by (auto simp add: convex_hull_empty) + using compact_empty by auto case 0 thus ?case unfolding * by simp next case (Suc n) @@ -1330,11 +1039,11 @@ fix x assume "\t. 
finite t \ t \ s \ card t \ Suc n \ x \ convex hull t" then obtain t where t:"finite t" "t \ s" "card t \ Suc n" "x \ convex hull t" by auto show "x\s" proof(cases "card t = 0") - case True thus ?thesis using t(4) unfolding card_0_eq[OF t(1)] by(simp add: convex_hull_empty) + case True thus ?thesis using t(4) unfolding card_0_eq[OF t(1)] by simp next case False hence "card t = Suc 0" using t(3) `n=0` by auto then obtain a where "t = {a}" unfolding card_Suc_eq by auto - thus ?thesis using t(2,4) by (simp add: convex_hull_singleton) + thus ?thesis using t(2,4) by simp qed next fix x assume "x\s" @@ -1369,7 +1078,7 @@ show ?P proof(cases "u={}") case True hence "x=a" using t(4)[unfolded au] by auto show ?P unfolding `x=a` apply(rule_tac x=a in exI, rule_tac x=a in exI, rule_tac x=1 in exI) - using t and `n\0` unfolding au by(auto intro!: exI[where x="{a}"] simp add: convex_hull_singleton) + using t and `n\0` unfolding au by(auto intro!: exI[where x="{a}"]) next case False obtain ux vx b where obt:"ux\0" "vx\0" "ux + vx = 1" "b \ convex hull u" "x = ux *\<^sub>R a + vx *\<^sub>R b" using t(4)[unfolded au convex_hull_insert[OF False]] by auto @@ -1382,7 +1091,7 @@ qed thus ?thesis using compact_convex_combinations[OF assms Suc] by simp qed - qed + qed qed lemma finite_imp_compact_convex_hull: @@ -1822,7 +1531,7 @@ lemma convex_hull_scaling: "convex hull ((\x. c *\<^sub>R x) ` s) = (\x. c *\<^sub>R x) ` (convex hull s)" apply(cases "c=0") defer apply(rule convex_hull_bilemma[rule_format, of _ _ inverse]) apply(rule convex_hull_scaling_lemma) - unfolding image_image scaleR_scaleR by(auto simp add:image_constant_conv convex_hull_eq_empty) + unfolding image_image scaleR_scaleR by(auto simp add:image_constant_conv) lemma convex_hull_affinity: "convex hull ((\x. a + c *\<^sub>R x) ` s) = (\x. a + c *\<^sub>R x) ` (convex hull s)" @@ -1987,13 +1696,11 @@ proof- obtain b where b:"b>0" "\x\s. norm x \ b" using compact_imp_bounded[OF assms(1), unfolded bounded_pos] by auto let ?A = "{y. \u. 0 \ u \ u \ b / norm(x) \ (y = u *\<^sub>R x)}" - have A:"?A = (\u. dest_vec1 u *\<^sub>R x) ` {0 .. vec1 (b / norm x)}" - unfolding image_image[of "\u. u *\<^sub>R x" "\x. dest_vec1 x", THEN sym] - unfolding dest_vec1_inverval vec1_dest_vec1 by auto + have A:"?A = (\u. u *\<^sub>R x) ` {0 .. b / norm x}" + by auto have "compact ?A" unfolding A apply(rule compact_continuous_image, rule continuous_at_imp_continuous_on) apply(rule, rule continuous_vmul) - apply (rule continuous_dest_vec1) - apply(rule continuous_at_id) by(rule compact_interval) + apply(rule continuous_at_id) by(rule compact_real_interval) moreover have "{y. \u\0. 
u \ b / norm x \ y = u *\<^sub>R x} \ s \ {}" apply(rule not_disjointI[OF _ assms(2)]) unfolding mem_Collect_eq using `b>0` assms(3) by(auto intro!: divide_nonneg_pos) ultimately obtain u y where obt: "u\0" "u \ b / norm x" "y = u *\<^sub>R x" @@ -2129,7 +1836,7 @@ unfolding Ball_def mem_cball dist_norm by (auto simp add: norm_basis[unfolded One_nat_def]) case True show ?thesis unfolding True continuous_at Lim_at apply(rule,rule) apply(rule_tac x="e / B" in exI) apply(rule) apply(rule divide_pos_pos) prefer 3 apply(rule,rule,erule conjE) - unfolding norm_0 scaleR_zero_left dist_norm diff_0_right norm_scaleR abs_norm_cancel proof- + unfolding norm_zero scaleR_zero_left dist_norm diff_0_right norm_scaleR abs_norm_cancel proof- fix e and x::"real^'n" assume as:"norm x < e / B" "0 < norm x" "0 frontier s" using pi(1)[of x] unfolding surf(5)[THEN sym] by auto hence "norm (surf (pi x)) \ B" using B fs by auto @@ -2204,10 +1911,6 @@ lemma mem_epigraph: "(x, y) \ epigraph s f \ x \ s \ f x \ y" unfolding epigraph_def by auto -(** move this**) -lemma forall_dest_vec1: "(\x. P x) \ (\x. P(dest_vec1 x))" - apply safe defer apply(erule_tac x="vec1 x" in allE) by auto - (** This might break sooner or later. In fact it did already once. **) lemma convex_epigraph: "convex(epigraph s f) \ convex_on s f \ convex s" @@ -2228,27 +1931,6 @@ subsection {* Use this to derive general bound property of convex function. *} -lemma forall_of_pastecart: - "(\p. P (\x. fstcart (p x)) (\x. sndcart (p x))) \ (\x y. P x y)" apply meson - apply(erule_tac x="\a. pastecart (x a) (y a)" in allE) unfolding o_def by auto - -lemma forall_of_pastecart': - "(\p. P (fstcart p) (sndcart p)) \ (\x y. P x y)" apply meson - apply(erule_tac x="pastecart x y" in allE) unfolding o_def by auto - -lemma forall_of_dest_vec1: "(\v. P (\x. dest_vec1 (v x))) \ (\x. P x)" - apply rule apply rule apply(erule_tac x="(vec1 \ x)" in allE) unfolding o_def vec1_dest_vec1 by auto - -lemma forall_of_dest_vec1': "(\v. P (dest_vec1 v)) \ (\x. P x)" - apply rule apply rule apply(erule_tac x="(vec1 x)" in allE) defer apply rule - apply(erule_tac x="dest_vec1 v" in allE) unfolding o_def vec1_dest_vec1 by auto - -lemma fst_setsum: "fst (\x\A. f x) = (\x\A. fst (f x))" -by (cases "finite A", induct set: finite, simp_all) - -lemma snd_setsum: "snd (\x\A. f x) = (\x\A. snd (f x))" -by (cases "finite A", induct set: finite, simp_all) - lemma convex_on: assumes "convex s" shows "convex_on s f \ (\k u x. (\i\{1..k::nat}. 0 \ u i \ x i \ s) \ setsum u {1..k} = 1 \ @@ -2281,10 +1963,10 @@ } moreover { fix a b assume "\ u * a + v * b \ a" hence "v * b > (1 - u) * a" unfolding not_le using as(4) by(auto simp add: field_simps) - hence "a < b" unfolding * using as(4) apply(rule_tac mult_left_less_imp_less) by(auto simp add: ring_simps) + hence "a < b" unfolding * using as(4) apply(rule_tac mult_left_less_imp_less) by(auto simp add: field_simps) hence "u * a + v * b \ b" unfolding ** using **(2) as(3) by(auto simp add: field_simps intro!:mult_right_mono) } ultimately show "u *\<^sub>R x + v *\<^sub>R y \ s" apply- apply(rule assms[unfolded is_interval_def, rule_format, OF as(1,2)]) - using as(3-) dimindex_ge_1 apply- by(auto simp add: vector_component) qed + using as(3-) dimindex_ge_1 by auto qed lemma is_interval_connected: fixes s :: "(real ^ _) set" @@ -2294,6 +1976,7 @@ lemma convex_interval: "convex {a .. b}" "convex {a<.. 
s" have "y \ ?halfr \ ?halfl" apply(rule ccontr) - using as(6) `y\s` by (auto simp add: inner_vector_def dest_vec1_eq) } + using as(6) `y\s` by (auto simp add: inner_vector_def) } moreover have "a\?halfl" "b\?halfr" using * by (auto simp add: inner_vector_def) hence "?halfl \ s \ {}" "?halfr \ s \ {}" using as(2-3) by auto ultimately show False apply(rule_tac notE[OF as(1)[unfolded connected_def]]) @@ -2322,33 +2005,33 @@ lemma convex_connected_1: "connected s \ convex (s::(real^1) set)" by(metis is_interval_convex convex_connected is_interval_connected_1) - +*) subsection {* Another intermediate value theorem formulation. *} -lemma ivt_increasing_component_on_1: fixes f::"real^1 \ real^'n" - assumes "dest_vec1 a \ dest_vec1 b" "continuous_on {a .. b} f" "(f a)$k \ y" "y \ (f b)$k" +lemma ivt_increasing_component_on_1: fixes f::"real \ real^'n" + assumes "a \ b" "continuous_on {a .. b} f" "(f a)$k \ y" "y \ (f b)$k" shows "\x\{a..b}. (f x)$k = y" proof- have "f a \ f ` {a..b}" "f b \ f ` {a..b}" apply(rule_tac[!] imageI) using assms(1) by(auto simp add: vector_le_def) thus ?thesis using connected_ivt_component[of "f ` {a..b}" "f a" "f b" k y] - using connected_continuous_image[OF assms(2) convex_connected[OF convex_interval(1)]] + using connected_continuous_image[OF assms(2) convex_connected[OF convex_real_interval(5)]] using assms by(auto intro!: imageI) qed -lemma ivt_increasing_component_1: fixes f::"real^1 \ real^'n" - shows "dest_vec1 a \ dest_vec1 b \ \x\{a .. b}. continuous (at x) f +lemma ivt_increasing_component_1: fixes f::"real \ real^'n" + shows "a \ b \ \x\{a .. b}. continuous (at x) f \ f a$k \ y \ y \ f b$k \ \x\{a..b}. (f x)$k = y" by(rule ivt_increasing_component_on_1) (auto simp add: continuous_at_imp_continuous_on) -lemma ivt_decreasing_component_on_1: fixes f::"real^1 \ real^'n" - assumes "dest_vec1 a \ dest_vec1 b" "continuous_on {a .. b} f" "(f b)$k \ y" "y \ (f a)$k" +lemma ivt_decreasing_component_on_1: fixes f::"real \ real^'n" + assumes "a \ b" "continuous_on {a .. b} f" "(f b)$k \ y" "y \ (f a)$k" shows "\x\{a..b}. (f x)$k = y" apply(subst neg_equal_iff_equal[THEN sym]) unfolding vector_uminus_component[THEN sym] apply(rule ivt_increasing_component_on_1) using assms using continuous_on_neg - by(auto simp add:vector_uminus_component) + by auto -lemma ivt_decreasing_component_1: fixes f::"real^1 \ real^'n" - shows "dest_vec1 a \ dest_vec1 b \ \x\{a .. b}. continuous (at x) f +lemma ivt_decreasing_component_1: fixes f::"real \ real^'n" + shows "a \ b \ \x\{a .. b}. continuous (at x) f \ f b$k \ y \ y \ f a$k \ \x\{a..b}. (f x)$k = y" by(rule ivt_decreasing_component_on_1) (auto simp: continuous_at_imp_continuous_on) @@ -2386,7 +2069,7 @@ unfolding i'(1) xi_def apply(rule_tac Min_le) unfolding image_iff defer apply(rule_tac x=j in bexI) using i' by auto have i01:"x$i \ 1" "x$i > 0" using Suc(2)[unfolded mem_interval,rule_format,of i] using i'(2) `x$i \ 0` - by(auto simp add: Cart_lambda_beta) + by auto show ?thesis proof(cases "x$i=1") case True have "\j\{i. x$i \ 0}. x$j = 1" apply(rule, rule ccontr) unfolding mem_Collect_eq proof- fix j assume "x $ j \ 0" "x $ j \ 1" @@ -2395,21 +2078,21 @@ hence "x$j \ x$i" unfolding i'(1) xi_def apply(rule_tac Min_le) by auto thus False using True Suc(2) j by(auto simp add: vector_le_def elim!:ballE[where x=j]) qed thus "x\convex hull ?points" apply(rule_tac hull_subset[unfolded subset_eq, rule_format]) - by(auto simp add: Cart_lambda_beta) + by auto next let ?y = "\j. 
if x$j = 0 then 0 else (x$j - x$i) / (1 - x$i)" case False hence *:"x = x$i *\<^sub>R (\ j. if x$j = 0 then 0 else 1) + (1 - x$i) *\<^sub>R (\ j. ?y j)" unfolding Cart_eq - by(auto simp add: Cart_lambda_beta vector_add_component vector_smult_component vector_minus_component field_simps) + by(auto simp add: field_simps) { fix j have "x$j \ 0 \ 0 \ (x $ j - x $ i) / (1 - x $ i)" "(x $ j - x $ i) / (1 - x $ i) \ 1" apply(rule_tac divide_nonneg_pos) using i(1)[of j] using False i01 - using Suc(2)[unfolded mem_interval, rule_format, of j] by(auto simp add:field_simps Cart_lambda_beta) + using Suc(2)[unfolded mem_interval, rule_format, of j] by(auto simp add:field_simps) hence "0 \ ?y j \ ?y j \ 1" by auto } - moreover have "i\{j. x$j \ 0} - {j. ((\ j. ?y j)::real^'n) $ j \ 0}" using i01 by(auto simp add: Cart_lambda_beta) + moreover have "i\{j. x$j \ 0} - {j. ((\ j. ?y j)::real^'n) $ j \ 0}" using i01 by auto hence "{j. x$j \ 0} \ {j. ((\ j. ?y j)::real^'n) $ j \ 0}" by auto - hence **:"{j. ((\ j. ?y j)::real^'n) $ j \ 0} \ {j. x$j \ 0}" apply - apply rule by(auto simp add: Cart_lambda_beta) + hence **:"{j. ((\ j. ?y j)::real^'n) $ j \ 0} \ {j. x$j \ 0}" apply - apply rule by auto have "card {j. ((\ j. ?y j)::real^'n) $ j \ 0} \ n" using less_le_trans[OF psubset_card_mono[OF _ **] Suc(4)] by auto ultimately show ?thesis apply(subst *) apply(rule convex_convex_hull[unfolded convex_def, rule_format]) apply(rule_tac hull_subset[unfolded subset_eq, rule_format]) defer apply(rule Suc(1)) - unfolding mem_interval using i01 Suc(3) by (auto simp add: Cart_lambda_beta) + unfolding mem_interval using i01 Suc(3) by auto qed qed qed } note * = this show ?thesis apply rule defer apply(rule hull_minimal) unfolding subset_eq prefer 3 apply rule apply(rule_tac n2="CARD('n)" in *) prefer 3 apply(rule card_mono) using 01 and convex_interval(1) prefer 5 apply - apply rule @@ -2424,7 +2107,7 @@ prefer 3 apply(rule unit_interval_convex_hull) apply rule unfolding mem_Collect_eq proof- fix x::"real^'n" assume as:"\i. x $ i = 0 \ x $ i = 1" show "x \ (\s. \ i. if i \ s then 1 else 0) ` UNIV" apply(rule image_eqI[where x="{i. x$i = 1}"]) - unfolding Cart_eq using as by(auto simp add:Cart_lambda_beta) qed auto + unfolding Cart_eq using as by auto qed auto subsection {* Hence any cube (could do any nonempty interval). *} @@ -2435,23 +2118,23 @@ unfolding image_iff defer apply(erule bexE) proof- fix y assume as:"y\{x - ?d .. x + ?d}" { fix i::'n have "x $ i \ d + y $ i" "y $ i \ d + x $ i" using as[unfolded mem_interval, THEN spec[where x=i]] - by(auto simp add: vector_component) + by auto hence "1 \ inverse d * (x $ i - y $ i)" "1 \ inverse d * (y $ i - x $ i)" apply(rule_tac[!] mult_left_le_imp_le[OF _ assms]) unfolding mult_assoc[THEN sym] - using assms by(auto simp add: field_simps right_inverse) + using assms by(auto simp add: field_simps) hence "inverse d * (x $ i * 2) \ 2 + inverse d * (y $ i * 2)" "inverse d * (y $ i * 2) \ 2 + inverse d * (x $ i * 2)" by(auto simp add:field_simps) } hence "inverse (2 * d) *\<^sub>R (y - (x - ?d)) \ {0..1}" unfolding mem_interval using assms - by(auto simp add: Cart_eq vector_component_simps field_simps) + by(auto simp add: Cart_eq field_simps) thus "\z\{0..1}. y = x - ?d + (2 * d) *\<^sub>R z" apply- apply(rule_tac x="inverse (2 * d) *\<^sub>R (y - (x - ?d))" in bexI) - using assms by(auto simp add: Cart_eq vector_le_def Cart_lambda_beta) + using assms by(auto simp add: Cart_eq vector_le_def) next fix y z assume as:"z\{0..1}" "y = x - ?d + (2*d) *\<^sub>R z" have "\i. 
0 \ d * z $ i \ d * z $ i \ d" using assms as(1)[unfolded mem_interval] apply(erule_tac x=i in allE) apply rule apply(rule mult_nonneg_nonneg) prefer 3 apply(rule mult_right_le_one_le) - using assms by(auto simp add: vector_component_simps Cart_eq) + using assms by(auto simp add: Cart_eq) thus "y \ {x - ?d..x + ?d}" unfolding as(2) mem_interval apply- apply rule using as(1)[unfolded mem_interval] - apply(erule_tac x=i in allE) using assms by(auto simp add: vector_component_simps Cart_eq) qed + apply(erule_tac x=i in allE) using assms by(auto simp add: Cart_eq) qed obtain s where "finite s" "{0..1::real^'n} = convex hull s" using unit_cube_convex_hull by auto thus ?thesis apply(rule_tac that[of "(\y. x - ?d + (2 * d) *\<^sub>R y)` s"]) unfolding * and convex_hull_affinity by auto qed @@ -2541,8 +2224,8 @@ have "0 < d" unfolding d_def using `e>0` dimge1 by(rule_tac divide_pos_pos, auto) let ?d = "(\ i. d)::real^'n" obtain c where c:"finite c" "{x - ?d..x + ?d} = convex hull c" using cube_convex_hull[OF `d>0`, of x] by auto - have "x\{x - ?d..x + ?d}" using `d>0` unfolding mem_interval by(auto simp add:vector_component_simps) - hence "c\{}" using c by(auto simp add:convex_hull_empty) + have "x\{x - ?d..x + ?d}" using `d>0` unfolding mem_interval by auto + hence "c\{}" using c by auto def k \ "Max (f ` c)" have "convex_on {x - ?d..x + ?d} f" apply(rule convex_on_subset[OF assms(2)]) apply(rule subset_trans[OF _ e(1)]) unfolding subset_eq mem_cball proof @@ -2550,7 +2233,7 @@ have e:"e = setsum (\i. d) (UNIV::'n set)" unfolding setsum_constant d_def using dimge1 by (metis eq_divide_imp mult_frac_num real_dimindex_gt_0 real_eq_of_nat real_less_def real_mult_commute) show "dist x z \ e" unfolding dist_norm e apply(rule_tac order_trans[OF norm_le_l1], rule setsum_mono) - using z[unfolded mem_interval] apply(erule_tac x=i in allE) by(auto simp add:field_simps vector_component_simps) qed + using z[unfolded mem_interval] apply(erule_tac x=i in allE) by auto qed hence k:"\y\{x - ?d..x + ?d}. f y \ k" unfolding c(2) apply(rule_tac convex_on_convex_hull_bound) apply assumption unfolding k_def apply(rule, rule Max_ge) using c(1) by auto have "d \ e" unfolding d_def apply(rule mult_imp_div_pos_le) using `e>0` dimge1 unfolding mult_le_cancel_left1 using real_dimindex_ge_1 by auto @@ -2559,9 +2242,9 @@ hence "\y\cball x d. abs (f y) \ k + 2 * abs (f x)" apply(rule_tac convex_bounds_lemma) apply assumption proof fix y assume y:"y\cball x d" { fix i::'n have "x $ i - d \ y $ i" "y $ i \ x $ i + d" - using order_trans[OF component_le_norm y[unfolded mem_cball dist_norm], of i] by(auto simp add: vector_component) } + using order_trans[OF component_le_norm y[unfolded mem_cball dist_norm], of i] by auto } thus "f y \ k" apply(rule_tac k[rule_format]) unfolding mem_cball mem_interval dist_norm - by(auto simp add: vector_component_simps) qed + by auto qed hence "continuous_on (ball x d) f" apply(rule_tac convex_on_bounded_continuous) apply(rule open_ball, rule convex_on_subset[OF conv], rule ball_subset_cball) apply force @@ -2580,11 +2263,11 @@ "midpoint a b = (inverse (2::real)) *\<^sub>R (a + b)" definition - open_segment :: "real ^ 'n \ real ^ 'n \ (real ^ 'n) set" where + open_segment :: "'a::real_vector \ 'a \ 'a set" where "open_segment a b = {(1 - u) *\<^sub>R a + u *\<^sub>R b | u::real. 0 < u \ u < 1}" definition - closed_segment :: "real ^ 'n \ real ^ 'n \ (real ^ 'n) set" where + closed_segment :: "'a::real_vector \ 'a \ 'a set" where "closed_segment a b = {(1 - u) *\<^sub>R a + u *\<^sub>R b | u::real. 
0 \ u \ u \ 1}" definition "between = (\ (a,b). closed_segment a b)" @@ -2655,12 +2338,14 @@ unfolding segment_convex_hull apply(rule_tac[!] hull_subset[unfolded subset_eq, rule_format]) by auto lemma segment_furthest_le: + fixes a b x y :: "real ^ 'n" assumes "x \ closed_segment a b" shows "norm(y - x) \ norm(y - a) \ norm(y - x) \ norm(y - b)" proof- obtain z where "z\{a, b}" "norm (x - y) \ norm (z - y)" using simplex_furthest_le[of "{a, b}" y] using assms[unfolded segment_convex_hull] by auto thus ?thesis by(auto simp add:norm_minus_commute) qed lemma segment_bound: + fixes x a b :: "real ^ 'n" assumes "x \ closed_segment a b" shows "norm(x - a) \ norm(b - a)" "norm(x - b) \ norm(b - a)" using segment_furthest_le[OF assms, of a] @@ -2685,17 +2370,17 @@ unfolding as(1) by(auto simp add:algebra_simps) show "norm (a - x) *\<^sub>R (x - b) = norm (x - b) *\<^sub>R (a - x)" unfolding norm_minus_commute[of x a] * Cart_eq using as(2,3) - by(auto simp add: vector_component_simps field_simps) + by(auto simp add: field_simps) next assume as:"dist a b = dist a x + dist x b" have "norm (a - x) / norm (a - b) \ 1" unfolding divide_le_eq_1_pos[OF Fal2] unfolding as[unfolded dist_norm] norm_ge_zero by auto thus "\u. x = (1 - u) *\<^sub>R a + u *\<^sub>R b \ 0 \ u \ u \ 1" apply(rule_tac x="dist a x / dist a b" in exI) unfolding dist_norm Cart_eq apply- apply rule defer apply(rule, rule divide_nonneg_pos) prefer 4 proof rule fix i::'n have "((1 - norm (a - x) / norm (a - b)) *\<^sub>R a + (norm (a - x) / norm (a - b)) *\<^sub>R b) $ i = ((norm (a - b) - norm (a - x)) * (a $ i) + norm (a - x) * (b $ i)) / norm (a - b)" - using Fal by(auto simp add:vector_component_simps field_simps) + using Fal by(auto simp add: field_simps) also have "\ = x$i" apply(rule divide_eq_imp[OF Fal]) unfolding as[unfolded dist_norm] using as[unfolded dist_triangle_eq Cart_eq,rule_format, of i] - by(auto simp add:field_simps vector_component_simps) + by(auto simp add:field_simps) finally show "x $ i = ((1 - norm (a - x) / norm (a - b)) *\<^sub>R a + (norm (a - x) / norm (a - b)) *\<^sub>R b) $ i" by auto qed(insert Fal2, auto) qed qed @@ -2704,7 +2389,7 @@ "between (b,a) (midpoint a b)" (is ?t2) proof- have *:"\x y z. x = (1/2::real) *\<^sub>R z \ y = (1/2) *\<^sub>R z \ norm z = norm x + norm y" by auto show ?t1 ?t2 unfolding between midpoint_def dist_norm apply(rule_tac[!] *) - by(auto simp add:field_simps Cart_eq vector_component_simps) qed + by(auto simp add:field_simps Cart_eq) qed lemma between_mem_convex_hull: "between (a,b) x \ x \ convex hull {a,b}" @@ -2723,7 +2408,7 @@ have *:"y = (1 - (1 - e)) *\<^sub>R ((1 / e) *\<^sub>R y - ((1 - e) / e) *\<^sub>R x) + (1 - e) *\<^sub>R x" using `e>0` by (auto simp add: scaleR_left_diff_distrib scaleR_right_diff_distrib) have "dist c ((1 / e) *\<^sub>R y - ((1 - e) / e) *\<^sub>R x) = abs(1/e) * norm (e *\<^sub>R c - y + (1 - e) *\<^sub>R x)" unfolding dist_norm unfolding norm_scaleR[THEN sym] apply(rule norm_eqI) using `e>0` - by(auto simp add:vector_component_simps Cart_eq field_simps) + by(auto simp add: Cart_eq field_simps) also have "\ = abs(1/e) * norm (x - e *\<^sub>R (x - c) - y)" by(auto intro!:norm_eqI simp add: algebra_simps) also have "\ < d" using as[unfolded dist_norm] and `e>0` by(auto simp add:pos_divide_less_eq[OF `e>0`] real_mult_commute) @@ -2795,11 +2480,11 @@ fix x::"real^'n" and e assume "0xa. dist x xa < e \ (\x. 0 \ xa $ x) \ setsum (op $ xa) UNIV \ 1" show "(\xa. 
0 < x $ xa) \ setsum (op $ x) UNIV < 1" apply(rule,rule) proof- fix i::'n show "0 < x $ i" using as[THEN spec[where x="x - (e / 2) *\<^sub>R basis i"]] and `e>0` - unfolding dist_norm by(auto simp add: norm_basis vector_component_simps basis_component elim:allE[where x=i]) + unfolding dist_norm by(auto simp add: norm_basis elim:allE[where x=i]) next guess a using UNIV_witness[where 'a='n] .. have **:"dist x (x + (e / 2) *\<^sub>R basis a) < e" using `e>0` and norm_basis[of a] - unfolding dist_norm by(auto simp add: vector_component_simps basis_component intro!: mult_strict_left_mono_comm) - have "\i. (x + (e / 2) *\<^sub>R basis a) $ i = x$i + (if i = a then e/2 else 0)" by(auto simp add:vector_component_simps) + unfolding dist_norm by(auto intro!: mult_strict_left_mono_comm) + have "\i. (x + (e / 2) *\<^sub>R basis a) $ i = x$i + (if i = a then e/2 else 0)" by auto hence *:"setsum (op $ (x + (e / 2) *\<^sub>R basis a)) UNIV = setsum (\i. x$i + (if a = i then e/2 else 0)) UNIV" by(rule_tac setsum_cong, auto) have "setsum (op $ x) UNIV < setsum (op $ (x + (e / 2) *\<^sub>R basis a)) UNIV" unfolding * setsum_addf using `0 setsum (\i. x$i + ?d) UNIV" proof(rule setsum_mono) fix i::'n have "abs (y$i - x$i) < ?d" apply(rule le_less_trans) using component_le_norm[of "y - x" i] - using y[unfolded min_less_iff_conj dist_norm, THEN conjunct2] by(auto simp add:vector_component_simps norm_minus_commute) + using y[unfolded min_less_iff_conj dist_norm, THEN conjunct2] by(auto simp add: norm_minus_commute) thus "y $ i \ x $ i + ?d" by auto qed also have "\ \ 1" unfolding setsum_addf setsum_constant card_enum real_eq_of_nat using dimindex_ge_1 by(auto simp add: Suc_le_eq) finally show "(\i. 0 \ y $ i) \ setsum (op $ y) UNIV \ 1" apply- proof(rule,rule) fix i::'n have "norm (x - y) < x$i" using y[unfolded min_less_iff_conj dist_norm, THEN conjunct1] - using Min_gr_iff[of "op $ x ` dimset x"] dimindex_ge_1 by auto - thus "0 \ y$i" using component_le_norm[of "x - y" i] and as(1)[rule_format, of i] by(auto simp add: vector_component_simps) + by auto + thus "0 \ y$i" using component_le_norm[of "x - y" i] and as(1)[rule_format, of i] by auto qed auto qed auto qed lemma interior_std_simplex_nonempty: obtains a::"real^'n" where @@ -2840,540 +2525,4 @@ also have "\ < 1" unfolding setsum_constant card_enum real_eq_of_nat real_divide_def[THEN sym] by (auto simp add:field_simps) finally show "setsum (op $ ?a) ?D < 1" by auto qed qed -subsection {* Paths. *} - -definition "path (g::real^1 \ real^'n) \ continuous_on {0 .. 1} g" - -definition "pathstart (g::real^1 \ real^'n) = g 0" - -definition "pathfinish (g::real^1 \ real^'n) = g 1" - -definition "path_image (g::real^1 \ real^'n) = g ` {0 .. 1}" - -definition "reversepath (g::real^1 \ real^'n) = (\x. g(1 - x))" - -definition joinpaths:: "(real^1 \ real^'n) \ (real^1 \ real^'n) \ (real^1 \ real^'n)" (infixr "+++" 75) - where "joinpaths g1 g2 = (\x. if dest_vec1 x \ ((1 / 2)::real) then g1 (2 *\<^sub>R x) else g2(2 *\<^sub>R x - 1))" -definition "simple_path (g::real^1 \ real^'n) \ - (\x\{0..1}. \y\{0..1}. g x = g y \ x = y \ x = 0 \ y = 1 \ x = 1 \ y = 0)" - -definition "injective_path (g::real^1 \ real^'n) \ - (\x\{0..1}. \y\{0..1}. g x = g y \ x = y)" - -subsection {* Some lemmas about these concepts. 
*} - -lemma injective_imp_simple_path: - "injective_path g \ simple_path g" - unfolding injective_path_def simple_path_def by auto - -lemma path_image_nonempty: "path_image g \ {}" - unfolding path_image_def image_is_empty interval_eq_empty by auto - -lemma pathstart_in_path_image[intro]: "(pathstart g) \ path_image g" - unfolding pathstart_def path_image_def apply(rule imageI) - unfolding mem_interval_1 vec_1[THEN sym] dest_vec1_vec by auto - -lemma pathfinish_in_path_image[intro]: "(pathfinish g) \ path_image g" - unfolding pathfinish_def path_image_def apply(rule imageI) - unfolding mem_interval_1 vec_1[THEN sym] dest_vec1_vec by auto - -lemma connected_path_image[intro]: "path g \ connected(path_image g)" - unfolding path_def path_image_def apply(rule connected_continuous_image, assumption) - by(rule convex_connected, rule convex_interval) - -lemma compact_path_image[intro]: "path g \ compact(path_image g)" - unfolding path_def path_image_def apply(rule compact_continuous_image, assumption) - by(rule compact_interval) - -lemma reversepath_reversepath[simp]: "reversepath(reversepath g) = g" - unfolding reversepath_def by auto - -lemma pathstart_reversepath[simp]: "pathstart(reversepath g) = pathfinish g" - unfolding pathstart_def reversepath_def pathfinish_def by auto - -lemma pathfinish_reversepath[simp]: "pathfinish(reversepath g) = pathstart g" - unfolding pathstart_def reversepath_def pathfinish_def by auto - -lemma pathstart_join[simp]: "pathstart(g1 +++ g2) = pathstart g1" - unfolding pathstart_def joinpaths_def pathfinish_def by auto - -lemma pathfinish_join[simp]:"pathfinish(g1 +++ g2) = pathfinish g2" proof- - have "2 *\<^sub>R 1 - 1 = (1::real^1)" unfolding Cart_eq by(auto simp add:vector_component_simps) - thus ?thesis unfolding pathstart_def joinpaths_def pathfinish_def - unfolding vec_1[THEN sym] dest_vec1_vec by auto qed - -lemma path_image_reversepath[simp]: "path_image(reversepath g) = path_image g" proof- - have *:"\g. path_image(reversepath g) \ path_image g" - unfolding path_image_def subset_eq reversepath_def Ball_def image_iff apply(rule,rule,erule bexE) - apply(rule_tac x="1 - xa" in bexI) by(auto simp add:vector_le_def vector_component_simps elim!:ballE) - show ?thesis using *[of g] *[of "reversepath g"] unfolding reversepath_reversepath by auto qed - -lemma path_reversepath[simp]: "path(reversepath g) \ path g" proof- - have *:"\g. path g \ path(reversepath g)" unfolding path_def reversepath_def - apply(rule continuous_on_compose[unfolded o_def, of _ "\x. 1 - x"]) - apply(rule continuous_on_sub, rule continuous_on_const, rule continuous_on_id) - apply(rule continuous_on_subset[of "{0..1}"], assumption) by auto - show ?thesis using *[of g] *[of "reversepath g"] unfolding reversepath_reversepath by auto qed - -lemmas reversepath_simps = path_reversepath path_image_reversepath pathstart_reversepath pathfinish_reversepath - -lemma path_join[simp]: assumes "pathfinish g1 = pathstart g2" shows "path (g1 +++ g2) \ path g1 \ path g2" - unfolding path_def pathfinish_def pathstart_def apply rule defer apply(erule conjE) proof- - assume as:"continuous_on {0..1} (g1 +++ g2)" - have *:"g1 = (\x. g1 (2 *\<^sub>R x)) \ (\x. (1/2) *\<^sub>R x)" - "g2 = (\x. g2 (2 *\<^sub>R x - 1)) \ (\x. (1/2) *\<^sub>R (x + 1))" unfolding o_def by auto - have "op *\<^sub>R (1 / 2) ` {0::real^1..1} \ {0..1}" "(\x. 
(1 / 2) *\<^sub>R (x + 1)) ` {(0::real^1)..1} \ {0..1}" - unfolding image_smult_interval by auto - thus "continuous_on {0..1} g1 \ continuous_on {0..1} g2" apply -apply rule - apply(subst *) defer apply(subst *) apply (rule_tac[!] continuous_on_compose) - apply (rule continuous_on_cmul, rule continuous_on_add, rule continuous_on_id, rule continuous_on_const) defer - apply (rule continuous_on_cmul, rule continuous_on_id) apply(rule_tac[!] continuous_on_eq[of _ "g1 +++ g2"]) defer prefer 3 - apply(rule_tac[1-2] continuous_on_subset[of "{0 .. 1}"]) apply(rule as, assumption, rule as, assumption) - apply(rule) defer apply rule proof- - fix x assume "x \ op *\<^sub>R (1 / 2) ` {0::real^1..1}" - hence "dest_vec1 x \ 1 / 2" unfolding image_iff by(auto simp add: vector_component_simps) - thus "(g1 +++ g2) x = g1 (2 *\<^sub>R x)" unfolding joinpaths_def by auto next - fix x assume "x \ (\x. (1 / 2) *\<^sub>R (x + 1)) ` {0::real^1..1}" - hence "dest_vec1 x \ 1 / 2" unfolding image_iff by(auto simp add: vector_component_simps) - thus "(g1 +++ g2) x = g2 (2 *\<^sub>R x - 1)" proof(cases "dest_vec1 x = 1 / 2") - case True hence "x = (1/2) *\<^sub>R 1" unfolding Cart_eq by(auto simp add: forall_1 vector_component_simps) - thus ?thesis unfolding joinpaths_def using assms[unfolded pathstart_def pathfinish_def] by auto - qed (auto simp add:le_less joinpaths_def) qed -next assume as:"continuous_on {0..1} g1" "continuous_on {0..1} g2" - have *:"{0 .. 1::real^1} = {0.. (1/2)*\<^sub>R 1} \ {(1/2) *\<^sub>R 1 .. 1}" by(auto simp add: vector_component_simps) - have **:"op *\<^sub>R 2 ` {0..(1 / 2) *\<^sub>R 1} = {0..1::real^1}" apply(rule set_ext, rule) unfolding image_iff - defer apply(rule_tac x="(1/2)*\<^sub>R x" in bexI) by(auto simp add: vector_component_simps) - have ***:"(\x. 2 *\<^sub>R x - 1) ` {(1 / 2) *\<^sub>R 1..1} = {0..1::real^1}" - unfolding image_affinity_interval[of _ "- 1", unfolded diff_def[symmetric]] and interval_eq_empty_1 - by(auto simp add: vector_component_simps) - have ****:"\x::real^1. x $ 1 * 2 = 1 \ x = (1/2) *\<^sub>R 1" unfolding Cart_eq by(auto simp add: forall_1 vector_component_simps) - show "continuous_on {0..1} (g1 +++ g2)" unfolding * apply(rule continuous_on_union) apply(rule closed_interval)+ proof- - show "continuous_on {0..(1 / 2) *\<^sub>R 1} (g1 +++ g2)" apply(rule continuous_on_eq[of _ "\x. g1 (2 *\<^sub>R x)"]) defer - unfolding o_def[THEN sym] apply(rule continuous_on_compose) apply(rule continuous_on_cmul, rule continuous_on_id) - unfolding ** apply(rule as(1)) unfolding joinpaths_def by(auto simp add: vector_component_simps) next - show "continuous_on {(1/2)*\<^sub>R1..1} (g1 +++ g2)" apply(rule continuous_on_eq[of _ "g2 \ (\x. 
2 *\<^sub>R x - 1)"]) defer - apply(rule continuous_on_compose) apply(rule continuous_on_sub, rule continuous_on_cmul, rule continuous_on_id, rule continuous_on_const) - unfolding *** o_def joinpaths_def apply(rule as(2)) using assms[unfolded pathstart_def pathfinish_def] - by(auto simp add: vector_component_simps ****) qed qed - -lemma path_image_join_subset: "path_image(g1 +++ g2) \ (path_image g1 \ path_image g2)" proof - fix x assume "x \ path_image (g1 +++ g2)" - then obtain y where y:"y\{0..1}" "x = (if dest_vec1 y \ 1 / 2 then g1 (2 *\<^sub>R y) else g2 (2 *\<^sub>R y - 1))" - unfolding path_image_def image_iff joinpaths_def by auto - thus "x \ path_image g1 \ path_image g2" apply(cases "dest_vec1 y \ 1/2") - apply(rule_tac UnI1) defer apply(rule_tac UnI2) unfolding y(2) path_image_def using y(1) - by(auto intro!: imageI simp add: vector_component_simps) qed - -lemma subset_path_image_join: - assumes "path_image g1 \ s" "path_image g2 \ s" shows "path_image(g1 +++ g2) \ s" - using path_image_join_subset[of g1 g2] and assms by auto - -lemma path_image_join: - assumes "path g1" "path g2" "pathfinish g1 = pathstart g2" - shows "path_image(g1 +++ g2) = (path_image g1) \ (path_image g2)" -apply(rule, rule path_image_join_subset, rule) unfolding Un_iff proof(erule disjE) - fix x assume "x \ path_image g1" - then obtain y where y:"y\{0..1}" "x = g1 y" unfolding path_image_def image_iff by auto - thus "x \ path_image (g1 +++ g2)" unfolding joinpaths_def path_image_def image_iff - apply(rule_tac x="(1/2) *\<^sub>R y" in bexI) by(auto simp add: vector_component_simps) next - fix x assume "x \ path_image g2" - then obtain y where y:"y\{0..1}" "x = g2 y" unfolding path_image_def image_iff by auto - moreover have *:"y $ 1 = 0 \ y = 0" unfolding Cart_eq by auto - ultimately show "x \ path_image (g1 +++ g2)" unfolding joinpaths_def path_image_def image_iff - apply(rule_tac x="(1/2) *\<^sub>R (y + 1)" in bexI) using assms(3)[unfolded pathfinish_def pathstart_def] - by(auto simp add: vector_component_simps) qed - -lemma not_in_path_image_join: - assumes "x \ path_image g1" "x \ path_image g2" shows "x \ path_image(g1 +++ g2)" - using assms and path_image_join_subset[of g1 g2] by auto - -lemma simple_path_reversepath: assumes "simple_path g" shows "simple_path (reversepath g)" - using assms unfolding simple_path_def reversepath_def apply- apply(rule ballI)+ - apply(erule_tac x="1-x" in ballE, erule_tac x="1-y" in ballE) - unfolding mem_interval_1 by(auto simp add:vector_component_simps) - -(** move this **) -declare vector_scaleR_component[simp] - -lemma simple_path_join_loop: - assumes "injective_path g1" "injective_path g2" "pathfinish g2 = pathstart g1" - "(path_image g1 \ path_image g2) \ {pathstart g1,pathstart g2}" - shows "simple_path(g1 +++ g2)" -unfolding simple_path_def proof((rule ballI)+, rule impI) let ?g = "g1 +++ g2" - note inj = assms(1,2)[unfolded injective_path_def, rule_format] - fix x y::"real^1" assume xy:"x \ {0..1}" "y \ {0..1}" "?g x = ?g y" - show "x = y \ x = 0 \ y = 1 \ x = 1 \ y = 0" proof(case_tac "x$1 \ 1/2",case_tac[!] 
"y$1 \ 1/2", unfold not_le) - assume as:"x $ 1 \ 1 / 2" "y $ 1 \ 1 / 2" - hence "g1 (2 *\<^sub>R x) = g1 (2 *\<^sub>R y)" using xy(3) unfolding joinpaths_def by auto - moreover have "2 *\<^sub>R x \ {0..1}" "2 *\<^sub>R y \ {0..1}" using xy(1,2) as - unfolding mem_interval_1 by(auto simp add:vector_component_simps) - ultimately show ?thesis using inj(1)[of "2*\<^sub>R x" "2*\<^sub>R y"] by auto - next assume as:"x $ 1 > 1 / 2" "y $ 1 > 1 / 2" - hence "g2 (2 *\<^sub>R x - 1) = g2 (2 *\<^sub>R y - 1)" using xy(3) unfolding joinpaths_def by auto - moreover have "2 *\<^sub>R x - 1 \ {0..1}" "2 *\<^sub>R y - 1 \ {0..1}" using xy(1,2) as unfolding mem_interval_1 by(auto simp add:vector_component_simps) - ultimately show ?thesis using inj(2)[of "2*\<^sub>R x - 1" "2*\<^sub>R y - 1"] by auto - next assume as:"x $ 1 \ 1 / 2" "y $ 1 > 1 / 2" - hence "?g x \ path_image g1" "?g y \ path_image g2" unfolding path_image_def joinpaths_def - using xy(1,2)[unfolded mem_interval_1] by(auto simp add:vector_component_simps intro!: imageI) - moreover have "?g y \ pathstart g2" using as(2) unfolding pathstart_def joinpaths_def - using inj(2)[of "2 *\<^sub>R y - 1" 0] and xy(2)[unfolded mem_interval_1] - by(auto simp add:vector_component_simps field_simps Cart_eq) - ultimately have *:"?g x = pathstart g1" using assms(4) unfolding xy(3) by auto - hence "x = 0" unfolding pathstart_def joinpaths_def using as(1) and xy(1)[unfolded mem_interval_1] - using inj(1)[of "2 *\<^sub>R x" 0] by(auto simp add:vector_component_simps) - moreover have "y = 1" using * unfolding xy(3) assms(3)[THEN sym] - unfolding joinpaths_def pathfinish_def using as(2) and xy(2)[unfolded mem_interval_1] - using inj(2)[of "2 *\<^sub>R y - 1" 1] by (auto simp add:vector_component_simps Cart_eq) - ultimately show ?thesis by auto - next assume as:"x $ 1 > 1 / 2" "y $ 1 \ 1 / 2" - hence "?g x \ path_image g2" "?g y \ path_image g1" unfolding path_image_def joinpaths_def - using xy(1,2)[unfolded mem_interval_1] by(auto simp add:vector_component_simps intro!: imageI) - moreover have "?g x \ pathstart g2" using as(1) unfolding pathstart_def joinpaths_def - using inj(2)[of "2 *\<^sub>R x - 1" 0] and xy(1)[unfolded mem_interval_1] - by(auto simp add:vector_component_simps field_simps Cart_eq) - ultimately have *:"?g y = pathstart g1" using assms(4) unfolding xy(3) by auto - hence "y = 0" unfolding pathstart_def joinpaths_def using as(2) and xy(2)[unfolded mem_interval_1] - using inj(1)[of "2 *\<^sub>R y" 0] by(auto simp add:vector_component_simps) - moreover have "x = 1" using * unfolding xy(3)[THEN sym] assms(3)[THEN sym] - unfolding joinpaths_def pathfinish_def using as(1) and xy(1)[unfolded mem_interval_1] - using inj(2)[of "2 *\<^sub>R x - 1" 1] by(auto simp add:vector_component_simps Cart_eq) - ultimately show ?thesis by auto qed qed - -lemma injective_path_join: - assumes "injective_path g1" "injective_path g2" "pathfinish g1 = pathstart g2" - "(path_image g1 \ path_image g2) \ {pathstart g2}" - shows "injective_path(g1 +++ g2)" - unfolding injective_path_def proof(rule,rule,rule) let ?g = "g1 +++ g2" - note inj = assms(1,2)[unfolded injective_path_def, rule_format] - have *:"\x y::real^1. 2 *\<^sub>R x = 1 \ 2 *\<^sub>R y = 1 \ x = y" unfolding Cart_eq forall_1 by(auto simp del:dest_vec1_eq) - fix x y assume xy:"x \ {0..1}" "y \ {0..1}" "(g1 +++ g2) x = (g1 +++ g2) y" - show "x = y" proof(cases "x$1 \ 1/2", case_tac[!] 
"y$1 \ 1/2", unfold not_le) - assume "x $ 1 \ 1 / 2" "y $ 1 \ 1 / 2" thus ?thesis using inj(1)[of "2*\<^sub>R x" "2*\<^sub>R y"] and xy - unfolding mem_interval_1 joinpaths_def by(auto simp add:vector_component_simps) - next assume "x $ 1 > 1 / 2" "y $ 1 > 1 / 2" thus ?thesis using inj(2)[of "2*\<^sub>R x - 1" "2*\<^sub>R y - 1"] and xy - unfolding mem_interval_1 joinpaths_def by(auto simp add:vector_component_simps) - next assume as:"x $ 1 \ 1 / 2" "y $ 1 > 1 / 2" - hence "?g x \ path_image g1" "?g y \ path_image g2" unfolding path_image_def joinpaths_def - using xy(1,2)[unfolded mem_interval_1] by(auto simp add:vector_component_simps intro!: imageI) - hence "?g x = pathfinish g1" "?g y = pathstart g2" using assms(4) unfolding assms(3) xy(3) by auto - thus ?thesis using as and inj(1)[of "2 *\<^sub>R x" 1] inj(2)[of "2 *\<^sub>R y - 1" 0] and xy(1,2) - unfolding pathstart_def pathfinish_def joinpaths_def mem_interval_1 - by(auto simp add:vector_component_simps intro:*) - next assume as:"x $ 1 > 1 / 2" "y $ 1 \ 1 / 2" - hence "?g x \ path_image g2" "?g y \ path_image g1" unfolding path_image_def joinpaths_def - using xy(1,2)[unfolded mem_interval_1] by(auto simp add:vector_component_simps intro!: imageI) - hence "?g x = pathstart g2" "?g y = pathfinish g1" using assms(4) unfolding assms(3) xy(3) by auto - thus ?thesis using as and inj(2)[of "2 *\<^sub>R x - 1" 0] inj(1)[of "2 *\<^sub>R y" 1] and xy(1,2) - unfolding pathstart_def pathfinish_def joinpaths_def mem_interval_1 - by(auto simp add:vector_component_simps intro:*) qed qed - -lemmas join_paths_simps = path_join path_image_join pathstart_join pathfinish_join - -subsection {* Reparametrizing a closed curve to start at some chosen point. *} - -definition "shiftpath a (f::real^1 \ real^'n) = - (\x. if dest_vec1 (a + x) \ 1 then f(a + x) else f(a + x - 1))" - -lemma pathstart_shiftpath: "a \ 1 \ pathstart(shiftpath a g) = g a" - unfolding pathstart_def shiftpath_def by auto - -(** move this **) -declare forall_1[simp] ex_1[simp] - -lemma pathfinish_shiftpath: assumes "0 \ a" "pathfinish g = pathstart g" - shows "pathfinish(shiftpath a g) = g a" - using assms unfolding pathstart_def pathfinish_def shiftpath_def - by(auto simp add: vector_component_simps) - -lemma endpoints_shiftpath: - assumes "pathfinish g = pathstart g" "a \ {0 .. 1}" - shows "pathfinish(shiftpath a g) = g a" "pathstart(shiftpath a g) = g a" - using assms by(auto intro!:pathfinish_shiftpath pathstart_shiftpath) - -lemma closed_shiftpath: - assumes "pathfinish g = pathstart g" "a \ {0..1}" - shows "pathfinish(shiftpath a g) = pathstart(shiftpath a g)" - using endpoints_shiftpath[OF assms] by auto - -lemma path_shiftpath: - assumes "path g" "pathfinish g = pathstart g" "a \ {0..1}" - shows "path(shiftpath a g)" proof- - have *:"{0 .. 1} = {0 .. 1-a} \ {1-a .. 1}" using assms(3) by(auto simp add: vector_component_simps) - have **:"\x. x + a = 1 \ g (x + a - 1) = g (x + a)" - using assms(2)[unfolded pathfinish_def pathstart_def] by auto - show ?thesis unfolding path_def shiftpath_def * apply(rule continuous_on_union) - apply(rule closed_interval)+ apply(rule continuous_on_eq[of _ "g \ (\x. a + x)"]) prefer 3 - apply(rule continuous_on_eq[of _ "g \ (\x. 
a - 1 + x)"]) defer prefer 3 - apply(rule continuous_on_intros)+ prefer 2 apply(rule continuous_on_intros)+ - apply(rule_tac[1-2] continuous_on_subset[OF assms(1)[unfolded path_def]]) - using assms(3) and ** by(auto simp add:vector_component_simps field_simps Cart_eq) qed - -lemma shiftpath_shiftpath: assumes "pathfinish g = pathstart g" "a \ {0..1}" "x \ {0..1}" - shows "shiftpath (1 - a) (shiftpath a g) x = g x" - using assms unfolding pathfinish_def pathstart_def shiftpath_def - by(auto simp add: vector_component_simps) - -lemma path_image_shiftpath: - assumes "a \ {0..1}" "pathfinish g = pathstart g" - shows "path_image(shiftpath a g) = path_image g" proof- - { fix x assume as:"g 1 = g 0" "x \ {0..1::real^1}" " \y\{0..1} \ {x. \ a $ 1 + x $ 1 \ 1}. g x \ g (a + y - 1)" - hence "\y\{0..1} \ {x. a $ 1 + x $ 1 \ 1}. g x = g (a + y)" proof(cases "a \ x") - case False thus ?thesis apply(rule_tac x="1 + x - a" in bexI) - using as(1,2) and as(3)[THEN bspec[where x="1 + x - a"]] and assms(1) - by(auto simp add:vector_component_simps field_simps atomize_not) next - case True thus ?thesis using as(1-2) and assms(1) apply(rule_tac x="x - a" in bexI) - by(auto simp add:vector_component_simps field_simps) qed } - thus ?thesis using assms unfolding shiftpath_def path_image_def pathfinish_def pathstart_def - by(auto simp add:vector_component_simps image_iff) qed - -subsection {* Special case of straight-line paths. *} - -definition - linepath :: "real ^ 'n \ real ^ 'n \ real ^ 1 \ real ^ 'n" where - "linepath a b = (\x. (1 - dest_vec1 x) *\<^sub>R a + dest_vec1 x *\<^sub>R b)" - -lemma pathstart_linepath[simp]: "pathstart(linepath a b) = a" - unfolding pathstart_def linepath_def by auto - -lemma pathfinish_linepath[simp]: "pathfinish(linepath a b) = b" - unfolding pathfinish_def linepath_def by auto - -lemma continuous_linepath_at[intro]: "continuous (at x) (linepath a b)" - unfolding linepath_def - by (intro continuous_intros continuous_dest_vec1) - -lemma continuous_on_linepath[intro]: "continuous_on s (linepath a b)" - using continuous_linepath_at by(auto intro!: continuous_at_imp_continuous_on) - -lemma path_linepath[intro]: "path(linepath a b)" - unfolding path_def by(rule continuous_on_linepath) - -lemma path_image_linepath[simp]: "path_image(linepath a b) = (closed_segment a b)" - unfolding path_image_def segment linepath_def apply (rule set_ext, rule) defer - unfolding mem_Collect_eq image_iff apply(erule exE) apply(rule_tac x="u *\<^sub>R 1" in bexI) - by(auto simp add:vector_component_simps) - -lemma reversepath_linepath[simp]: "reversepath(linepath a b) = linepath b a" - unfolding reversepath_def linepath_def by(rule ext, auto simp add:vector_component_simps) - -lemma injective_path_linepath: assumes "a \ b" shows "injective_path(linepath a b)" proof- - { obtain i where i:"a$i \ b$i" using assms[unfolded Cart_eq] by auto - fix x y::"real^1" assume "x $ 1 *\<^sub>R b + y $ 1 *\<^sub>R a = x $ 1 *\<^sub>R a + y $ 1 *\<^sub>R b" - hence "x$1 * (b$i - a$i) = y$1 * (b$i - a$i)" unfolding Cart_eq by(auto simp add:field_simps vector_component_simps) - hence "x = y" unfolding mult_cancel_right Cart_eq using i(1) by(auto simp add:field_simps) } - thus ?thesis unfolding injective_path_def linepath_def by(auto simp add:vector_component_simps algebra_simps) qed - -lemma simple_path_linepath[intro]: "a \ b \ simple_path(linepath a b)" by(auto intro!: injective_imp_simple_path injective_path_linepath) - -subsection {* Bounding a point away from a path. 
*} - -lemma not_on_path_ball: assumes "path g" "z \ path_image g" - shows "\e>0. ball z e \ (path_image g) = {}" proof- - obtain a where "a\path_image g" "\y\path_image g. dist z a \ dist z y" - using distance_attains_inf[OF _ path_image_nonempty, of g z] - using compact_path_image[THEN compact_imp_closed, OF assms(1)] by auto - thus ?thesis apply(rule_tac x="dist z a" in exI) using assms(2) by(auto intro!: dist_pos_lt) qed - -lemma not_on_path_cball: assumes "path g" "z \ path_image g" - shows "\e>0. cball z e \ (path_image g) = {}" proof- - obtain e where "ball z e \ path_image g = {}" "e>0" using not_on_path_ball[OF assms] by auto - moreover have "cball z (e/2) \ ball z e" using `e>0` by auto - ultimately show ?thesis apply(rule_tac x="e/2" in exI) by auto qed - -subsection {* Path component, considered as a "joinability" relation (from Tom Hales). *} - -definition "path_component s x y \ (\g. path g \ path_image g \ s \ pathstart g = x \ pathfinish g = y)" - -lemmas path_defs = path_def pathstart_def pathfinish_def path_image_def path_component_def - -lemma path_component_mem: assumes "path_component s x y" shows "x \ s" "y \ s" - using assms unfolding path_defs by auto - -lemma path_component_refl: assumes "x \ s" shows "path_component s x x" - unfolding path_defs apply(rule_tac x="\u. x" in exI) using assms - by(auto intro!:continuous_on_intros) - -lemma path_component_refl_eq: "path_component s x x \ x \ s" - by(auto intro!: path_component_mem path_component_refl) - -lemma path_component_sym: "path_component s x y \ path_component s y x" - using assms unfolding path_component_def apply(erule exE) apply(rule_tac x="reversepath g" in exI) - by(auto simp add: reversepath_simps) - -lemma path_component_trans: assumes "path_component s x y" "path_component s y z" shows "path_component s x z" - using assms unfolding path_component_def apply- apply(erule exE)+ apply(rule_tac x="g +++ ga" in exI) by(auto simp add: path_image_join) - -lemma path_component_of_subset: "s \ t \ path_component s x y \ path_component t x y" - unfolding path_component_def by auto - -subsection {* Can also consider it as a set, as the name suggests. *} - -lemma path_component_set: "path_component s x = { y. (\g. path g \ path_image g \ s \ pathstart g = x \ pathfinish g = y )}" - apply(rule set_ext) unfolding mem_Collect_eq unfolding mem_def path_component_def by auto - -lemma mem_path_component_set:"x \ path_component s y \ path_component s y x" unfolding mem_def by auto - -lemma path_component_subset: "(path_component s x) \ s" - apply(rule, rule path_component_mem(2)) by(auto simp add:mem_def) - -lemma path_component_eq_empty: "path_component s x = {} \ x \ s" - apply rule apply(drule equals0D[of _ x]) defer apply(rule equals0I) unfolding mem_path_component_set - apply(drule path_component_mem(1)) using path_component_refl by auto - -subsection {* Path connectedness of a space. *} - -definition "path_connected s \ (\x\s. \y\s. \g. path g \ (path_image g) \ s \ pathstart g = x \ pathfinish g = y)" - -lemma path_connected_component: "path_connected s \ (\x\s. \y\s. path_component s x y)" - unfolding path_connected_def path_component_def by auto - -lemma path_connected_component_set: "path_connected s \ (\x\s. path_component s x = s)" - unfolding path_connected_component apply(rule, rule, rule, rule path_component_subset) - unfolding subset_eq mem_path_component_set Ball_def mem_def by auto - -subsection {* Some useful lemmas about path-connectedness. 
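In summary, the lemmas above say that path_component s behaves as an equivalence relation on the points of s: reflexivity holds for x in s via a constant path, symmetry via reversepath, and transitivity via the join g +++ ga. Viewed as a set, path_component s x is the path component of x, and by path_connected_component_set

\[ s \ \text{path connected} \;\iff\; \forall x \in s.\ \mathrm{path\_component}\ s\ x = s, \]

that is, s is path connected exactly when it has a single path component.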
*} - -lemma convex_imp_path_connected: assumes "convex s" shows "path_connected s" - unfolding path_connected_def apply(rule,rule,rule_tac x="linepath x y" in exI) - unfolding path_image_linepath using assms[unfolded convex_contains_segment] by auto - -lemma path_connected_imp_connected: assumes "path_connected s" shows "connected s" - unfolding connected_def not_ex apply(rule,rule,rule ccontr) unfolding not_not apply(erule conjE)+ proof- - fix e1 e2 assume as:"open e1" "open e2" "s \ e1 \ e2" "e1 \ e2 \ s = {}" "e1 \ s \ {}" "e2 \ s \ {}" - then obtain x1 x2 where obt:"x1\e1\s" "x2\e2\s" by auto - then obtain g where g:"path g" "path_image g \ s" "pathstart g = x1" "pathfinish g = x2" - using assms[unfolded path_connected_def,rule_format,of x1 x2] by auto - have *:"connected {0..1::real^1}" by(auto intro!: convex_connected convex_interval) - have "{0..1} \ {x \ {0..1}. g x \ e1} \ {x \ {0..1}. g x \ e2}" using as(3) g(2)[unfolded path_defs] by blast - moreover have "{x \ {0..1}. g x \ e1} \ {x \ {0..1}. g x \ e2} = {}" using as(4) g(2)[unfolded path_defs] unfolding subset_eq by auto - moreover have "{x \ {0..1}. g x \ e1} \ {} \ {x \ {0..1}. g x \ e2} \ {}" using g(3,4)[unfolded path_defs] using obt by(auto intro!: exI) - ultimately show False using *[unfolded connected_local not_ex,rule_format, of "{x\{0..1}. g x \ e1}" "{x\{0..1}. g x \ e2}"] - using continuous_open_in_preimage[OF g(1)[unfolded path_def] as(1)] - using continuous_open_in_preimage[OF g(1)[unfolded path_def] as(2)] by auto qed - -lemma open_path_component: assumes "open s" shows "open(path_component s x)" - unfolding open_contains_ball proof - fix y assume as:"y \ path_component s x" - hence "y\s" apply- apply(rule path_component_mem(2)) unfolding mem_def by auto - then obtain e where e:"e>0" "ball y e \ s" using assms[unfolded open_contains_ball] by auto - show "\e>0. ball y e \ path_component s x" apply(rule_tac x=e in exI) apply(rule,rule `e>0`,rule) unfolding mem_ball mem_path_component_set proof- - fix z assume "dist y z < e" thus "path_component s x z" apply(rule_tac path_component_trans[of _ _ y]) defer - apply(rule path_component_of_subset[OF e(2)]) apply(rule convex_imp_path_connected[OF convex_ball, unfolded path_connected_component, rule_format]) using `e>0` - using as[unfolded mem_def] by auto qed qed - -lemma open_non_path_component: assumes "open s" shows "open(s - path_component s x)" unfolding open_contains_ball proof - fix y assume as:"y\s - path_component s x" - then obtain e where e:"e>0" "ball y e \ s" using assms[unfolded open_contains_ball] by auto - show "\e>0. 
ball y e \ s - path_component s x" apply(rule_tac x=e in exI) apply(rule,rule `e>0`,rule,rule) defer proof(rule ccontr) - fix z assume "z\ball y e" "\ z \ path_component s x" - hence "y \ path_component s x" unfolding not_not mem_path_component_set using `e>0` - apply- apply(rule path_component_trans,assumption) apply(rule path_component_of_subset[OF e(2)]) - apply(rule convex_imp_path_connected[OF convex_ball, unfolded path_connected_component, rule_format]) by auto - thus False using as by auto qed(insert e(2), auto) qed - -lemma connected_open_path_connected: assumes "open s" "connected s" shows "path_connected s" - unfolding path_connected_component_set proof(rule,rule,rule path_component_subset, rule) - fix x y assume "x \ s" "y \ s" show "y \ path_component s x" proof(rule ccontr) - assume "y \ path_component s x" moreover - have "path_component s x \ s \ {}" using `x\s` path_component_eq_empty path_component_subset[of s x] by auto - ultimately show False using `y\s` open_non_path_component[OF assms(1)] open_path_component[OF assms(1)] - using assms(2)[unfolded connected_def not_ex, rule_format, of"path_component s x" "s - path_component s x"] by auto -qed qed - -lemma path_connected_continuous_image: - assumes "continuous_on s f" "path_connected s" shows "path_connected (f ` s)" - unfolding path_connected_def proof(rule,rule) - fix x' y' assume "x' \ f ` s" "y' \ f ` s" - then obtain x y where xy:"x\s" "y\s" "x' = f x" "y' = f y" by auto - guess g using assms(2)[unfolded path_connected_def,rule_format,OF xy(1,2)] .. - thus "\g. path g \ path_image g \ f ` s \ pathstart g = x' \ pathfinish g = y'" - unfolding xy apply(rule_tac x="f \ g" in exI) unfolding path_defs - using assms(1) by(auto intro!: continuous_on_compose continuous_on_subset[of _ _ "g ` {0..1}"]) qed - -lemma homeomorphic_path_connectedness: - "s homeomorphic t \ (path_connected s \ path_connected t)" - unfolding homeomorphic_def homeomorphism_def apply(erule exE|erule conjE)+ apply rule - apply(drule_tac f=f in path_connected_continuous_image) prefer 3 - apply(drule_tac f=g in path_connected_continuous_image) by auto - -lemma path_connected_empty: "path_connected {}" - unfolding path_connected_def by auto - -lemma path_connected_singleton: "path_connected {a}" - unfolding path_connected_def apply(rule,rule) - apply(rule_tac x="linepath a a" in exI) by(auto simp add:segment scaleR_left_diff_distrib) - -lemma path_connected_Un: assumes "path_connected s" "path_connected t" "s \ t \ {}" - shows "path_connected (s \ t)" unfolding path_connected_component proof(rule,rule) - fix x y assume as:"x \ s \ t" "y \ s \ t" - from assms(3) obtain z where "z \ s \ t" by auto - thus "path_component (s \ t) x y" using as using assms(1-2)[unfolded path_connected_component] apply- - apply(erule_tac[!] UnE)+ apply(rule_tac[2-3] path_component_trans[of _ _ z]) - by(auto simp add:path_component_of_subset [OF Un_upper1] path_component_of_subset[OF Un_upper2]) qed - -subsection {* sphere is path-connected. *} - -lemma path_connected_punctured_universe: - assumes "2 \ CARD('n::finite)" shows "path_connected((UNIV::(real^'n) set) - {a})" proof- - obtain \ where \:"bij_betw \ {1..CARD('n)} (UNIV::'n set)" using ex_bij_betw_nat_finite_1[OF finite_UNIV] by auto - let ?U = "UNIV::(real^'n) set" let ?u = "?U - {0}" - let ?basis = "\k. basis (\ k)" - let ?A = "\k. {x::real^'n. \i\{1..k}. inner (basis (\ i)) x \ 0}" - have "\k\{2..CARD('n)}. path_connected (?A k)" proof - have *:"\k. ?A (Suc k) = {x. inner (?basis (Suc k)) x < 0} \ {x. 
inner (?basis (Suc k)) x > 0} \ ?A k" apply(rule set_ext,rule) defer - apply(erule UnE)+ unfolding mem_Collect_eq apply(rule_tac[1-2] x="Suc k" in bexI) - by(auto elim!: ballE simp add: not_less le_Suc_eq) - fix k assume "k \ {2..CARD('n)}" thus "path_connected (?A k)" proof(induct k) - case (Suc k) show ?case proof(cases "k = 1") - case False from Suc have d:"k \ {1..CARD('n)}" "Suc k \ {1..CARD('n)}" by auto - hence "\ k \ \ (Suc k)" using \[unfolded bij_betw_def inj_on_def, THEN conjunct1, THEN bspec[where x=k]] by auto - hence **:"?basis k + ?basis (Suc k) \ {x. 0 < inner (?basis (Suc k)) x} \ (?A k)" - "?basis k - ?basis (Suc k) \ {x. 0 > inner (?basis (Suc k)) x} \ ({x. 0 < inner (?basis (Suc k)) x} \ (?A k))" using d - by(auto simp add: inner_basis vector_component_simps intro!:bexI[where x=k]) - show ?thesis unfolding * Un_assoc apply(rule path_connected_Un) defer apply(rule path_connected_Un) - prefer 5 apply(rule_tac[1-2] convex_imp_path_connected, rule convex_halfspace_lt, rule convex_halfspace_gt) - apply(rule Suc(1)) using d ** False by auto - next case True hence d:"1\{1..CARD('n)}" "2\{1..CARD('n)}" using Suc(2) by auto - have ***:"Suc 1 = 2" by auto - have **:"\s t P Q. s \ t \ {x. P x \ Q x} = (s \ {x. P x}) \ (t \ {x. Q x})" by auto - have nequals0I:"\x A. x\A \ A \ {}" by auto - have "\ 2 \ \ (Suc 0)" using \[unfolded bij_betw_def inj_on_def, THEN conjunct1, THEN bspec[where x=2]] using assms by auto - thus ?thesis unfolding * True unfolding ** neq_iff bex_disj_distrib apply - - apply(rule path_connected_Un, rule_tac[1-2] path_connected_Un) defer 3 apply(rule_tac[1-4] convex_imp_path_connected) - apply(rule_tac[5] x=" ?basis 1 + ?basis 2" in nequals0I) - apply(rule_tac[6] x="-?basis 1 + ?basis 2" in nequals0I) - apply(rule_tac[7] x="-?basis 1 - ?basis 2" in nequals0I) - using d unfolding *** by(auto intro!: convex_halfspace_gt convex_halfspace_lt, auto simp add:vector_component_simps inner_basis) - qed qed auto qed note lem = this - - have ***:"\x::real^'n. (\i\{1..CARD('n)}. inner (basis (\ i)) x \ 0) \ (\i. inner (basis i) x \ 0)" - apply rule apply(erule bexE) apply(rule_tac x="\ i" in exI) defer apply(erule exE) proof- - fix x::"real^'n" and i assume as:"inner (basis i) x \ 0" - have "i\\ ` {1..CARD('n)}" using \[unfolded bij_betw_def, THEN conjunct2] by auto - then obtain j where "j\{1..CARD('n)}" "\ j = i" by auto - thus "\i\{1..CARD('n)}. inner (basis (\ i)) x \ 0" apply(rule_tac x=j in bexI) using as by auto qed auto - have *:"?U - {a} = (\x. x + a) ` {x. x \ 0}" apply(rule set_ext) unfolding image_iff - apply rule apply(rule_tac x="x - a" in bexI) by auto - have **:"\x::real^'n. x\0 \ (\i. inner (basis i) x \ 0)" unfolding Cart_eq by(auto simp add: inner_basis) - show ?thesis unfolding * apply(rule path_connected_continuous_image) apply(rule continuous_on_intros)+ - unfolding ** apply(rule lem[THEN bspec[where x="CARD('n)"], unfolded ***]) using assms by auto qed - -lemma path_connected_sphere: assumes "2 \ CARD('n::finite)" shows "path_connected {x::real^'n. norm(x - a) = r}" proof(cases "r\0") - case True thus ?thesis proof(cases "r=0") - case False hence "{x::real^'n. norm(x - a) = r} = {}" using True by auto - thus ?thesis using path_connected_empty by auto - qed(auto intro!:path_connected_singleton) next - case False hence *:"{x::real^'n. norm(x - a) = r} = (\x. a + r *\<^sub>R x) ` {x. 
norm x = 1}" unfolding not_le apply -apply(rule set_ext,rule) - unfolding image_iff apply(rule_tac x="(1/r) *\<^sub>R (x - a)" in bexI) unfolding mem_Collect_eq norm_scaleR by (auto simp add: scaleR_right_diff_distrib) - have **:"{x::real^'n. norm x = 1} = (\x. (1/norm x) *\<^sub>R x) ` (UNIV - {0})" apply(rule set_ext,rule) - unfolding image_iff apply(rule_tac x=x in bexI) unfolding mem_Collect_eq by(auto split:split_if_asm) - have "continuous_on (UNIV - {0}) (\x::real^'n. 1 / norm x)" unfolding o_def continuous_on_eq_continuous_within - apply(rule, rule continuous_at_within_inv[unfolded o_def inverse_eq_divide]) apply(rule continuous_at_within) - apply(rule continuous_at_norm[unfolded o_def]) by auto - thus ?thesis unfolding * ** using path_connected_punctured_universe[OF assms] - by(auto intro!: path_connected_continuous_image continuous_on_intros continuous_on_mul) qed - -lemma connected_sphere: "2 \ CARD('n) \ connected {x::real^'n. norm(x - a) = r}" - using path_connected_sphere path_connected_imp_connected by auto - end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Multivariate_Analysis/Derivative.thy --- a/src/HOL/Multivariate_Analysis/Derivative.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Multivariate_Analysis/Derivative.thy Tue May 04 20:30:22 2010 +0200 @@ -1,20 +1,18 @@ -(* Title: HOL/Library/Convex_Euclidean_Space.thy - Author: John Harrison - Translation from HOL light: Robert Himmelmann, TU Muenchen *) +(* Title: HOL/Multivariate_Analysis/Derivative.thy + Author: John Harrison + Translation from HOL Light: Robert Himmelmann, TU Muenchen +*) header {* Multivariate calculus in Euclidean space. *} theory Derivative - imports Brouwer_Fixpoint RealVector +imports Brouwer_Fixpoint Vec1 RealVector Operator_Norm begin (* Because I do not want to type this all the time *) lemmas linear_linear = linear_conv_bounded_linear[THEN sym] -(** move this **) -declare norm_vec1[simp] - subsection {* Derivatives *} text {* The definition is slightly tricky since we make it work over @@ -33,14 +31,14 @@ assume ?l note as = this[unfolded deriv_def LIM_def,rule_format] show ?r unfolding has_derivative_def Lim_at apply- apply(rule,rule mult.bounded_linear_right) apply safe apply(drule as,safe) apply(rule_tac x=s in exI) apply safe - apply(erule_tac x="xa - x" in allE) unfolding vector_dist_norm netlimit_at[of x] unfolding diff_0_right norm_scaleR + apply(erule_tac x="xa - x" in allE) unfolding dist_norm netlimit_at[of x] unfolding diff_0_right norm_scaleR by(auto simp add:field_simps) next assume ?r note this[unfolded has_derivative_def Lim_at] note as=conjunct2[OF this,rule_format] have *:"\x xa f'. xa \ 0 \ \(f (xa + x) - f x) / xa - f'\ = \(f (xa + x) - f x) - xa * f'\ / \xa\" by(auto simp add:field_simps) show ?l unfolding deriv_def LIM_def apply safe apply(drule as,safe) apply(rule_tac x=d in exI,safe) apply(erule_tac x="xa + x" in allE) - unfolding vector_dist_norm diff_0_right norm_scaleR - unfolding vector_dist_norm netlimit_at[of x] by(auto simp add:group_simps *) qed + unfolding dist_norm diff_0_right norm_scaleR + unfolding dist_norm netlimit_at[of x] by(auto simp add:algebra_simps *) qed lemma FDERIV_conv_has_derivative:"FDERIV f (x::'a::{real_normed_vector,perfect_space}) :> f' = (f has_derivative f') (at x)" (is "?l = ?r") proof assume ?l note as = this[unfolded fderiv_def] @@ -50,14 +48,14 @@ thus "\d>0. \xa. 
0 < dist xa x \ dist xa x < d \ dist ((1 / norm (xa - netlimit (at x))) *\<^sub>R (f xa - (f (netlimit (at x)) + f' (xa - netlimit (at x))))) (0) < e" apply(rule_tac x=d in exI) apply(erule conjE,rule,assumption) apply rule apply(erule_tac x="xa - x" in allE) - unfolding vector_dist_norm netlimit_at[of x] by(auto simp add:group_simps) qed next + unfolding dist_norm netlimit_at[of x] by (auto simp add: diff_diff_eq) qed next assume ?r note as = this[unfolded has_derivative_def] show ?l unfolding fderiv_def LIM_def apply-apply(rule,rule as[THEN conjunct1]) proof(rule,rule) fix e::real assume "e>0" guess d using as[THEN conjunct2,unfolded Lim_at,rule_format,OF`e>0`] .. thus "\s>0. \xa. xa \ 0 \ dist xa 0 < s \ dist (norm (f (x + xa) - f x - f' xa) / norm xa) 0 < e" apply- apply(rule_tac x=d in exI) apply(erule conjE,rule,assumption) apply rule apply(erule_tac x="xa + x" in allE) - unfolding vector_dist_norm netlimit_at[of x] by(auto simp add:group_simps) qed qed + unfolding dist_norm netlimit_at[of x] by (auto simp add: diff_diff_eq add.commute) qed qed subsection {* These are the only cases we'll care about, probably. *} @@ -75,8 +73,8 @@ "(f has_derivative f')(at x within s) \ bounded_linear f' \ (\e>0. \d>0. \x'\s. 0 < norm(x' - x) \ norm(x' - x) < d \ norm(f x' - f x - f'(x' - x)) / norm(x' - x) < e)" - unfolding has_derivative_within Lim_within vector_dist_norm - unfolding diff_0_right norm_mul by(simp add: group_simps) + unfolding has_derivative_within Lim_within dist_norm + unfolding diff_0_right norm_mul by (simp add: diff_diff_eq) lemma has_derivative_at': "(f has_derivative f') (at x) \ bounded_linear f' \ @@ -93,16 +91,6 @@ subsection {* Derivatives on real = Derivatives on @{typ "real^1"} *} -lemma dist_vec1_0[simp]: "dist(vec1 (x::real)) 0 = norm x" unfolding vector_dist_norm by(auto simp add:vec1_dest_vec1_simps) - -lemma bounded_linear_vec1_dest_vec1: fixes f::"real \ real" - shows "linear (vec1 \ f \ dest_vec1) = bounded_linear f" (is "?l = ?r") proof- - { assume ?l guess K using linear_bounded[OF `?l`] .. - hence "\K. \x. \f x\ \ \x\ * K" apply(rule_tac x=K in exI) - unfolding vec1_dest_vec1_simps by (auto simp add:field_simps) } - thus ?thesis unfolding linear_def bounded_linear_def additive_def bounded_linear_axioms_def o_def - unfolding vec1_dest_vec1_simps by auto qed - lemma has_derivative_within_vec1_dest_vec1: fixes f::"real\real" shows "((vec1 \ f \ dest_vec1) has_derivative (vec1 \ f' \ dest_vec1)) (at (vec1 x) within vec1 ` s) = (f has_derivative f') (at x within s)" @@ -155,14 +143,14 @@ lemma has_derivative_const: "((\x. c) has_derivative (\h. 0)) net" unfolding has_derivative_def apply(rule,rule bounded_linear_zero) by(simp add: Lim_const) -lemma (in bounded_linear) cmul: shows "bounded_linear (\x. (c::real) *\<^sub>R f x)" proof - guess K using pos_bounded .. - thus "\K. \x. norm ((c::real) *\<^sub>R f x) \ norm x * K" apply(rule_tac x="abs c * K" in exI) proof - fix x case goal1 - hence "abs c * norm (f x) \ abs c * (norm x * K)" apply-apply(erule conjE,erule_tac x=x in allE) - apply(rule mult_left_mono) by auto - thus ?case by(auto simp add:field_simps) - qed qed(auto simp add: scaleR.add_right add scaleR) +lemma (in bounded_linear) cmul: shows "bounded_linear (\x. (c::real) *\<^sub>R f x)" +proof - + have "bounded_linear (\x. c *\<^sub>R x)" + by (rule scaleR.bounded_linear_right) + moreover have "bounded_linear f" .. + ultimately show ?thesis + by (rule bounded_linear_compose) +qed lemma has_derivative_cmul: assumes "(f has_derivative f') net" shows "((\x. 
c *\<^sub>R f(x)) has_derivative (\h. c *\<^sub>R f'(h))) net" unfolding has_derivative_def apply(rule,rule bounded_linear.cmul) @@ -186,14 +174,14 @@ note as = assms[unfolded has_derivative_def] show ?thesis unfolding has_derivative_def apply(rule,rule bounded_linear_add) using Lim_add[OF as(1)[THEN conjunct2] as(2)[THEN conjunct2]] and as - by(auto simp add:group_simps scaleR_right_diff_distrib scaleR_right_distrib) qed + by (auto simp add:algebra_simps scaleR_right_diff_distrib scaleR_right_distrib) qed lemma has_derivative_add_const:"(f has_derivative f') net \ ((\x. f x + c) has_derivative f') net" apply(drule has_derivative_add) apply(rule has_derivative_const) by auto lemma has_derivative_sub: "(f has_derivative f') net \ (g has_derivative g') net \ ((\x. f(x) - g(x)) has_derivative (\h. f'(h) - g'(h))) net" - apply(drule has_derivative_add) apply(drule has_derivative_neg,assumption) by(simp add:group_simps) + apply(drule has_derivative_add) apply(drule has_derivative_neg,assumption) by(simp add:algebra_simps) lemma has_derivative_setsum: assumes "finite s" "\a\s. ((f a) has_derivative (f' a)) net" shows "((\x. setsum (\a. f a x) s) has_derivative (\h. setsum (\a. f' a h) s)) net" @@ -228,7 +216,7 @@ using assms[unfolded has_derivative_def Lim] by auto thus "eventually (\x. dist (1 / norm (x - netlimit net) * (c x $ k - (c (netlimit net) $ k + c' (x - netlimit net) $ k))) 0 < e) net" proof (rule eventually_elim1) - case goal1 thus ?case apply - unfolding vector_dist_norm apply(rule le_less_trans) prefer 2 apply assumption unfolding * ** and norm_vec1 + case goal1 thus ?case apply - unfolding dist_norm apply(rule le_less_trans) prefer 2 apply assumption unfolding * ** and norm_vec1 using component_le_norm[of "(1 / norm (x - netlimit net)) *\<^sub>R (c x - (c (netlimit net) + c' (x - netlimit net))) - 0" k] by auto qed qed(insert assms[unfolded has_derivative_def], auto simp add:linear_conv_bounded_linear) qed @@ -281,6 +269,8 @@ subsection {* differentiability. *} +no_notation Deriv.differentiable (infixl "differentiable" 60) + definition differentiable :: "('a::real_normed_vector \ 'b::real_normed_vector) \ 'a net \ bool" (infixr "differentiable" 30) where "f differentiable net \ (\f'. (f has_derivative f') net)" @@ -336,7 +326,7 @@ lemma Lim_mul_norm_within: fixes f::"'a::real_normed_vector \ 'b::real_normed_vector" shows "(f ---> 0) (at a within s) \ ((\x. 
norm(x - a) *\<^sub>R f(x)) ---> 0) (at a within s)" unfolding Lim_within apply(rule,rule) apply(erule_tac x=e in allE,erule impE,assumption,erule exE,erule conjE) - apply(rule_tac x="min d 1" in exI) apply rule defer apply(rule,erule_tac x=x in ballE) unfolding vector_dist_norm diff_0_right norm_mul + apply(rule_tac x="min d 1" in exI) apply rule defer apply(rule,erule_tac x=x in ballE) unfolding dist_norm diff_0_right norm_mul by(auto intro!: mult_strict_mono[of _ "1::real", unfolded mult_1_left]) lemma differentiable_imp_continuous_within: assumes "f differentiable (at x within s)" @@ -349,7 +339,7 @@ apply(rule continuous_within_compose) apply(rule continuous_intros)+ by(rule linear_continuous_within[OF f'[THEN conjunct1]]) show ?thesis unfolding continuous_within using f'[THEN conjunct2, THEN Lim_mul_norm_within] - apply-apply(drule Lim_add) apply(rule **[unfolded continuous_within]) unfolding Lim_within and vector_dist_norm + apply-apply(drule Lim_add) apply(rule **[unfolded continuous_within]) unfolding Lim_within and dist_norm apply(rule,rule) apply(erule_tac x=e in allE) apply(erule impE|assumption)+ apply(erule exE,rule_tac x=d in exI) by(auto simp add:zero * elim!:allE) qed @@ -389,18 +379,18 @@ show "norm (f y - f x - f' (y - x)) \ e * norm (y - x)" proof(cases "y=x") case True thus ?thesis using `bounded_linear f'` by(auto simp add: zero) next case False hence "norm (f y - (f x + f' (y - x))) < e * norm (y - x)" using as(4)[rule_format, OF `y\s`] - unfolding vector_dist_norm diff_0_right norm_mul using as(3) - using pos_divide_less_eq[OF False[unfolded dist_nz], unfolded vector_dist_norm] - by(auto simp add:linear_0 linear_sub group_simps) - thus ?thesis by(auto simp add:group_simps) qed qed next + unfolding dist_norm diff_0_right norm_mul using as(3) + using pos_divide_less_eq[OF False[unfolded dist_nz], unfolded dist_norm] + by (auto simp add: linear_0 linear_sub) + thus ?thesis by(auto simp add:algebra_simps) qed qed next assume ?rhs thus ?lhs unfolding has_derivative_within Lim_within apply-apply(erule conjE,rule,assumption) apply(rule,erule_tac x="e/2" in allE,rule,erule impE) defer apply(erule exE,rule_tac x=d in exI) - apply(erule conjE,rule,assumption,rule,rule) unfolding vector_dist_norm diff_0_right norm_scaleR + apply(erule conjE,rule,assumption,rule,rule) unfolding dist_norm diff_0_right norm_scaleR apply(erule_tac x=xa in ballE,erule impE) proof- fix e d y assume "bounded_linear f'" "0 < e" "0 < d" "y \ s" "0 < norm (y - x) \ norm (y - x) < d" "norm (f y - f x - f' (y - x)) \ e / 2 * norm (y - x)" thus "\1 / norm (y - x)\ * norm (f y - (f x + f' (y - x))) < e" - apply(rule_tac le_less_trans[of _ "e/2"]) by(auto intro!:mult_imp_div_pos_le simp add:group_simps) qed auto qed + apply(rule_tac le_less_trans[of _ "e/2"]) by(auto intro!:mult_imp_div_pos_le simp add:algebra_simps) qed auto qed lemma has_derivative_at_alt: "(f has_derivative f') (at x) \ bounded_linear f' \ @@ -435,8 +425,8 @@ hence 1:"norm (f y - f x - f' (y - x)) \ min (norm (y - x)) (e / 2 / B2 * norm (y - x))" using d1 d2 d by auto have "norm (f y - f x) \ norm (f y - f x - f' (y - x)) + norm (f' (y - x))" - using norm_triangle_sub[of "f y - f x" "f' (y - x)"] by(auto simp add:group_simps) - also have "\ \ norm (f y - f x - f' (y - x)) + B1 * norm (y - x)" apply(rule add_left_mono) using B1 by(auto simp add:group_simps) + using norm_triangle_sub[of "f y - f x" "f' (y - x)"] by(auto simp add:algebra_simps) + also have "\ \ norm (f y - f x - f' (y - x)) + B1 * norm (y - x)" apply(rule add_left_mono) 
using B1 by(auto simp add:algebra_simps) also have "\ \ min (norm (y - x)) (e / 2 / B2 * norm (y - x)) + B1 * norm (y - x)" apply(rule add_right_mono) using d1 d2 d as by auto also have "\ \ norm (y - x) + B1 * norm (y - x)" by auto also have "\ = norm (y - x) * (1 + B1)" by(auto simp add:field_simps) @@ -453,8 +443,8 @@ interpret g': bounded_linear g' using assms(2) by auto interpret f': bounded_linear f' using assms(1) by auto have "norm (- g' (f' (y - x)) + g' (f y - f x)) = norm (g' (f y - f x - f' (y - x)))" - by(auto simp add:group_simps f'.diff g'.diff g'.add) - also have "\ \ B2 * norm (f y - f x - f' (y - x))" using B2 by(auto simp add:group_simps) + by(auto simp add:algebra_simps f'.diff g'.diff g'.add) + also have "\ \ B2 * norm (f y - f x - f' (y - x))" using B2 by(auto simp add:algebra_simps) also have "\ \ B2 * (e / 2 / B2 * norm (y - x))" apply(rule mult_left_mono) using as d1 d2 d B2 by auto also have "\ \ e / 2 * norm (y - x)" using B2 by auto finally have 5:"norm (- g' (f' (y - x)) + g' (f y - f x)) \ e / 2 * norm (y - x)" by auto @@ -523,7 +513,7 @@ guess a using UNIV_witness[where 'a='a] .. fix e::real assume "00`,of a] .. thus "\x'\s. x' \ x \ dist x' x < e" apply(rule_tac x="x + d*\<^sub>R basis a" in bexI) - using basis_nonzero[of a] norm_basis[of a] unfolding vector_dist_norm by auto qed + using basis_nonzero[of a] norm_basis[of a] unfolding dist_norm by auto qed hence *:"netlimit (at x within s) = x" apply-apply(rule netlimit_within) unfolding trivial_limit_within by simp show ?thesis apply(rule linear_eq_stdbasis) unfolding linear_conv_bounded_linear apply(rule as(1,2)[THEN conjunct1])+ proof(rule,rule ccontr) @@ -535,8 +525,8 @@ unfolding scaleR_right_distrib by auto also have "\ = norm ((1 / abs c) *\<^sub>R (c *\<^sub>R (- (f' (basis i)) + f'' (basis i))))" unfolding f'.scaleR f''.scaleR unfolding scaleR_right_distrib scaleR_minus_right by auto - also have "\ = e" unfolding e_def norm_mul using c[THEN conjunct1] using norm_minus_cancel[of "f' (basis i) - f'' (basis i)"] by(auto simp add:group_simps) - finally show False using c using d[THEN conjunct2,rule_format,of "x + c *\<^sub>R basis i"] using norm_basis[of i] unfolding vector_dist_norm + also have "\ = e" unfolding e_def norm_mul using c[THEN conjunct1] using norm_minus_cancel[of "f' (basis i) - f'' (basis i)"] by (auto simp add: add.commute ab_diff_minus) + finally show False using c using d[THEN conjunct2,rule_format,of "x + c *\<^sub>R basis i"] using norm_basis[of i] unfolding dist_norm unfolding f'.scaleR f''.scaleR f'.add f''.add f'.diff f''.diff scaleR_scaleR scaleR_right_diff_distrib scaleR_right_distrib by auto qed qed lemma frechet_derivative_unique_at: fixes f::"real^'a \ real^'b" @@ -617,13 +607,13 @@ unfolding vector_component_simps matrix_vector_mul_component unfolding smult_conv_scaleR[symmetric] unfolding inner_simps dot_basis smult_conv_scaleR by simp } note * = this have "x + d *\<^sub>R basis j \ ball x e" "x - d *\<^sub>R basis j \ ball x e" - unfolding mem_ball vector_dist_norm using norm_basis[of j] d by auto + unfolding mem_ball dist_norm using norm_basis[of j] d by auto hence **:"((f (x - d *\<^sub>R basis j))$k \ (f x)$k \ (f (x + d *\<^sub>R basis j))$k \ (f x)$k) \ ((f (x - d *\<^sub>R basis j))$k \ (f x)$k \ (f (x + d *\<^sub>R basis j))$k \ (f x)$k)" using assms(2) by auto have ***:"\y y1 y2 d dx::real. 
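The differentiable_bound lemmas above (the vector-valued version and differentiable_bound_real) amount to the usual mean value inequality. Roughly, for a convex s, if (f has_derivative f' x) (at x within s) and onorm (f' x) <= B for every x in s, then

\[ \lVert f\,x - f\,y \rVert \;\le\; B\,\lVert x - y \rVert \qquad \text{for all } x, y \in s, \]

obtained by estimating f along the straight segment from x to y, as in the proof sketched above. This is also the bound invoked in the local injectivity and inverse function arguments further down to control the perturbation map.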
(y1\y\y2\y) \ (y\y1\y\y2) \ d < abs dx \ abs(y1 - y - - dx) \ d \ (abs (y2 - y - dx) \ d) \ False" by arith show False apply(rule ***[OF **, where dx="d * D $ k $ j" and d="\D $ k $ j\ / 2 * \d\"]) using *[of "-d"] and *[of d] and d[THEN conjunct1] and j unfolding mult_minus_left - unfolding abs_mult diff_minus_eq_add scaleR.minus_left unfolding group_simps by (auto intro: mult_pos_pos) + unfolding abs_mult diff_minus_eq_add scaleR.minus_left unfolding algebra_simps by (auto intro: mult_pos_pos) qed subsection {* In particular if we have a mapping into @{typ "real^1"}. *} @@ -644,11 +634,6 @@ subsection {* The traditional Rolle theorem in one dimension. *} -lemma vec1_le[simp]:fixes a::real shows "vec1 a \ vec1 b \ a \ b" - unfolding vector_le_def by auto -lemma vec1_less[simp]:fixes a::real shows "vec1 a < vec1 b \ a < b" - unfolding vector_less_def by auto - lemma rolle: fixes f::"real\real" assumes "a < b" "f a = f b" "continuous_on {a..b} f" "\x\{a<.. B * norm(x - y)" proof- let ?p = "\u. x + u *\<^sub>R (y - x)" have *:"\u. u\{0..1} \ x + u *\<^sub>R (y - x) \ s" - using assms(1)[unfolded convex_alt,rule_format,OF x y] unfolding scaleR_left_diff_distrib scaleR_right_diff_distrib by(auto simp add:group_simps) + using assms(1)[unfolded convex_alt,rule_format,OF x y] unfolding scaleR_left_diff_distrib scaleR_right_diff_distrib by(auto simp add:algebra_simps) hence 1:"continuous_on {0..1} (f \ ?p)" apply- apply(rule continuous_on_intros continuous_on_vmul)+ unfolding continuous_on_eq_continuous_within apply(rule,rule differentiable_imp_continuous_within) unfolding differentiable_def apply(rule_tac x="f' xa" in exI) @@ -754,11 +739,14 @@ lemma onorm_vec1: fixes f::"real \ real" shows "onorm (\x. vec1 (f (dest_vec1 x))) = onorm f" proof- have "\x::real^1. norm x = 1 \ x\{vec1 -1, vec1 (1::real)}" unfolding forall_vec1 by(auto simp add:Cart_eq) - hence 1:"{x. norm x = 1} = {vec1 -1, vec1 (1::real)}" by(auto simp add:norm_vec1) + hence 1:"{x. norm x = 1} = {vec1 -1, vec1 (1::real)}" by auto have 2:"{norm (vec1 (f (dest_vec1 x))) |x. norm x = 1} = (\x. norm (vec1 (f (dest_vec1 x)))) ` {x. norm x=1}" by auto have "\x::real. norm x = 1 \ x\{-1, 1}" by auto hence 3:"{x. norm x = 1} = {-1, (1::real)}" by auto have 4:"{norm (f x) |x. norm x = 1} = (\x. norm (f x)) ` {x. norm x=1}" by auto - show ?thesis unfolding onorm_def 1 2 3 4 by(simp add:Sup_finite_Max norm_vec1) qed + show ?thesis unfolding onorm_def 1 2 3 4 by(simp add:Sup_finite_Max) qed + +lemma convex_vec1:"convex (vec1 ` s) = convex (s::real set)" + unfolding convex_def Ball_def forall_vec1 unfolding vec1_dest_vec1_simps image_iff by auto lemma differentiable_bound_real: fixes f::"real \ real" assumes "convex s" "\x\s. (f has_derivative f' x) (at x within s)" "\x\s. onorm(f' x) \ B" and x:"x\s" and y:"y\s" @@ -799,14 +787,14 @@ guess d2 using assms(5)[unfolded open_dist,rule_format,OF assms(6)] .. note d2=this guess d using real_lbound_gt_zero[OF d1[THEN conjunct1] d2[THEN conjunct1]] .. 
note d=this thus ?case apply(rule_tac x=d in exI) apply rule defer proof(rule,rule) - fix z assume as:"norm (z - y) < d" hence "z\t" using d2 d unfolding vector_dist_norm by auto + fix z assume as:"norm (z - y) < d" hence "z\t" using d2 d unfolding dist_norm by auto have "norm (g z - g y - g' (z - y)) \ norm (g' (f (g z) - y - f' (g z - g y)))" unfolding g'.diff f'.diff unfolding assms(3)[unfolded o_def id_def, THEN fun_cong] unfolding assms(7)[rule_format,OF `z\t`] apply(subst norm_minus_cancel[THEN sym]) by auto also have "\ \ norm(f (g z) - y - f' (g z - g y)) * C" by(rule C[THEN conjunct2,rule_format]) also have "\ \ (e / C) * norm (g z - g y) * C" apply(rule mult_right_mono) apply(rule d0[THEN conjunct2,rule_format,unfolded assms(7)[rule_format,OF `y\t`]]) apply(cases "z=y") defer - apply(rule d1[THEN conjunct2, unfolded vector_dist_norm,rule_format]) using as d C d0 by auto + apply(rule d1[THEN conjunct2, unfolded dist_norm,rule_format]) using as d C d0 by auto also have "\ \ e * norm (g z - g y)" using C by(auto simp add:field_simps) finally show "norm (g z - g y - g' (z - y)) \ e * norm (g z - g y)" by simp qed auto qed have *:"(0::real) < 1 / 2" by auto guess d using lem1[rule_format,OF *] .. note d=this def B\"C*2" @@ -862,7 +850,7 @@ assumes "compact t" "convex t" "t \ {}" "continuous_on t f" "\x\s. \y\t. x + (y - f y) \ t" "x\s" shows "\y\t. f y = x" proof- - have *:"\x y. f y = x \ x + (y - f y) = y" by(auto simp add:group_simps) + have *:"\x y. f y = x \ x + (y - f y) = y" by(auto simp add:algebra_simps) show ?thesis unfolding * apply(rule brouwer[OF assms(1-3), of "\y. x + (y - f y)"]) apply(rule continuous_on_intros assms)+ using assms(4-6) by auto qed @@ -894,36 +882,36 @@ apply(rule continuous_on_intros linear_continuous_on[OF assms(5)])+ apply(rule continuous_on_subset[OF assms(2)]) apply(rule,unfold image_iff,erule bexE) proof- fix y z assume as:"y \cball (f x) e" "z = x + (g' y - g' (f x))" - have "dist x z = norm (g' (f x) - g' y)" unfolding as(2) and vector_dist_norm by auto + have "dist x z = norm (g' (f x) - g' y)" unfolding as(2) and dist_norm by auto also have "\ \ norm (f x - y) * B" unfolding g'.diff[THEN sym] using B by auto - also have "\ \ e * B" using as(1)[unfolded mem_cball vector_dist_norm] using B by auto + also have "\ \ e * B" using as(1)[unfolded mem_cball dist_norm] using B by auto also have "\ \ e1" using e unfolding less_divide_eq using B by auto finally have "z\cball x e1" unfolding mem_cball by force thus "z \ s" using e1 assms(7) by auto qed next fix y z assume as:"y \ cball (f x) (e / 2)" "z \ cball (f x) e" have "norm (g' (z - f x)) \ norm (z - f x) * B" using B by auto - also have "\ \ e * B" apply(rule mult_right_mono) using as(2)[unfolded mem_cball vector_dist_norm] and B unfolding norm_minus_commute by auto + also have "\ \ e * B" apply(rule mult_right_mono) using as(2)[unfolded mem_cball dist_norm] and B unfolding norm_minus_commute by auto also have "\ < e0" using e and B unfolding less_divide_eq by auto finally have *:"norm (x + g' (z - f x) - x) < e0" by auto have **:"f x + f' (x + g' (z - f x) - x) = z" using assms(6)[unfolded o_def id_def,THEN cong] by auto have "norm (f x - (y + (z - f (x + g' (z - f x))))) \ norm (f (x + g' (z - f x)) - z) + norm (f x - y)" - using norm_triangle_ineq[of "f (x + g'(z - f x)) - z" "f x - y"] by(auto simp add:group_simps) - also have "\ \ 1 / (B * 2) * norm (g' (z - f x)) + norm (f x - y)" using e0[THEN conjunct2,rule_format,OF *] unfolding group_simps ** by auto - also have "\ \ 1 / (B * 2) * norm 
(g' (z - f x)) + e/2" using as(1)[unfolded mem_cball vector_dist_norm] by auto + using norm_triangle_ineq[of "f (x + g'(z - f x)) - z" "f x - y"] by(auto simp add:algebra_simps) + also have "\ \ 1 / (B * 2) * norm (g' (z - f x)) + norm (f x - y)" using e0[THEN conjunct2,rule_format,OF *] unfolding algebra_simps ** by auto + also have "\ \ 1 / (B * 2) * norm (g' (z - f x)) + e/2" using as(1)[unfolded mem_cball dist_norm] by auto also have "\ \ 1 / (B * 2) * B * norm (z - f x) + e/2" using * and B by(auto simp add:field_simps) also have "\ \ 1 / 2 * norm (z - f x) + e/2" by auto - also have "\ \ e/2 + e/2" apply(rule add_right_mono) using as(2)[unfolded mem_cball vector_dist_norm] unfolding norm_minus_commute by auto - finally show "y + (z - f (x + g' (z - f x))) \ cball (f x) e" unfolding mem_cball vector_dist_norm by auto + also have "\ \ e/2 + e/2" apply(rule add_right_mono) using as(2)[unfolded mem_cball dist_norm] unfolding norm_minus_commute by auto + finally show "y + (z - f (x + g' (z - f x))) \ cball (f x) e" unfolding mem_cball dist_norm by auto qed(insert e, auto) note lem = this show ?thesis unfolding mem_interior apply(rule_tac x="e/2" in exI) apply(rule,rule divide_pos_pos) prefer 3 proof fix y assume "y \ ball (f x) (e/2)" hence *:"y\cball (f x) (e/2)" by auto guess z using lem[rule_format,OF *] .. note z=this hence "norm (g' (z - f x)) \ norm (z - f x) * B" using B by(auto simp add:field_simps) - also have "\ \ e * B" apply(rule mult_right_mono) using z(1) unfolding mem_cball vector_dist_norm norm_minus_commute using B by auto + also have "\ \ e * B" apply(rule mult_right_mono) using z(1) unfolding mem_cball dist_norm norm_minus_commute using B by auto also have "\ \ e1" using e B unfolding less_divide_eq by auto finally have "x + g'(z - f x) \ t" apply- apply(rule e1[THEN conjunct2,unfolded subset_eq,rule_format]) - unfolding mem_cball vector_dist_norm by auto + unfolding mem_cball dist_norm by auto thus "y \ f ` t" using z by auto qed(insert e, auto) qed text {* Hence the following eccentric variant of the inverse function theorem. *) @@ -983,7 +971,7 @@ (* we know for some other reason that the inverse function exists, it's OK. *} lemma bounded_linear_sub: "bounded_linear f \ bounded_linear g ==> bounded_linear (\x. f x - g x)" - using bounded_linear_add[of f "\x. - g x"] bounded_linear_minus[of g] by(auto simp add:group_simps) + using bounded_linear_add[of f "\x. - g x"] bounded_linear_minus[of g] by(auto simp add:algebra_simps) lemma has_derivative_locally_injective: fixes f::"real^'n \ real^'m" assumes "a \ s" "open s" "bounded_linear g'" "g' o f'(a) = id" @@ -1004,7 +992,7 @@ show "\x\ball a d. \x'\ball a d. f x' = f x \ x' = x" proof(intro strip) fix x y assume as:"x\ball a d" "y\ball a d" "f x = f y" def ph \ "\w. w - g'(f w - f x)" have ph':"ph = g' \ (\w. f' a w - (f w - f x))" - unfolding ph_def o_def unfolding diff using f'g' by(auto simp add:group_simps) + unfolding ph_def o_def unfolding diff using f'g' by(auto simp add:algebra_simps) have "norm (ph x - ph y) \ (1/2) * norm (x - y)" apply(rule differentiable_bound[OF convex_ball _ _ as(1-2), where f'="\x v. v - g'(f' x v)"]) apply(rule_tac[!] 
ballI) proof- fix u assume u:"u \ ball a d" hence "u\s" using d d2 by auto @@ -1013,14 +1001,14 @@ unfolding ph' * apply(rule diff_chain_within) defer apply(rule bounded_linear.has_derivative[OF assms(3)]) apply(rule has_derivative_intros) defer apply(rule has_derivative_sub[where g'="\x.0",unfolded diff_0_right]) apply(rule has_derivative_at_within) using assms(5) and `u\s` `a\s` - by(auto intro!: has_derivative_intros derivative_linear) + by(auto intro!: has_derivative_intros derivative_linear) have **:"bounded_linear (\x. f' u x - f' a x)" "bounded_linear (\x. f' a x - f' u x)" apply(rule_tac[!] bounded_linear_sub) apply(rule_tac[!] derivative_linear) using assms(5) `u\s` `a\s` by auto have "onorm (\v. v - g' (f' u v)) \ onorm g' * onorm (\w. f' a w - f' u w)" unfolding * apply(rule onorm_compose) unfolding linear_conv_bounded_linear by(rule assms(3) **)+ also have "\ \ onorm g' * k" apply(rule mult_left_mono) using d1[THEN conjunct2,rule_format,of u] using onorm_neg[OF **(1)[unfolded linear_linear]] - using d and u and onorm_pos_le[OF assms(3)[unfolded linear_linear]] by(auto simp add:group_simps) + using d and u and onorm_pos_le[OF assms(3)[unfolded linear_linear]] by(auto simp add:algebra_simps) also have "\ \ 1/2" unfolding k_def by auto finally show "onorm (\v. v - g' (f' u v)) \ 1 / 2" by assumption qed moreover have "norm (ph y - ph x) = norm (y - x)" apply(rule arg_cong[where f=norm]) @@ -1039,7 +1027,7 @@ fix x assume "x\s" show "((\a. f m a - f n a) has_derivative (\h. f' m x h - f' n x h)) (at x within s)" by(rule has_derivative_intros assms(2)[rule_format] `x\s`)+ { fix h have "norm (f' m x h - f' n x h) \ norm (f' m x h - g' x h) + norm (f' n x h - g' x h)" - using norm_triangle_ineq[of "f' m x h - g' x h" "- f' n x h + g' x h"] unfolding norm_minus_commute by(auto simp add:group_simps) + using norm_triangle_ineq[of "f' m x h - g' x h" "- f' n x h + g' x h"] unfolding norm_minus_commute by(auto simp add:algebra_simps) also have "\ \ e * norm h+ e * norm h" using assms(3)[rule_format,OF `N\m` `x\s`, of h] assms(3)[rule_format,OF `N\n` `x\s`, of h] by(auto simp add:field_simps) finally have "norm (f' m x h - f' n x h) \ 2 * e * norm h" by auto } @@ -1071,9 +1059,9 @@ show " \M. \m\M. \n\M. dist (f m x) (f n x) < e" apply(rule_tac x="max M N" in exI) proof(default+) fix m n assume as:"max M N \m" "max M N\n" have "dist (f m x) (f n x) \ norm (f m x0 - f n x0) + norm (f m x - f n x - (f m x0 - f n x0))" - unfolding vector_dist_norm by(rule norm_triangle_sub) + unfolding dist_norm by(rule norm_triangle_sub) also have "\ \ norm (f m x0 - f n x0) + e / 2" using N[rule_format,OF _ _ `x\s` `x0\s`, of m n] and as and False by auto - also have "\ < e / 2 + e / 2" apply(rule add_strict_right_mono) using as and M[rule_format] unfolding vector_dist_norm by auto + also have "\ < e / 2 + e / 2" apply(rule add_strict_right_mono) using as and M[rule_format] unfolding dist_norm by auto finally show "dist (f m x) (f n x) < e" by auto qed qed qed qed then guess g .. note g = this have lem2:"\e>0. \N. \n\N. \x\s. \y\s. norm((f n x - f n y) - (g x - g y)) \ e * norm(x - y)" proof(rule,rule) @@ -1083,7 +1071,7 @@ have "eventually (\xa. 
norm (f n x - f n y - (f xa x - f xa y)) \ e * norm (x - y)) sequentially" unfolding eventually_sequentially apply(rule_tac x=N in exI) proof(rule,rule) fix m assume "N\m" thus "norm (f n x - f n y - (f m x - f m y)) \ e * norm (x - y)" - using N[rule_format, of n m x y] and as by(auto simp add:group_simps) qed + using N[rule_format, of n m x y] and as by(auto simp add:algebra_simps) qed thus "norm (f n x - f n y - (g x - g y)) \ e * norm (x - y)" apply- apply(rule Lim_norm_ubound[OF trivial_limit_sequentially, where f="\m. (f n x - f n y) - (f m x - f m y)"]) apply(rule Lim_sub Lim_const g[rule_format] as)+ by assumption qed qed @@ -1097,12 +1085,12 @@ case False hence *:"e / 2 / norm u > 0" using `e>0` by(auto intro!: divide_pos_pos) guess N using assms(3)[rule_format,OF *] .. note N=this show ?thesis apply(rule_tac x=N in exI) proof(rule,rule) case goal1 - show ?case unfolding vector_dist_norm using N[rule_format,OF goal1 `x\s`, of u] False `e>0` + show ?case unfolding dist_norm using N[rule_format,OF goal1 `x\s`, of u] False `e>0` by (auto simp add:field_simps) qed qed qed show "bounded_linear (g' x)" unfolding linear_linear linear_def apply(rule,rule,rule) defer proof(rule,rule) fix x' y z::"real^'m" and c::real note lin = assms(2)[rule_format,OF `x\s`,THEN derivative_linear] - show "g' x (c *s x') = c *s g' x x'" apply(rule Lim_unique[OF trivial_limit_sequentially]) + show "g' x (c *\<^sub>R x') = c *\<^sub>R g' x x'" apply(rule Lim_unique[OF trivial_limit_sequentially]) apply(rule lem3[rule_format]) unfolding smult_conv_scaleR unfolding lin[unfolded bounded_linear_def bounded_linear_axioms_def,THEN conjunct2,THEN conjunct1,rule_format] apply(rule Lim_cmul) by(rule lem3[rule_format]) @@ -1120,10 +1108,10 @@ have "norm (f ?N y - f ?N x - f' ?N x (y - x)) \ e / 3 * norm (y - x)" using d1 and as by auto ultimately have "norm (g y - g x - f' ?N x (y - x)) \ 2 * e / 3 * norm (y - x)" using norm_triangle_le[of "g y - g x - (f ?N y - f ?N x)" "f ?N y - f ?N x - f' ?N x (y - x)" "2 * e / 3 * norm (y - x)"] - by (auto simp add:group_simps) moreover + by (auto simp add:algebra_simps) moreover have " norm (f' ?N x (y - x) - g' x (y - x)) \ e / 3 * norm (y - x)" using N1 `x\s` by auto ultimately show "norm (g y - g x - g' x (y - x)) \ e * norm (y - x)" - using norm_triangle_le[of "g y - g x - f' (max N1 N2) x (y - x)" "f' (max N1 N2) x (y - x) - g' x (y - x)"] by(auto simp add:group_simps) + using norm_triangle_le[of "g y - g x - f' (max N1 N2) x (y - x)" "f' (max N1 N2) x (y - x) - g' x (y - x)"] by(auto simp add:algebra_simps) qed qed qed qed subsection {* Can choose to line up antiderivatives if we want. *} @@ -1196,9 +1184,9 @@ apply(rule mult_mono) using B C D by (auto simp add: field_simps intro!:mult_nonneg_nonneg) also have "\ = (B * C * D * norm (y - x)) * norm (y - x)" by(auto simp add:field_simps) also have "\ < e * norm (y - x)" apply(rule mult_strict_right_mono) - using as(3)[unfolded vector_dist_norm] and as(2) unfolding pos_less_divide_eq[OF bcd] by (auto simp add:field_simps) + using as(3)[unfolded dist_norm] and as(2) unfolding pos_less_divide_eq[OF bcd] by (auto simp add:field_simps) finally show "dist ((1 / norm (y - x)) *\<^sub>R h (f' (y - x)) (g' (y - x))) 0 < e" - unfolding vector_dist_norm apply-apply(cases "y = x") by(auto simp add:field_simps) qed qed + unfolding dist_norm apply-apply(cases "y = x") by(auto simp add:field_simps) qed qed have "bounded_linear (\d. 
h (f x) (g' d) + h (f' d) (g x))" unfolding linear_linear linear_def unfolding smult_conv_scaleR unfolding g'.add f'.scaleR f'.add g'.scaleR unfolding h.add_right h.add_left scaleR_right_distrib h.scaleR_left h.scaleR_right by auto @@ -1274,7 +1262,7 @@ unfolding has_vector_derivative_def using has_derivative_id by auto lemma has_vector_derivative_cmul: "(f has_vector_derivative f') net \ ((\x. c *\<^sub>R f x) has_vector_derivative (c *\<^sub>R f')) net" - unfolding has_vector_derivative_def apply(drule has_derivative_cmul) by(auto simp add:group_simps) + unfolding has_vector_derivative_def apply(drule has_derivative_cmul) by(auto simp add:algebra_simps) lemma has_vector_derivative_cmul_eq: assumes "c \ 0" shows "(((\x. c *\<^sub>R f x) has_vector_derivative (c *\<^sub>R f')) net \ (f has_vector_derivative f') net)" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Multivariate_Analysis/Determinants.thy --- a/src/HOL/Multivariate_Analysis/Determinants.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Multivariate_Analysis/Determinants.thy Tue May 04 20:30:22 2010 +0200 @@ -5,7 +5,7 @@ header {* Traces, Determinant of square matrices and some properties *} theory Determinants -imports Euclidean_Space Permutations +imports Euclidean_Space Permutations Vec1 begin subsection{* First some facts about products*} @@ -55,7 +55,7 @@ done (* FIXME: In Finite_Set there is a useless further assumption *) -lemma setprod_inversef: "finite A ==> setprod (inverse \ f) A = (inverse (setprod f A) :: 'a:: {division_by_zero, field})" +lemma setprod_inversef: "finite A ==> setprod (inverse \ f) A = (inverse (setprod f A) :: 'a:: field_inverse_zero)" apply (erule finite_induct) apply (simp) apply simp @@ -352,13 +352,13 @@ apply (rule setprod_insert) apply simp by blast - also have "\ = (a k $ p k * setprod (\i. ?f i $ p i) ?Uk) + (b k$ p k * setprod (\i. ?f i $ p i) ?Uk)" by (simp add: ring_simps) + also have "\ = (a k $ p k * setprod (\i. ?f i $ p i) ?Uk) + (b k$ p k * setprod (\i. ?f i $ p i) ?Uk)" by (simp add: field_simps) also have "\ = (a k $ p k * setprod (\i. ?g i $ p i) ?Uk) + (b k$ p k * setprod (\i. ?h i $ p i) ?Uk)" by (metis th1 th2) also have "\ = setprod (\i. ?g i $ p i) (insert k ?Uk) + setprod (\i. ?h i $ p i) (insert k ?Uk)" unfolding setprod_insert[OF th3] by simp finally have "setprod (\i. ?f i $ p i) ?U = setprod (\i. ?g i $ p i) ?U + setprod (\i. ?h i $ p i) ?U" unfolding kU[symmetric] . then show "of_int (sign p) * setprod (\i. ?f i $ p i) ?U = of_int (sign p) * setprod (\i. ?g i $ p i) ?U + of_int (sign p) * setprod (\i. ?h i $ p i) ?U" - by (simp add: ring_simps) + by (simp add: field_simps) qed lemma det_row_mul: @@ -389,14 +389,14 @@ apply (rule setprod_insert) apply simp by blast - also have "\ = (c*s a k) $ p k * setprod (\i. ?f i $ p i) ?Uk" by (simp add: ring_simps) + also have "\ = (c*s a k) $ p k * setprod (\i. ?f i $ p i) ?Uk" by (simp add: field_simps) also have "\ = c* (a k $ p k * setprod (\i. ?g i $ p i) ?Uk)" unfolding th1 by (simp add: mult_ac) also have "\ = c* (setprod (\i. ?g i $ p i) (insert k ?Uk))" unfolding setprod_insert[OF th3] by simp finally have "setprod (\i. ?f i $ p i) ?U = c* (setprod (\i. ?g i $ p i) ?U)" unfolding kU[symmetric] . then show "of_int (sign p) * setprod (\i. ?f i $ p i) ?U = c * (of_int (sign p) * setprod (\i. ?g i $ p i) ?U)" - by (simp add: ring_simps) + by (simp add: field_simps) qed lemma det_row_0: @@ -421,7 +421,7 @@ qed lemma det_row_span: - fixes A :: "'a:: linordered_idom^'n^'n" + fixes A :: "real^'n^'n" assumes x: "x \ span {row j A |j. 
j \ i}" shows "det (\ k. if k = i then row i A + x else row k A) = det A" proof- @@ -450,7 +450,7 @@ ultimately show ?thesis apply - - apply (rule span_induct_alt[of ?P ?S, OF P0]) + apply (rule span_induct_alt[of ?P ?S, OF P0, folded smult_conv_scaleR]) apply blast apply (rule x) done @@ -462,7 +462,7 @@ (* ------------------------------------------------------------------------- *) lemma det_dependent_rows: - fixes A:: "'a::linordered_idom^'n^'n" + fixes A:: "real^'n^'n" assumes d: "dependent (rows A)" shows "det A = 0" proof- @@ -483,12 +483,12 @@ from det_row_span[OF th0] have "det A = det (\ k. if k = i then 0 *s 1 else row k A)" unfolding right_minus vector_smult_lzero .. - with det_row_mul[of i "0::'a" "\i. 1"] + with det_row_mul[of i "0::real" "\i. 1"] have "det A = 0" by simp} ultimately show ?thesis by blast qed -lemma det_dependent_columns: assumes d: "dependent(columns (A::'a::linordered_idom^'n^'n))" shows "det A = 0" +lemma det_dependent_columns: assumes d: "dependent(columns (A::real^'n^'n))" shows "det A = 0" by (metis d det_dependent_rows rows_transpose det_transpose) (* ------------------------------------------------------------------------- *) @@ -604,7 +604,7 @@ have "setprod (\i. c i * a i $ p i) ?U = setprod c ?U * setprod (\i. a i $ p i) ?U" unfolding setprod_timesf .. then show "?s * (\xa\?U. c xa * a xa $ p xa) = - setprod c ?U * (?s* (\xa\?U. a xa $ p xa))" by (simp add: ring_simps) + setprod c ?U * (?s* (\xa\?U. a xa $ p xa))" by (simp add: field_simps) qed lemma det_mul: @@ -681,7 +681,7 @@ using permutes_in_image[OF q] by vector show "?s q * setprod (\i. (((\ i. A$i$p i *s B$p i) :: 'a^'n^'n)$i$q i)) ?U = ?s p * (setprod (\i. A$i$p i) ?U) * (?s (q o inv p) * setprod (\i. B$i$(q o inv p) i) ?U)" using ths thp pp pq permutation_inverse[OF pp] sign_inverse[OF pp] - by (simp add: sign_nz th00 ring_simps sign_idempotent sign_compose) + by (simp add: sign_nz th00 field_simps sign_idempotent sign_compose) qed } then have th2: "setsum (\f. det (\ i. A$i$f i *s B$f i)) ?PU = det A * det B" @@ -744,7 +744,7 @@ apply (rule span_setsum) apply simp apply (rule ballI) - apply (rule span_mul)+ + apply (rule span_mul [where 'a="real^'n", folded smult_conv_scaleR])+ apply (rule span_superset) apply auto done @@ -761,9 +761,9 @@ (* ------------------------------------------------------------------------- *) lemma cramer_lemma_transpose: - fixes A:: "'a::linordered_idom^'n^'n" and x :: "'a ^'n" + fixes A:: "real^'n^'n" and x :: "real^'n" shows "det ((\ i. if i = k then setsum (\i. x$i *s row i A) (UNIV::'n set) - else row i A)::'a^'n^'n) = x$k * det A" + else row i A)::real^'n^'n) = x$k * det A" (is "?lhs = ?rhs") proof- let ?U = "UNIV :: 'n set" @@ -772,7 +772,7 @@ have fUk: "finite ?Uk" by simp have kUk: "k \ ?Uk" by simp have th00: "\k s. x$k *s row k A + s = (x$k - 1) *s row k A + row k A + s" - by (vector ring_simps) + by (vector field_simps) have th001: "\f k . (\x. if x = k then f k else f x) = f" by (auto intro: ext) have "(\ i. row i A) = A" by (vector row_def) then have thd1: "det (\ i. row i A) = det A" by simp @@ -780,7 +780,7 @@ apply (rule det_row_span) apply (rule span_setsum[OF fUk]) apply (rule ballI) - apply (rule span_mul) + apply (rule span_mul [where 'a="real^'n", folded smult_conv_scaleR])+ apply (rule span_superset) apply auto done @@ -793,12 +793,12 @@ unfolding thd0 unfolding det_row_mul unfolding th001[of k "\i. 
row i A"] - unfolding thd1 by (simp add: ring_simps) + unfolding thd1 by (simp add: field_simps) qed lemma cramer_lemma: - fixes A :: "'a::linordered_idom ^'n^'n" - shows "det((\ i j. if j = k then (A *v x)$i else A$i$j):: 'a^'n^'n) = x$k * det A" + fixes A :: "real^'n^'n" + shows "det((\ i j. if j = k then (A *v x)$i else A$i$j):: real^'n^'n) = x$k * det A" proof- let ?U = "UNIV :: 'n set" have stupid: "\c. setsum (\i. c i *s row i (transpose A)) ?U = setsum (\i. c i *s column i A) ?U" @@ -813,7 +813,7 @@ lemma cramer: fixes A ::"real^'n^'n" assumes d0: "det A \ 0" - shows "A *v x = b \ x = (\ k. det(\ i j. if j=k then b$i else A$i$j :: real^'n^'n) / det A)" + shows "A *v x = b \ x = (\ k. det(\ i j. if j=k then b$i else A$i$j) / det A)" proof- from d0 obtain B where B: "A ** B = mat 1" "B ** A = mat 1" unfolding invertible_det_nz[symmetric] invertible_def by blast @@ -821,7 +821,7 @@ hence "A *v (B *v b) = b" by (simp add: matrix_vector_mul_assoc) then have xe: "\x. A*v x = b" by blast {fix x assume x: "A *v x = b" - have "x = (\ k. det(\ i j. if j=k then b$i else A$i$j :: real^'n^'n) / det A)" + have "x = (\ k. det(\ i j. if j=k then b$i else A$i$j) / det A)" unfolding x[symmetric] using d0 by (simp add: Cart_eq cramer_lemma field_simps)} with xe show ?thesis by auto @@ -901,7 +901,7 @@ have th: "\x::'a. x = 1 \ x = - 1 \ x*x = 1" (is "\x::'a. ?ths x") proof- fix x:: 'a - have th0: "x*x - 1 = (x - 1)*(x + 1)" by (simp add: ring_simps) + have th0: "x*x - 1 = (x - 1)*(x + 1)" by (simp add: field_simps) have th1: "\(x::'a) y. x = - y \ x + y = 0" apply (subst eq_iff_diff_eq_0) by simp have "x*x = 1 \ x*x - 1 = 0" by simp @@ -929,7 +929,7 @@ unfolding dot_norm_neg dist_norm[symmetric] unfolding th0 fd[rule_format] by (simp add: power2_eq_square field_simps)} note fc = this - show ?thesis unfolding linear_def vector_eq smult_conv_scaleR by (simp add: inner_simps fc ring_simps) + show ?thesis unfolding linear_def vector_eq[where 'a="real^'n"] smult_conv_scaleR by (simp add: inner_simps fc field_simps) qed lemma isometry_linear: @@ -980,7 +980,7 @@ using H(5-9) apply (simp add: norm_eq norm_eq_1) apply (simp add: inner_simps smult_conv_scaleR) unfolding * - by (simp add: ring_simps) } + by (simp add: field_simps) } note th0 = this let ?g = "\x. 
if x = 0 then 0 else norm x *s f (inverse (norm x) *s x)" {fix x:: "real ^'n" assume nx: "norm x = 1" @@ -993,15 +993,15 @@ moreover {assume "x = 0" "y \ 0" then have "dist (?g x) (?g y) = dist x y" - apply (simp add: dist_norm norm_mul) + apply (simp add: dist_norm) apply (rule f1[rule_format]) - by(simp add: norm_mul field_simps)} + by(simp add: field_simps)} moreover {assume "x \ 0" "y = 0" then have "dist (?g x) (?g y) = dist x y" - apply (simp add: dist_norm norm_mul) + apply (simp add: dist_norm) apply (rule f1[rule_format]) - by(simp add: norm_mul field_simps)} + by(simp add: field_simps)} moreover {assume z: "x \ 0" "y \ 0" have th00: "x = norm x *s (inverse (norm x) *s x)" "y = norm y *s (inverse (norm y) *s y)" "norm x *s f ((inverse (norm x) *s x)) = norm x *s f (inverse (norm x) *s x)" @@ -1013,7 +1013,7 @@ "norm (f (inverse (norm x) *s x) - f (inverse (norm y) *s y)) = norm (inverse (norm x) *s x - inverse (norm y) *s y)" using z - by (auto simp add: vector_smult_assoc field_simps norm_mul intro: f1[rule_format] fd1[rule_format, unfolded dist_norm]) + by (auto simp add: vector_smult_assoc field_simps intro: f1[rule_format] fd1[rule_format, unfolded dist_norm]) from z th0[OF th00] have "dist (?g x) (?g y) = dist x y" by (simp add: dist_norm)} ultimately have "dist (?g x) (?g y) = dist x y" by blast} @@ -1047,7 +1047,7 @@ by (simp add: nat_number setprod_numseg mult_commute) lemma det_1: "det (A::'a::comm_ring_1^1^1) = A$1$1" - by (simp add: det_def permutes_sing sign_id UNIV_1) + by (simp add: det_def sign_id UNIV_1) lemma det_2: "det (A::'a::comm_ring_1^2^2) = A$1$1 * A$2$2 - A$1$2 * A$2$1" proof- @@ -1057,7 +1057,7 @@ unfolding setsum_over_permutations_insert[OF f12] unfolding permutes_sing apply (simp add: sign_swap_id sign_id swap_id_eq) - by (simp add: arith_simps(31)[symmetric] of_int_minus of_int_1 del: arith_simps(31)) + by (simp add: arith_simps(31)[symmetric] del: arith_simps(31)) qed lemma det_3: "det (A::'a::comm_ring_1^3^3) = @@ -1078,8 +1078,8 @@ unfolding permutes_sing apply (simp add: sign_swap_id permutation_swap_id sign_compose sign_id swap_id_eq) - apply (simp add: arith_simps(31)[symmetric] of_int_minus of_int_1 del: arith_simps(31)) - by (simp add: ring_simps) + apply (simp add: arith_simps(31)[symmetric] del: arith_simps(31)) + by (simp add: field_simps) qed end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Multivariate_Analysis/Euclidean_Space.thy --- a/src/HOL/Multivariate_Analysis/Euclidean_Space.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Multivariate_Analysis/Euclidean_Space.thy Tue May 04 20:30:22 2010 +0200 @@ -8,104 +8,24 @@ imports Complex_Main "~~/src/HOL/Decision_Procs/Dense_Linear_Order" Finite_Cartesian_Product Infinite_Set Numeral_Type - Inner_Product L2_Norm + Inner_Product L2_Norm Convex uses "positivstellensatz.ML" ("normarith.ML") begin -text{* Some common special cases.*} - -lemma forall_1[simp]: "(\i::1. P i) \ P 1" - by (metis num1_eq_iff) - -lemma ex_1[simp]: "(\x::1. P x) \ P 1" - by auto (metis num1_eq_iff) - -lemma exhaust_2: - fixes x :: 2 shows "x = 1 \ x = 2" -proof (induct x) - case (of_int z) - then have "0 <= z" and "z < 2" by simp_all - then have "z = 0 | z = 1" by arith - then show ?case by auto -qed - -lemma forall_2: "(\i::2. P i) \ P 1 \ P 2" - by (metis exhaust_2) - -lemma exhaust_3: - fixes x :: 3 shows "x = 1 \ x = 2 \ x = 3" -proof (induct x) - case (of_int z) - then have "0 <= z" and "z < 3" by simp_all - then have "z = 0 \ z = 1 \ z = 2" by arith - then show ?case by auto -qed - -lemma forall_3: "(\i::3. 
P i) \ P 1 \ P 2 \ P 3" - by (metis exhaust_3) - -lemma UNIV_1: "UNIV = {1::1}" - by (auto simp add: num1_eq_iff) - -lemma UNIV_2: "UNIV = {1::2, 2::2}" - using exhaust_2 by auto - -lemma UNIV_3: "UNIV = {1::3, 2::3, 3::3}" - using exhaust_3 by auto - -lemma setsum_1: "setsum f (UNIV::1 set) = f 1" - unfolding UNIV_1 by simp - -lemma setsum_2: "setsum f (UNIV::2 set) = f 1 + f 2" - unfolding UNIV_2 by simp - -lemma setsum_3: "setsum f (UNIV::3 set) = f 1 + f 2 + f 3" - unfolding UNIV_3 by (simp add: add_ac) - subsection{* Basic componentwise operations on vectors. *} -instantiation cart :: (plus,finite) plus -begin - definition vector_add_def : "op + \ (\ x y. (\ i. (x$i) + (y$i)))" - instance .. -end - instantiation cart :: (times,finite) times begin definition vector_mult_def : "op * \ (\ x y. (\ i. (x$i) * (y$i)))" instance .. end -instantiation cart :: (minus,finite) minus -begin - definition vector_minus_def : "op - \ (\ x y. (\ i. (x$i) - (y$i)))" - instance .. -end - -instantiation cart :: (uminus,finite) uminus -begin - definition vector_uminus_def : "uminus \ (\ x. (\ i. - (x$i)))" - instance .. -end - -instantiation cart :: (zero,finite) zero -begin - definition vector_zero_def : "0 \ (\ i. 0)" - instance .. -end - instantiation cart :: (one,finite) one begin definition vector_one_def : "1 \ (\ i. 1)" instance .. end -instantiation cart :: (scaleR, finite) scaleR -begin - definition vector_scaleR_def: "scaleR = (\ r x. (\ i. scaleR r (x$i)))" - instance .. -end - instantiation cart :: (ord,finite) ord begin definition vector_le_def: @@ -114,7 +34,7 @@ instance by (intro_classes) end -text{* The ordering on @{typ "real^1"} is linear. *} +text{* The ordering on one-dimensional vectors is linear. *} class cart_one = assumes UNIV_one: "card (UNIV \ 'a set) = Suc 0" begin @@ -123,11 +43,6 @@ by (auto intro!: card_ge_0_finite) qed end -instantiation num1 :: cart_one begin -instance proof - show "CARD(1) = Suc 0" by auto -qed end - instantiation cart :: (linorder,cart_one) linorder begin instance proof guess a B using UNIV_one[where 'a='b] unfolding card_Suc_eq apply- by(erule exE)+ @@ -181,24 +96,12 @@ lemma vec_component [simp]: "vec x $ i = x" by (vector vec_def) -lemma vector_add_component [simp]: "(x + y)$i = x$i + y$i" - by vector - -lemma vector_minus_component [simp]: "(x - y)$i = x$i - y$i" - by vector - lemma vector_mult_component [simp]: "(x * y)$i = x$i * y$i" by vector lemma vector_smult_component [simp]: "(c *s y)$i = c * (y$i)" by vector -lemma vector_uminus_component [simp]: "(- x)$i = - (x$i)" - by vector - -lemma vector_scaleR_component [simp]: "(scaleR r x)$i = scaleR r (x$i)" - by vector - lemma cond_component: "(if b then x else y)$i = (if b then x$i else y$i)" by vector lemmas vector_component = @@ -208,35 +111,6 @@ subsection {* Some frequently useful arithmetic lemmas over vectors. 
*} -instance cart :: (semigroup_add,finite) semigroup_add - apply (intro_classes) by (vector add_assoc) - -instance cart :: (monoid_add,finite) monoid_add - apply (intro_classes) by vector+ - -instance cart :: (group_add,finite) group_add - apply (intro_classes) by (vector algebra_simps)+ - -instance cart :: (ab_semigroup_add,finite) ab_semigroup_add - apply (intro_classes) by (vector add_commute) - -instance cart :: (comm_monoid_add,finite) comm_monoid_add - apply (intro_classes) by vector - -instance cart :: (ab_group_add,finite) ab_group_add - apply (intro_classes) by vector+ - -instance cart :: (cancel_semigroup_add,finite) cancel_semigroup_add - apply (intro_classes) - by (vector Cart_eq)+ - -instance cart :: (cancel_ab_semigroup_add,finite) cancel_ab_semigroup_add - apply (intro_classes) - by (vector Cart_eq) - -instance cart :: (real_vector, finite) real_vector - by default (vector scaleR_left_distrib scaleR_right_distrib)+ - instance cart :: (semigroup_mult,finite) semigroup_mult apply (intro_classes) by (vector mult_assoc) @@ -252,19 +126,15 @@ instance cart :: (comm_monoid_mult,finite) comm_monoid_mult apply (intro_classes) by vector -fun vector_power where - "vector_power x 0 = 1" - | "vector_power x (Suc n) = x * vector_power x n" - instance cart :: (semiring,finite) semiring - apply (intro_classes) by (vector ring_simps)+ + apply (intro_classes) by (vector field_simps)+ instance cart :: (semiring_0,finite) semiring_0 - apply (intro_classes) by (vector ring_simps)+ + apply (intro_classes) by (vector field_simps)+ instance cart :: (semiring_1,finite) semiring_1 apply (intro_classes) by vector instance cart :: (comm_semiring,finite) comm_semiring - apply (intro_classes) by (vector ring_simps)+ + apply (intro_classes) by (vector field_simps)+ instance cart :: (comm_semiring_0,finite) comm_semiring_0 by (intro_classes) instance cart :: (cancel_comm_monoid_add, finite) cancel_comm_monoid_add .. 
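As a reading aid, not part of the original changeset: the multiplicative instances retained above are all proved componentwise, so each algebraic law on the vector type reduces to the corresponding scalar law at every index. Roughly, using the componentwise definitions from the preceding hunk,

\[
(x * y)_i = x_i\, y_i, \qquad (c \ast_s y)_i = c\, y_i,
\]

from which associativity, commutativity and distributivity follow index by index.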
@@ -278,7 +148,7 @@ instance cart :: (real_algebra,finite) real_algebra apply intro_classes - apply (simp_all add: vector_scaleR_def ring_simps) + apply (simp_all add: vector_scaleR_def field_simps) apply vector apply vector done @@ -292,19 +162,9 @@ apply vector done -lemma zero_index[simp]: - "(0 :: 'a::zero ^'n)$i = 0" by vector - lemma one_index[simp]: "(1 :: 'a::one ^'n)$i = 1" by vector -lemma one_plus_of_nat_neq_0: "(1::'a::semiring_char_0) + of_nat n \ 0" -proof- - have "(1::'a) + of_nat n = 0 \ of_nat 1 + of_nat n = (of_nat 0 :: 'a)" by simp - also have "\ \ 1 + n = 0" by (simp only: of_nat_add[symmetric] of_nat_eq_iff) - finally show ?thesis by simp -qed - instance cart :: (semiring_char_0,finite) semiring_char_0 proof (intro_classes) fix m n ::nat @@ -318,375 +178,25 @@ lemma vector_smult_assoc: "a *s (b *s x) = ((a::'a::semigroup_mult) * b) *s x" by (vector mult_assoc) lemma vector_sadd_rdistrib: "((a::'a::semiring) + b) *s x = a *s x + b *s x" - by (vector ring_simps) + by (vector field_simps) lemma vector_add_ldistrib: "(c::'a::semiring) *s (x + y) = c *s x + c *s y" - by (vector ring_simps) + by (vector field_simps) lemma vector_smult_lzero[simp]: "(0::'a::mult_zero) *s x = 0" by vector lemma vector_smult_lid[simp]: "(1::'a::monoid_mult) *s x = x" by vector lemma vector_ssub_ldistrib: "(c::'a::ring) *s (x - y) = c *s x - c *s y" - by (vector ring_simps) + by (vector field_simps) lemma vector_smult_rneg: "(c::'a::ring) *s -x = -(c *s x)" by vector lemma vector_smult_lneg: "- (c::'a::ring) *s x = -(c *s x)" by vector lemma vector_sneg_minus1: "-x = (- (1::'a::ring_1)) *s x" by vector lemma vector_smult_rzero[simp]: "c *s 0 = (0::'a::mult_zero ^ 'n)" by vector lemma vector_sub_rdistrib: "((a::'a::ring) - b) *s x = a *s x - b *s x" - by (vector ring_simps) + by (vector field_simps) lemma vec_eq[simp]: "(vec m = vec n) \ (m = n)" by (simp add: Cart_eq) -subsection {* Topological space *} - -instantiation cart :: (topological_space, finite) topological_space -begin - -definition open_vector_def: - "open (S :: ('a ^ 'b) set) \ - (\x\S. \A. (\i. open (A i) \ x$i \ A i) \ - (\y. (\i. y$i \ A i) \ y \ S))" - -instance proof - show "open (UNIV :: ('a ^ 'b) set)" - unfolding open_vector_def by auto -next - fix S T :: "('a ^ 'b) set" - assume "open S" "open T" thus "open (S \ T)" - unfolding open_vector_def - apply clarify - apply (drule (1) bspec)+ - apply (clarify, rename_tac Sa Ta) - apply (rule_tac x="\i. Sa i \ Ta i" in exI) - apply (simp add: open_Int) - done -next - fix K :: "('a ^ 'b) set set" - assume "\S\K. open S" thus "open (\K)" - unfolding open_vector_def - apply clarify - apply (drule (1) bspec) - apply (drule (1) bspec) - apply clarify - apply (rule_tac x=A in exI) - apply fast - done -qed - -end - -lemma open_vector_box: "\i. open (S i) \ open {x. \i. x $ i \ S i}" -unfolding open_vector_def by auto - -lemma open_vimage_Cart_nth: "open S \ open ((\x. x $ i) -` S)" -unfolding open_vector_def -apply clarify -apply (rule_tac x="\k. if k = i then S else UNIV" in exI, simp) -done - -lemma closed_vimage_Cart_nth: "closed S \ closed ((\x. x $ i) -` S)" -unfolding closed_open vimage_Compl [symmetric] -by (rule open_vimage_Cart_nth) - -lemma closed_vector_box: "\i. closed (S i) \ closed {x. \i. x $ i \ S i}" -proof - - have "{x. \i. x $ i \ S i} = (\i. (\x. x $ i) -` S i)" by auto - thus "\i. closed (S i) \ closed {x. \i. x $ i \ S i}" - by (simp add: closed_INT closed_vimage_Cart_nth) -qed - -lemma tendsto_Cart_nth [tendsto_intros]: - assumes "((\x. 
f x) ---> a) net" - shows "((\x. f x $ i) ---> a $ i) net" -proof (rule topological_tendstoI) - fix S assume "open S" "a $ i \ S" - then have "open ((\y. y $ i) -` S)" "a \ ((\y. y $ i) -` S)" - by (simp_all add: open_vimage_Cart_nth) - with assms have "eventually (\x. f x \ (\y. y $ i) -` S) net" - by (rule topological_tendstoD) - then show "eventually (\x. f x $ i \ S) net" - by simp -qed - -subsection {* Metric *} - -(* TODO: move somewhere else *) -lemma finite_choice: "finite A \ \x\A. \y. P x y \ \f. \x\A. P x (f x)" -apply (induct set: finite, simp_all) -apply (clarify, rename_tac y) -apply (rule_tac x="f(x:=y)" in exI, simp) -done - -instantiation cart :: (metric_space, finite) metric_space -begin - -definition dist_vector_def: - "dist (x::'a^'b) (y::'a^'b) = setL2 (\i. dist (x$i) (y$i)) UNIV" - -lemma dist_nth_le: "dist (x $ i) (y $ i) \ dist x y" -unfolding dist_vector_def -by (rule member_le_setL2) simp_all - -instance proof - fix x y :: "'a ^ 'b" - show "dist x y = 0 \ x = y" - unfolding dist_vector_def - by (simp add: setL2_eq_0_iff Cart_eq) -next - fix x y z :: "'a ^ 'b" - show "dist x y \ dist x z + dist y z" - unfolding dist_vector_def - apply (rule order_trans [OF _ setL2_triangle_ineq]) - apply (simp add: setL2_mono dist_triangle2) - done -next - (* FIXME: long proof! *) - fix S :: "('a ^ 'b) set" - show "open S \ (\x\S. \e>0. \y. dist y x < e \ y \ S)" - unfolding open_vector_def open_dist - apply safe - apply (drule (1) bspec) - apply clarify - apply (subgoal_tac "\e>0. \i y. dist y (x$i) < e \ y \ A i") - apply clarify - apply (rule_tac x=e in exI, clarify) - apply (drule spec, erule mp, clarify) - apply (drule spec, drule spec, erule mp) - apply (erule le_less_trans [OF dist_nth_le]) - apply (subgoal_tac "\i\UNIV. \e>0. \y. dist y (x$i) < e \ y \ A i") - apply (drule finite_choice [OF finite], clarify) - apply (rule_tac x="Min (range f)" in exI, simp) - apply clarify - apply (drule_tac x=i in spec, clarify) - apply (erule (1) bspec) - apply (drule (1) bspec, clarify) - apply (subgoal_tac "\r. (\i::'b. 0 < r i) \ e = setL2 r UNIV") - apply clarify - apply (rule_tac x="\i. {y. dist y (x$i) < r i}" in exI) - apply (rule conjI) - apply clarify - apply (rule conjI) - apply (clarify, rename_tac y) - apply (rule_tac x="r i - dist y (x$i)" in exI, rule conjI, simp) - apply clarify - apply (simp only: less_diff_eq) - apply (erule le_less_trans [OF dist_triangle]) - apply simp - apply clarify - apply (drule spec, erule mp) - apply (simp add: dist_vector_def setL2_strict_mono) - apply (rule_tac x="\i. e / sqrt (of_nat CARD('b))" in exI) - apply (simp add: divide_pos_pos setL2_constant) - done -qed - -end - -lemma LIMSEQ_Cart_nth: - "(X ----> a) \ (\n. X n $ i) ----> a $ i" -unfolding LIMSEQ_conv_tendsto by (rule tendsto_Cart_nth) - -lemma LIM_Cart_nth: - "(f -- x --> y) \ (\x. f x $ i) -- x --> y $ i" -unfolding LIM_conv_tendsto by (rule tendsto_Cart_nth) - -lemma Cauchy_Cart_nth: - "Cauchy (\n. X n) \ Cauchy (\n. X n $ i)" -unfolding Cauchy_def by (fast intro: le_less_trans [OF dist_nth_le]) - -lemma LIMSEQ_vector: - fixes X :: "nat \ 'a::metric_space ^ 'n" - assumes X: "\i. (\n. X n $ i) ----> (a $ i)" - shows "X ----> a" -proof (rule metric_LIMSEQ_I) - fix r :: real assume "0 < r" - then have "0 < r / of_nat CARD('n)" (is "0 < ?s") - by (simp add: divide_pos_pos) - def N \ "\i. LEAST N. \n\N. dist (X n $ i) (a $ i) < ?s" - def M \ "Max (range N)" - have "\i. \N. \n\N. dist (X n $ i) (a $ i) < ?s" - using X `0 < ?s` by (rule metric_LIMSEQ_D) - hence "\i. \n\N i. 
dist (X n $ i) (a $ i) < ?s" - unfolding N_def by (rule LeastI_ex) - hence M: "\i. \n\M. dist (X n $ i) (a $ i) < ?s" - unfolding M_def by simp - { - fix n :: nat assume "M \ n" - have "dist (X n) a = setL2 (\i. dist (X n $ i) (a $ i)) UNIV" - unfolding dist_vector_def .. - also have "\ \ setsum (\i. dist (X n $ i) (a $ i)) UNIV" - by (rule setL2_le_setsum [OF zero_le_dist]) - also have "\ < setsum (\i::'n. ?s) UNIV" - by (rule setsum_strict_mono, simp_all add: M `M \ n`) - also have "\ = r" - by simp - finally have "dist (X n) a < r" . - } - hence "\n\M. dist (X n) a < r" - by simp - then show "\M. \n\M. dist (X n) a < r" .. -qed - -lemma Cauchy_vector: - fixes X :: "nat \ 'a::metric_space ^ 'n" - assumes X: "\i. Cauchy (\n. X n $ i)" - shows "Cauchy (\n. X n)" -proof (rule metric_CauchyI) - fix r :: real assume "0 < r" - then have "0 < r / of_nat CARD('n)" (is "0 < ?s") - by (simp add: divide_pos_pos) - def N \ "\i. LEAST N. \m\N. \n\N. dist (X m $ i) (X n $ i) < ?s" - def M \ "Max (range N)" - have "\i. \N. \m\N. \n\N. dist (X m $ i) (X n $ i) < ?s" - using X `0 < ?s` by (rule metric_CauchyD) - hence "\i. \m\N i. \n\N i. dist (X m $ i) (X n $ i) < ?s" - unfolding N_def by (rule LeastI_ex) - hence M: "\i. \m\M. \n\M. dist (X m $ i) (X n $ i) < ?s" - unfolding M_def by simp - { - fix m n :: nat - assume "M \ m" "M \ n" - have "dist (X m) (X n) = setL2 (\i. dist (X m $ i) (X n $ i)) UNIV" - unfolding dist_vector_def .. - also have "\ \ setsum (\i. dist (X m $ i) (X n $ i)) UNIV" - by (rule setL2_le_setsum [OF zero_le_dist]) - also have "\ < setsum (\i::'n. ?s) UNIV" - by (rule setsum_strict_mono, simp_all add: M `M \ m` `M \ n`) - also have "\ = r" - by simp - finally have "dist (X m) (X n) < r" . - } - hence "\m\M. \n\M. dist (X m) (X n) < r" - by simp - then show "\M. \m\M. \n\M. dist (X m) (X n) < r" .. -qed - -instance cart :: (complete_space, finite) complete_space -proof - fix X :: "nat \ 'a ^ 'b" assume "Cauchy X" - have "\i. (\n. X n $ i) ----> lim (\n. X n $ i)" - using Cauchy_Cart_nth [OF `Cauchy X`] - by (simp add: Cauchy_convergent_iff convergent_LIMSEQ_iff) - hence "X ----> Cart_lambda (\i. lim (\n. X n $ i))" - by (simp add: LIMSEQ_vector) - then show "convergent X" - by (rule convergentI) -qed - -subsection {* Norms *} - -instantiation cart :: (real_normed_vector, finite) real_normed_vector -begin - -definition norm_vector_def: - "norm (x::'a^'b) = setL2 (\i. norm (x$i)) UNIV" - -definition vector_sgn_def: - "sgn (x::'a^'b) = scaleR (inverse (norm x)) x" - -instance proof - fix a :: real and x y :: "'a ^ 'b" - show "0 \ norm x" - unfolding norm_vector_def - by (rule setL2_nonneg) - show "norm x = 0 \ x = 0" - unfolding norm_vector_def - by (simp add: setL2_eq_0_iff Cart_eq) - show "norm (x + y) \ norm x + norm y" - unfolding norm_vector_def - apply (rule order_trans [OF _ setL2_triangle_ineq]) - apply (simp add: setL2_mono norm_triangle_ineq) - done - show "norm (scaleR a x) = \a\ * norm x" - unfolding norm_vector_def - by (simp add: setL2_right_distrib) - show "sgn x = scaleR (inverse (norm x)) x" - by (rule vector_sgn_def) - show "dist x y = norm (x - y)" - unfolding dist_vector_def norm_vector_def - by (simp add: dist_norm) -qed - -end - -lemma norm_nth_le: "norm (x $ i) \ norm x" -unfolding norm_vector_def -by (rule member_le_setL2) simp_all - -interpretation Cart_nth: bounded_linear "\x. 
x $ i" -apply default -apply (rule vector_add_component) -apply (rule vector_scaleR_component) -apply (rule_tac x="1" in exI, simp add: norm_nth_le) -done - -instance cart :: (banach, finite) banach .. - -subsection {* Inner products *} - abbreviation inner_bullet (infix "\" 70) where "x \ y \ inner x y" -instantiation cart :: (real_inner, finite) real_inner -begin - -definition inner_vector_def: - "inner x y = setsum (\i. inner (x$i) (y$i)) UNIV" - -instance proof - fix r :: real and x y z :: "'a ^ 'b" - show "inner x y = inner y x" - unfolding inner_vector_def - by (simp add: inner_commute) - show "inner (x + y) z = inner x z + inner y z" - unfolding inner_vector_def - by (simp add: inner_add_left setsum_addf) - show "inner (scaleR r x) y = r * inner x y" - unfolding inner_vector_def - by (simp add: setsum_right_distrib) - show "0 \ inner x x" - unfolding inner_vector_def - by (simp add: setsum_nonneg) - show "inner x x = 0 \ x = 0" - unfolding inner_vector_def - by (simp add: Cart_eq setsum_nonneg_eq_0_iff) - show "norm x = sqrt (inner x x)" - unfolding inner_vector_def norm_vector_def setL2_def - by (simp add: power2_norm_eq_inner) -qed - -end - -lemma setsum_squares_eq_0_iff: assumes fS: "finite F" and fp: "\x \ F. f x \ (0 ::'a::ordered_ab_group_add)" shows "setsum f F = 0 \ (ALL x:F. f x = 0)" -using fS fp setsum_nonneg[OF fp] -proof (induct set: finite) - case empty thus ?case by simp -next - case (insert x F) - from insert.prems have Fx: "f x \ 0" and Fp: "\ a \ F. f a \ 0" by simp_all - from insert.hyps Fp setsum_nonneg[OF Fp] - have h: "setsum f F = 0 \ (\a \F. f a = 0)" by metis - from add_nonneg_eq_0_iff[OF Fx setsum_nonneg[OF Fp]] insert.hyps(1,2) - show ?case by (simp add: h) -qed - -subsection{* The collapse of the general concepts to dimension one. *} - -lemma vector_one: "(x::'a ^1) = (\ i. (x$1))" - by (simp add: Cart_eq forall_1) - -lemma forall_one: "(\(x::'a ^1). P x) \ (\x. P(\ i. x))" - apply auto - apply (erule_tac x= "x$1" in allE) - apply (simp only: vector_one[symmetric]) - done - -lemma norm_vector_1: "norm (x :: _^1) = norm (x$1)" - by (simp add: norm_vector_def UNIV_1) - -lemma norm_real: "norm(x::real ^ 1) = abs(x$1)" - by (simp add: norm_vector_1) - -lemma dist_real: "dist(x::real ^ 1) y = abs((x$1) - (y$1))" - by (auto simp add: norm_real dist_norm) - subsection {* A connectedness or intermediate value lemma with several applications. *} lemma connected_real_lemma: @@ -747,12 +257,12 @@ ultimately show ?thesis using alb by metis qed -text{* One immediately useful corollary is the existence of square roots! --- Should help to get rid of all the development of square-root for reals as a special case @{typ "real^1"} *} +text{* One immediately useful corollary is the existence of square roots! --- Should help to get rid of all the development of square-root for reals as a special case *} lemma square_bound_lemma: "(x::real) < (1 + x) * (1 + x)" proof- have "(x + 1/2)^2 + 3/4 > 0" using zero_le_power2[of "x+1/2"] by arith - thus ?thesis by (simp add: ring_simps power2_eq_square) + thus ?thesis by (simp add: field_simps power2_eq_square) qed lemma square_continuous: "0 < (e::real) ==> \d. 0 < d \ (\y. abs(y - x) < d \ abs(y * y - x * x) < e)" @@ -775,8 +285,7 @@ lemma sqrt_even_pow2: assumes n: "even n" shows "sqrt(2 ^ n) = 2 ^ (n div 2)" proof- - from n obtain m where m: "n = 2*m" unfolding even_nat_equiv_def2 - by (auto simp add: nat_number) + from n obtain m where m: "n = 2*m" unfolding even_mult_two_ex .. 
from m have "sqrt(2 ^ n) = sqrt ((2 ^ m) ^ 2)" by (simp only: power_mult[symmetric] mult_commute) then show ?thesis using m by simp @@ -785,21 +294,14 @@ lemma real_div_sqrt: "0 <= x ==> x / sqrt(x) = sqrt(x)" apply (cases "x = 0", simp_all) using sqrt_divide_self_eq[of x] - apply (simp add: inverse_eq_divide real_sqrt_ge_0_iff field_simps) + apply (simp add: inverse_eq_divide field_simps) done text{* Hence derive more interesting properties of the norm. *} -text {* - This type-specific version is only here - to make @{text normarith.ML} happy. -*} -lemma norm_0: "norm (0::real ^ _) = 0" - by (rule norm_zero) - lemma norm_mul[simp]: "norm(a *s x) = abs(a) * norm x" - by (simp add: norm_vector_def vector_component setL2_right_distrib - abs_mult cong: strong_setL2_cong) + by (simp add: norm_vector_def setL2_right_distrib abs_mult) + lemma norm_eq_0_dot: "(norm x = 0) \ (inner x x = (0::real))" by (simp add: norm_vector_def setL2_def power2_eq_square) lemma norm_eq_0_imp: "norm x = 0 ==> x = (0::real ^'n)" by (metis norm_eq_zero) @@ -815,20 +317,17 @@ by (metis vector_mul_rcancel) lemma norm_cauchy_schwarz: - fixes x y :: "real ^ 'n" shows "inner x y <= norm x * norm y" using Cauchy_Schwarz_ineq2[of x y] by auto lemma norm_cauchy_schwarz_abs: - fixes x y :: "real ^ 'n" shows "\inner x y\ \ norm x * norm y" - using norm_cauchy_schwarz[of x y] norm_cauchy_schwarz[of x "-y"] - by (simp add: real_abs_def) + by (rule Cauchy_Schwarz_ineq2) lemma norm_triangle_sub: fixes x y :: "'a::real_normed_vector" shows "norm x \ norm y + norm (x - y)" - using norm_triangle_ineq[of "y" "x - y"] by (simp add: ring_simps) + using norm_triangle_ineq[of "y" "x - y"] by (simp add: field_simps) lemma component_le_norm: "\x$i\ <= norm x" apply (simp add: norm_vector_def) @@ -846,15 +345,15 @@ lemma real_abs_norm: "\norm x\ = norm x" by (rule abs_norm_cancel) -lemma real_abs_sub_norm: "\norm (x::real ^ 'n) - norm y\ <= norm(x - y)" +lemma real_abs_sub_norm: "\norm x - norm y\ <= norm(x - y)" by (rule norm_triangle_ineq3) -lemma norm_le: "norm(x::real ^ 'n) <= norm(y) \ x \ x <= y \ y" +lemma norm_le: "norm(x) <= norm(y) \ x \ x <= y \ y" by (simp add: norm_eq_sqrt_inner) -lemma norm_lt: "norm(x::real ^ 'n) < norm(y) \ x \ x < y \ y" +lemma norm_lt: "norm(x) < norm(y) \ x \ x < y \ y" by (simp add: norm_eq_sqrt_inner) -lemma norm_eq: "norm(x::real ^ 'n) = norm (y::real ^ 'n) \ x \ x = y \ y" +lemma norm_eq: "norm(x) = norm (y) \ x \ x = y \ y" apply(subst order_eq_iff) unfolding norm_le by auto -lemma norm_eq_1: "norm(x::real ^ 'n) = 1 \ x \ x = 1" +lemma norm_eq_1: "norm(x) = 1 \ x \ x = 1" unfolding norm_eq_sqrt_inner by auto text{* Squaring equations and inequalities involving norms. *} @@ -866,10 +365,14 @@ by (auto simp add: norm_eq_sqrt_inner) lemma real_abs_le_square_iff: "\x\ \ \y\ \ (x::real)^2 \ y^2" -proof- - have "x^2 \ y^2 \ (x -y) * (y + x) \ 0" by (simp add: ring_simps power2_eq_square) - also have "\ \ \x\ \ \y\" apply (simp add: zero_compare_simps real_abs_def not_less) by arith -finally show ?thesis .. 
+proof + assume "\x\ \ \y\" + then have "\x\\ \ \y\\" by (rule power_mono, simp) + then show "x\ \ y\" by simp +next + assume "x\ \ y\" + then have "sqrt (x\) \ sqrt (y\)" by (rule real_sqrt_le_mono) + then show "\x\ \ \y\" by simp qed lemma norm_le_square: "norm(x) <= a \ 0 <= a \ x \ x <= a^2" @@ -898,18 +401,18 @@ unfolding power2_norm_eq_inner inner_simps inner_commute by auto lemma dot_norm_neg: "x \ y = ((norm x ^ 2 + norm y ^ 2) - norm(x - y) ^ 2) / 2" - unfolding power2_norm_eq_inner inner_simps inner_commute by(auto simp add:group_simps) + unfolding power2_norm_eq_inner inner_simps inner_commute by(auto simp add:algebra_simps) text{* Equality of vectors in terms of @{term "op \"} products. *} -lemma vector_eq: "(x:: real ^ 'n) = y \ x \ x = x \ y\ y \ y = x \ x" (is "?lhs \ ?rhs") +lemma vector_eq: "x = y \ x \ x = x \ y \ y \ y = x \ x" (is "?lhs \ ?rhs") proof - assume "?lhs" then show ?rhs by simp + assume ?lhs then show ?rhs by simp next assume ?rhs then have "x \ x - x \ y = 0 \ x \ y - y \ y = 0" by simp hence "x \ (x - y) = 0 \ y \ (x - y) = 0" by (simp add: inner_simps inner_commute) - then have "(x - y) \ (x - y) = 0" by (simp add: ring_simps inner_simps inner_commute) + then have "(x - y) \ (x - y) = 0" by (simp add: field_simps inner_simps inner_commute) then show "x = y" by (simp) qed @@ -930,7 +433,7 @@ by (rule order_trans [OF norm_triangle_ineq add_mono]) lemma ge_iff_diff_ge_0: "(a::'a::linordered_ring) \ b == a - b \ 0" - by (simp add: ring_simps) + by (simp add: field_simps) lemma pth_1: fixes x :: "'a::real_normed_vector" @@ -1010,11 +513,6 @@ "x \ y \ \ (norm (x - y) \ 0)" using norm_ge_zero[of "x - y"] by auto -lemma vector_dist_norm: - fixes x :: "'a::real_normed_vector" - shows "dist x y = norm (x - y)" - by (rule dist_norm) - use "normarith.ML" method_setup norm = {* Scan.succeed (SIMPLE_METHOD' o NormArith.norm_arith_tac) @@ -1026,7 +524,7 @@ lemma dist_triangle_alt: fixes x y z :: "'a::metric_space" shows "dist y z <= dist x y + dist x z" -using dist_triangle [of y z x] by (simp add: dist_commute) +by (rule dist_triangle3) lemma dist_pos_lt: fixes x y :: "'a::metric_space" @@ -1061,12 +559,12 @@ lemma norm_triangle_half_r: shows "norm (y - x1) < e / 2 \ norm (y - x2) < e / 2 \ norm (x1 - x2) < e" - using dist_triangle_half_r unfolding vector_dist_norm[THEN sym] by auto + using dist_triangle_half_r unfolding dist_norm[THEN sym] by auto lemma norm_triangle_half_l: assumes "norm (x - y) < e / 2" "norm (x' - (y)) < e / 2" shows "norm (x - x') < e" - using dist_triangle_half_l[OF assms[unfolded vector_dist_norm[THEN sym]]] - unfolding vector_dist_norm[THEN sym] . + using dist_triangle_half_l[OF assms[unfolded dist_norm[THEN sym]]] + unfolding dist_norm[THEN sym] . lemma norm_triangle_le: "norm(x) + norm y <= e ==> norm(x + y) <= e" by (metis order_trans norm_triangle_ineq) @@ -1120,20 +618,6 @@ finally show ?case using "2.hyps" by simp qed -lemma real_setsum_norm: - fixes f :: "'a \ real ^'n" - assumes fS: "finite S" - shows "norm (setsum f S) <= setsum (\x. norm(f x)) S" -proof(induct rule: finite_induct[OF fS]) - case 1 thus ?case by simp -next - case (2 x S) - from "2.hyps" have "norm (setsum f (insert x S)) \ norm (f x) + norm (setsum f S)" by (simp add: norm_triangle_ineq) - also have "\ \ norm (f x) + setsum (\x. 
norm(f x)) S" - using "2.hyps" by simp - finally show ?case using "2.hyps" by simp -qed - lemma setsum_norm_le: fixes f :: "'a \ 'b::real_normed_vector" assumes fS: "finite S" @@ -1146,18 +630,6 @@ by arith qed -lemma real_setsum_norm_le: - fixes f :: "'a \ real ^ 'n" - assumes fS: "finite S" - and fg: "\x \ S. norm (f x) \ g x" - shows "norm (setsum f S) \ setsum g S" -proof- - from fg have "setsum (\x. norm(f x)) S <= setsum g S" - by - (rule setsum_mono, simp) - then show ?thesis using real_setsum_norm[OF fS, of f] fg - by arith -qed - lemma setsum_norm_bound: fixes f :: "'a \ 'b::real_normed_vector" assumes fS: "finite S" @@ -1166,20 +638,12 @@ using setsum_norm_le[OF fS K] setsum_constant[symmetric] by simp -lemma real_setsum_norm_bound: - fixes f :: "'a \ real ^ 'n" - assumes fS: "finite S" - and K: "\x \ S. norm (f x) \ K" - shows "norm (setsum f S) \ of_nat (card S) * K" - using real_setsum_norm_le[OF fS K] setsum_constant[symmetric] - by simp - lemma setsum_vmul: - fixes f :: "'a \ 'b::{real_normed_vector,semiring, mult_zero}" + fixes f :: "'a \ 'b::semiring_0" assumes fS: "finite S" shows "setsum f S *s v = setsum (\x. f x *s v) S" proof(induct rule: finite_induct[OF fS]) - case 1 then show ?case by (simp add: vector_smult_lzero) + case 1 then show ?case by simp next case (2 x F) from "2.hyps" have "setsum f (insert x F) *s v = (f x + setsum f F) *s v" @@ -1241,10 +705,10 @@ finally show ?thesis . qed -lemma dot_lsum: "finite S \ setsum f S \ (y::'a::{real_inner}^'n) = setsum (\x. f x \ y) S " +lemma dot_lsum: "finite S \ setsum f S \ y = setsum (\x. f x \ y) S " apply(induct rule: finite_induct) by(auto simp add: inner_simps) -lemma dot_rsum: "finite S \ (y::'a::{real_inner}^'n) \ setsum f S = setsum (\x. y \ f x) S " +lemma dot_rsum: "finite S \ y \ setsum f S = setsum (\x. y \ f x) S " apply(induct rule: finite_induct) by(auto simp add: inner_simps) subsection{* Basis vectors in coordinate directions. *} @@ -1289,6 +753,13 @@ "setsum (\i. (x$i) *s basis i) UNIV = (x::('a::ring_1) ^'n)" (is "?lhs = ?rhs" is "setsum ?f ?S = _") by (auto simp add: Cart_eq cond_value_iff setsum_delta[of "?S", where ?'b = "'a", simplified] cong del: if_weak_cong) +lemma smult_conv_scaleR: "c *s x = scaleR c x" + unfolding vector_scalar_mult_def vector_scaleR_def by simp + +lemma basis_expansion': + "setsum (\i. (x$i) *\<^sub>R basis i) UNIV = x" + by (rule basis_expansion [where 'a=real, unfolded smult_conv_scaleR]) + lemma basis_expansion_unique: "setsum (\i. f i *s basis i) UNIV = (x::('a::comm_ring_1) ^'n) \ (\i. f i = x$i)" by (simp add: Cart_eq setsum_delta cond_value_iff cong del: if_weak_cong) @@ -1314,23 +785,21 @@ shows "basis k \ (0:: 'a::semiring_1 ^'n)" by (simp add: basis_eq_0) -lemma vector_eq_ldot: "(\x. x \ y = x \ z) \ y = (z::real^'n)" - apply (auto simp add: Cart_eq dot_basis) - apply (erule_tac x="basis i" in allE) - apply (simp add: dot_basis) - apply (subgoal_tac "y = z") - apply simp - apply (simp add: Cart_eq) - done - -lemma vector_eq_rdot: "(\z. x \ z = y \ z) \ x = (y::real^'n)" - apply (auto simp add: Cart_eq dot_basis) - apply (erule_tac x="basis i" in allE) - apply (simp add: dot_basis) - apply (subgoal_tac "x = y") - apply simp - apply (simp add: Cart_eq) - done +lemma vector_eq_ldot: "(\x. x \ y = x \ z) \ y = z" +proof + assume "\x. x \ y = x \ z" + hence "\x. x \ (y - z) = 0" by (simp add: inner_simps) + hence "(y - z) \ (y - z) = 0" .. + thus "y = z" by simp +qed simp + +lemma vector_eq_rdot: "(\z. x \ z = y \ z) \ x = y" +proof + assume "\z. 
x \ z = y \ z" + hence "\z. (x - y) \ z = 0" by (simp add: inner_simps) + hence "(x - y) \ (x - y) = 0" .. + thus "x = y" by simp +qed simp subsection{* Orthogonality. *} @@ -1344,9 +813,8 @@ shows "orthogonal (basis i :: real^'n) (basis j) \ i \ j" unfolding orthogonal_basis[of i] basis_component[of j] by simp - (* FIXME : Maybe some of these require less than comm_ring, but not all*) lemma orthogonal_clauses: - "orthogonal a (0::real ^'n)" + "orthogonal a 0" "orthogonal a x ==> orthogonal a (c *\<^sub>R x)" "orthogonal a x ==> orthogonal a (-x)" "orthogonal a x \ orthogonal a y ==> orthogonal a (x + y)" @@ -1358,130 +826,68 @@ "orthogonal x a \ orthogonal y a ==> orthogonal (x - y) a" unfolding orthogonal_def inner_simps by auto -lemma orthogonal_commute: "orthogonal (x::real ^'n)y \ orthogonal y x" +lemma orthogonal_commute: "orthogonal x y \ orthogonal y x" by (simp add: orthogonal_def inner_commute) -subsection{* Explicit vector construction from lists. *} - -primrec from_nat :: "nat \ 'a::{monoid_add,one}" -where "from_nat 0 = 0" | "from_nat (Suc n) = 1 + from_nat n" - -lemma from_nat [simp]: "from_nat = of_nat" -by (rule ext, induct_tac x, simp_all) - -primrec - list_fun :: "nat \ _ list \ _ \ _" -where - "list_fun n [] = (\x. 0)" -| "list_fun n (x # xs) = fun_upd (list_fun (Suc n) xs) (from_nat n) x" - -definition "vector l = (\ i. list_fun 1 l i)" -(*definition "vector l = (\ i. if i <= length l then l ! (i - 1) else 0)"*) - -lemma vector_1: "(vector[x]) $1 = x" - unfolding vector_def by simp - -lemma vector_2: - "(vector[x,y]) $1 = x" - "(vector[x,y] :: 'a^2)$2 = (y::'a::zero)" - unfolding vector_def by simp_all - -lemma vector_3: - "(vector [x,y,z] ::('a::zero)^3)$1 = x" - "(vector [x,y,z] ::('a::zero)^3)$2 = y" - "(vector [x,y,z] ::('a::zero)^3)$3 = z" - unfolding vector_def by simp_all - -lemma forall_vector_1: "(\v::'a::zero^1. P v) \ (\x. P(vector[x]))" - apply auto - apply (erule_tac x="v$1" in allE) - apply (subgoal_tac "vector [v$1] = v") - apply simp - apply (vector vector_def) - apply (simp add: forall_1) - done - -lemma forall_vector_2: "(\v::'a::zero^2. P v) \ (\x y. P(vector[x, y]))" - apply auto - apply (erule_tac x="v$1" in allE) - apply (erule_tac x="v$2" in allE) - apply (subgoal_tac "vector [v$1, v$2] = v") - apply simp - apply (vector vector_def) - apply (simp add: forall_2) - done - -lemma forall_vector_3: "(\v::'a::zero^3. P v) \ (\x y z. P(vector[x, y, z]))" - apply auto - apply (erule_tac x="v$1" in allE) - apply (erule_tac x="v$2" in allE) - apply (erule_tac x="v$3" in allE) - apply (subgoal_tac "vector [v$1, v$2, v$3] = v") - apply simp - apply (vector vector_def) - apply (simp add: forall_3) - done - subsection{* Linear functions. *} -definition "linear f \ (\x y. f(x + y) = f x + f y) \ (\c x. f(c *s x) = c *s f x)" - -lemma linearI: assumes "\x y. f (x + y) = f x + f y" "\c x. f (c *s x) = c *s f x" +definition + linear :: "('a::real_vector \ 'b::real_vector) \ bool" where + "linear f \ (\x y. f(x + y) = f x + f y) \ (\c x. f(c *\<^sub>R x) = c *\<^sub>R f x)" + +lemma linearI: assumes "\x y. f (x + y) = f x + f y" "\c x. f (c *\<^sub>R x) = c *\<^sub>R f x" shows "linear f" using assms unfolding linear_def by auto -lemma linear_compose_cmul: "linear f ==> linear (\x. (c::'a::comm_semiring) *s f x)" - by (vector linear_def Cart_eq ring_simps) - -lemma linear_compose_neg: "linear (f :: 'a ^'n \ 'a::comm_ring ^'m) ==> linear (\x. 
-(f(x)))" by (vector linear_def Cart_eq) - -lemma linear_compose_add: "linear (f :: 'a ^'n \ 'a::semiring_1 ^'m) \ linear g ==> linear (\x. f(x) + g(x))" - by (vector linear_def Cart_eq ring_simps) - -lemma linear_compose_sub: "linear (f :: 'a ^'n \ 'a::ring_1 ^'m) \ linear g ==> linear (\x. f x - g x)" - by (vector linear_def Cart_eq ring_simps) +lemma linear_compose_cmul: "linear f ==> linear (\x. c *\<^sub>R f x)" + by (simp add: linear_def algebra_simps) + +lemma linear_compose_neg: "linear f ==> linear (\x. -(f(x)))" + by (simp add: linear_def) + +lemma linear_compose_add: "linear f \ linear g ==> linear (\x. f(x) + g(x))" + by (simp add: linear_def algebra_simps) + +lemma linear_compose_sub: "linear f \ linear g ==> linear (\x. f x - g x)" + by (simp add: linear_def algebra_simps) lemma linear_compose: "linear f \ linear g ==> linear (g o f)" by (simp add: linear_def) lemma linear_id: "linear id" by (simp add: linear_def id_def) -lemma linear_zero: "linear (\x. 0::'a::semiring_1 ^ 'n)" by (simp add: linear_def) +lemma linear_zero: "linear (\x. 0)" by (simp add: linear_def) lemma linear_compose_setsum: - assumes fS: "finite S" and lS: "\a \ S. linear (f a :: 'a::semiring_1 ^ 'n \ 'a ^'m)" - shows "linear(\x. setsum (\a. f a x :: 'a::semiring_1 ^'m) S)" + assumes fS: "finite S" and lS: "\a \ S. linear (f a)" + shows "linear(\x. setsum (\a. f a x) S)" using lS apply (induct rule: finite_induct[OF fS]) by (auto simp add: linear_zero intro: linear_compose_add) lemma linear_vmul_component: - fixes f:: "'a::semiring_1^'m \ 'a^'n" assumes lf: "linear f" - shows "linear (\x. f x $ k *s v)" + shows "linear (\x. f x $ k *\<^sub>R v)" using lf - apply (auto simp add: linear_def ) - by (vector ring_simps)+ - -lemma linear_0: "linear f ==> f 0 = (0::'a::semiring_1 ^'n)" + by (auto simp add: linear_def algebra_simps) + +lemma linear_0: "linear f ==> f 0 = 0" unfolding linear_def apply clarsimp apply (erule allE[where x="0::'a"]) apply simp done -lemma linear_cmul: "linear f ==> f(c*s x) = c *s f x" by (simp add: linear_def) - -lemma linear_neg: "linear (f :: 'a::ring_1 ^'n \ _) ==> f (-x) = - f x" - unfolding vector_sneg_minus1 - using linear_cmul[of f] by auto +lemma linear_cmul: "linear f ==> f(c *\<^sub>R x) = c *\<^sub>R f x" by (simp add: linear_def) + +lemma linear_neg: "linear f ==> f (-x) = - f x" + using linear_cmul [where c="-1"] by simp lemma linear_add: "linear f ==> f(x + y) = f x + f y" by (metis linear_def) -lemma linear_sub: "linear (f::'a::ring_1 ^'n \ _) ==> f(x - y) = f x - f y" +lemma linear_sub: "linear f ==> f(x - y) = f x - f y" by (simp add: diff_def linear_add linear_neg) lemma linear_setsum: - fixes f:: "'a::semiring_1^'n \ _" assumes lf: "linear f" and fS: "finite S" shows "f (setsum g S) = setsum (f o g) S" proof (induct rule: finite_induct[OF fS]) @@ -1496,14 +902,13 @@ qed lemma linear_setsum_mul: - fixes f:: "'a ^'n \ 'a::semiring_1^'m" assumes lf: "linear f" and fS: "finite S" - shows "f (setsum (\i. c i *s v i) S) = setsum (\i. c i *s f (v i)) S" - using linear_setsum[OF lf fS, of "\i. c i *s v i" , unfolded o_def] + shows "f (setsum (\i. c i *\<^sub>R v i) S) = setsum (\i. c i *\<^sub>R f (v i)) S" + using linear_setsum[OF lf fS, of "\i. c i *\<^sub>R v i" , unfolded o_def] linear_cmul[OF lf] by simp lemma linear_injective_0: - assumes lf: "linear (f:: 'a::ring_1 ^ 'n \ _)" + assumes lf: "linear f" shows "inj f \ (\x. f x = 0 \ x = 0)" proof- have "inj f \ (\ x y. f x = f y \ x = y)" by (simp add: inj_on_def) @@ -1523,22 +928,22 @@ let ?B = "setsum (\i. 
norm(f(basis i))) ?S" have fS: "finite ?S" by simp {fix x:: "real ^ 'm" - let ?g = "(\i. (x$i) *s (basis i) :: real ^ 'm)" - have "norm (f x) = norm (f (setsum (\i. (x$i) *s (basis i)) ?S))" - by (simp only: basis_expansion) - also have "\ = norm (setsum (\i. (x$i) *s f (basis i))?S)" + let ?g = "(\i. (x$i) *\<^sub>R (basis i) :: real ^ 'm)" + have "norm (f x) = norm (f (setsum (\i. (x$i) *\<^sub>R (basis i)) ?S))" + by (simp add: basis_expansion') + also have "\ = norm (setsum (\i. (x$i) *\<^sub>R f (basis i))?S)" using linear_setsum[OF lf fS, of ?g, unfolded o_def] linear_cmul[OF lf] by auto - finally have th0: "norm (f x) = norm (setsum (\i. (x$i) *s f (basis i))?S)" . + finally have th0: "norm (f x) = norm (setsum (\i. (x$i) *\<^sub>R f (basis i))?S)" . {fix i assume i: "i \ ?S" from component_le_norm[of x i] - have "norm ((x$i) *s f (basis i :: real ^'m)) \ norm (f (basis i)) * norm x" - unfolding norm_mul + have "norm ((x$i) *\<^sub>R f (basis i :: real ^'m)) \ norm (f (basis i)) * norm x" + unfolding norm_scaleR apply (simp only: mult_commute) apply (rule mult_mono) - by (auto simp add: ring_simps norm_ge_zero) } - then have th: "\i\ ?S. norm ((x$i) *s f (basis i :: real ^'m)) \ norm (f (basis i)) * norm x" by metis - from real_setsum_norm_le[OF fS, of "\i. (x$i) *s (f (basis i))", OF th] + by (auto simp add: field_simps) } + then have th: "\i\ ?S. norm ((x$i) *\<^sub>R f (basis i :: real ^'m)) \ norm (f (basis i)) * norm x" by metis + from setsum_norm_le[OF fS, of "\i. (x$i) *\<^sub>R (f (basis i))", OF th] have "norm (f x) \ ?B * norm x" unfolding th0 setsum_left_distrib by metis} then show ?thesis by blast qed @@ -1553,25 +958,22 @@ let ?K = "\B\ + 1" have Kp: "?K > 0" by arith {assume C: "B < 0" - have "norm (1::real ^ 'n) > 0" by (simp add: zero_less_norm_iff) + have "norm (1::real ^ 'n) > 0" by simp with C have "B * norm (1:: real ^ 'n) < 0" - by (simp add: zero_compare_simps) + by (simp add: mult_less_0_iff) with B[rule_format, of 1] norm_ge_zero[of "f 1"] have False by simp } then have Bp: "B \ 0" by ferrack {fix x::"real ^ 'n" have "norm (f x) \ ?K * norm x" using B[rule_format, of x] norm_ge_zero[of x] norm_ge_zero[of "f x"] Bp - apply (auto simp add: ring_simps split add: abs_split) + apply (auto simp add: field_simps split add: abs_split) apply (erule order_trans, simp) done } then show ?thesis using Kp by blast qed -lemma smult_conv_scaleR: "c *s x = scaleR c x" - unfolding vector_scalar_mult_def vector_scaleR_def by simp - lemma linear_conv_bounded_linear: fixes f :: "real ^ _ \ real ^ _" shows "linear f \ bounded_linear f" @@ -1600,7 +1002,7 @@ qed lemma bounded_linearI': fixes f::"real^'n \ real^'m" - assumes "\x y. f (x + y) = f x + f y" "\c x. f (c *s x) = c *s f x" + assumes "\x y. f (x + y) = f x + f y" "\c x. 
f (c *\<^sub>R x) = c *\<^sub>R f x" shows "bounded_linear f" unfolding linear_conv_bounded_linear[THEN sym] by(rule linearI[OF assms]) @@ -1613,39 +1015,38 @@ lemma bilinear_radd: "bilinear h ==> h x (y + z) = (h x y) + (h x z)" by (simp add: bilinear_def linear_def) -lemma bilinear_lmul: "bilinear h ==> h (c *s x) y = c *s (h x y)" +lemma bilinear_lmul: "bilinear h ==> h (c *\<^sub>R x) y = c *\<^sub>R (h x y)" by (simp add: bilinear_def linear_def) -lemma bilinear_rmul: "bilinear h ==> h x (c *s y) = c *s (h x y)" +lemma bilinear_rmul: "bilinear h ==> h x (c *\<^sub>R y) = c *\<^sub>R (h x y)" by (simp add: bilinear_def linear_def) -lemma bilinear_lneg: "bilinear h ==> h (- (x:: 'a::ring_1 ^ 'n)) y = -(h x y)" - by (simp only: vector_sneg_minus1 bilinear_lmul) - -lemma bilinear_rneg: "bilinear h ==> h x (- (y:: 'a::ring_1 ^ 'n)) = - h x y" - by (simp only: vector_sneg_minus1 bilinear_rmul) +lemma bilinear_lneg: "bilinear h ==> h (- x) y = -(h x y)" + by (simp only: scaleR_minus1_left [symmetric] bilinear_lmul) + +lemma bilinear_rneg: "bilinear h ==> h x (- y) = - h x y" + by (simp only: scaleR_minus1_left [symmetric] bilinear_rmul) lemma (in ab_group_add) eq_add_iff: "x = x + y \ y = 0" using add_imp_eq[of x y 0] by auto lemma bilinear_lzero: - fixes h :: "'a::ring^'n \ _" assumes bh: "bilinear h" shows "h 0 x = 0" + assumes bh: "bilinear h" shows "h 0 x = 0" using bilinear_ladd[OF bh, of 0 0 x] - by (simp add: eq_add_iff ring_simps) + by (simp add: eq_add_iff field_simps) lemma bilinear_rzero: - fixes h :: "'a::ring^_ \ _" assumes bh: "bilinear h" shows "h x 0 = 0" + assumes bh: "bilinear h" shows "h x 0 = 0" using bilinear_radd[OF bh, of x 0 0 ] - by (simp add: eq_add_iff ring_simps) - -lemma bilinear_lsub: "bilinear h ==> h (x - (y:: 'a::ring_1 ^ _)) z = h x z - h y z" + by (simp add: eq_add_iff field_simps) + +lemma bilinear_lsub: "bilinear h ==> h (x - y) z = h x z - h y z" by (simp add: diff_def bilinear_ladd bilinear_lneg) -lemma bilinear_rsub: "bilinear h ==> h z (x - (y:: 'a::ring_1 ^ _)) = h z x - h z y" +lemma bilinear_rsub: "bilinear h ==> h z (x - y) = h z x - h z y" by (simp add: diff_def bilinear_radd bilinear_rneg) lemma bilinear_setsum: - fixes h:: "'a ^_ \ 'a::semiring_1^_\ 'a ^ _" assumes bh: "bilinear h" and fS: "finite S" and fT: "finite T" shows "h (setsum f S) (setsum g T) = setsum (\(i,j). h (f i) (g j)) (S \ T) " proof- @@ -1669,19 +1070,19 @@ let ?B = "setsum (\(i,j). norm (h (basis i) (basis j))) (?M \ ?N)" have fM: "finite ?M" and fN: "finite ?N" by simp_all {fix x:: "real ^ 'm" and y :: "real^'n" - have "norm (h x y) = norm (h (setsum (\i. (x$i) *s basis i) ?M) (setsum (\i. (y$i) *s basis i) ?N))" unfolding basis_expansion .. - also have "\ = norm (setsum (\ (i,j). h ((x$i) *s basis i) ((y$j) *s basis j)) (?M \ ?N))" unfolding bilinear_setsum[OF bh fM fN] .. + have "norm (h x y) = norm (h (setsum (\i. (x$i) *\<^sub>R basis i) ?M) (setsum (\i. (y$i) *\<^sub>R basis i) ?N))" unfolding basis_expansion' .. + also have "\ = norm (setsum (\ (i,j). h ((x$i) *\<^sub>R basis i) ((y$j) *\<^sub>R basis j)) (?M \ ?N))" unfolding bilinear_setsum[OF bh fM fN] .. finally have th: "norm (h x y) = \" . 
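(* Sketch, not part of the original changeset: the chain above expands x and y in the
   standard basis, so the step proved next is the bound
     norm (h x y) <= (SUM (i,j). norm (h (basis i) (basis j))) * norm x * norm y,
   obtained by estimating each summand with abs (x$i) <= norm x and abs (y$j) <= norm y
   (component_le_norm). *)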
have "norm (h x y) \ ?B * norm x * norm y" apply (simp add: setsum_left_distrib th) - apply (rule real_setsum_norm_le) + apply (rule setsum_norm_le) using fN fM apply simp - apply (auto simp add: bilinear_rmul[OF bh] bilinear_lmul[OF bh] norm_mul ring_simps) + apply (auto simp add: bilinear_rmul[OF bh] bilinear_lmul[OF bh] field_simps simp del: scaleR_scaleR) apply (rule mult_mono) - apply (auto simp add: norm_ge_zero zero_le_mult_iff component_le_norm) + apply (auto simp add: zero_le_mult_iff component_le_norm) apply (rule mult_mono) - apply (auto simp add: norm_ge_zero zero_le_mult_iff component_le_norm) + apply (auto simp add: zero_le_mult_iff component_le_norm) done} then show ?thesis by metis qed @@ -1701,7 +1102,7 @@ have "B * norm x * norm y \ ?K * norm x * norm y" apply - apply (rule mult_right_mono, rule mult_right_mono) - by (auto simp add: norm_ge_zero) + by auto then have "norm (h x y) \ ?K * norm x * norm y" using B[rule_format, of x y] by simp} with Kp show ?thesis by blast @@ -1746,8 +1147,28 @@ definition "adjoint f = (SOME f'. \x y. f x \ y = x \ f' y)" +lemma adjoint_unique: + assumes "\x y. inner (f x) y = inner x (g y)" + shows "adjoint f = g" +unfolding adjoint_def +proof (rule some_equality) + show "\x y. inner (f x) y = inner x (g y)" using assms . +next + fix h assume "\x y. inner (f x) y = inner x (h y)" + hence "\x y. inner x (g y) = inner x (h y)" using assms by simp + hence "\x y. inner x (g y - h y) = 0" by (simp add: inner_diff_right) + hence "\y. inner (g y - h y) (g y - h y) = 0" by simp + hence "\y. h y = g y" by simp + thus "h = g" by (simp add: ext) +qed + lemma choice_iff: "(\x. \y. P x y) \ (\f. \x. P x (f x))" by metis +text {* TODO: The following lemmas about adjoints should hold for any +Hilbert space (i.e. complete inner product space). +(see \url{http://en.wikipedia.org/wiki/Hermitian_adjoint}) +*} + lemma adjoint_works_lemma: fixes f:: "real ^'n \ real ^'m" assumes lf: "linear f" @@ -1760,14 +1181,14 @@ {fix y:: "real ^ 'm" let ?w = "(\ i. (f (basis i) \ y)) :: real ^ 'n" {fix x - have "f x \ y = f (setsum (\i. (x$i) *s basis i) ?N) \ y" - by (simp only: basis_expansion) - also have "\ = (setsum (\i. (x$i) *s f (basis i)) ?N) \ y" + have "f x \ y = f (setsum (\i. (x$i) *\<^sub>R basis i) ?N) \ y" + by (simp only: basis_expansion') + also have "\ = (setsum (\i. (x$i) *\<^sub>R f (basis i)) ?N) \ y" unfolding linear_setsum[OF lf fN] by (simp add: linear_cmul[OF lf]) finally have "f x \ y = x \ ?w" apply (simp only: ) - apply (simp add: inner_vector_def setsum_left_distrib setsum_right_distrib setsum_commute[of _ ?M ?N] ring_simps) + apply (simp add: inner_vector_def setsum_left_distrib setsum_right_distrib setsum_commute[of _ ?M ?N] field_simps) done} } then show ?thesis unfolding adjoint_def @@ -1786,7 +1207,7 @@ fixes f:: "real ^'n \ real ^'m" assumes lf: "linear f" shows "linear (adjoint f)" - unfolding linear_def vector_eq_ldot[symmetric] apply safe + unfolding linear_def vector_eq_ldot[where 'a="real^'n", symmetric] apply safe unfolding inner_simps smult_conv_scaleR adjoint_works[OF lf] by auto lemma adjoint_clauses: @@ -1800,16 +1221,9 @@ fixes f:: "real ^'n \ real ^'m" assumes lf: "linear f" shows "adjoint (adjoint f) = f" - apply (rule ext) - by (simp add: vector_eq_ldot[symmetric] adjoint_clauses[OF adjoint_linear[OF lf]] adjoint_clauses[OF lf]) - -lemma adjoint_unique: - fixes f:: "real ^'n \ real ^'m" - assumes lf: "linear f" and u: "\x y. 
f' x \ y = x \ f y" - shows "f' = adjoint f" - apply (rule ext) - using u - by (simp add: vector_eq_rdot[symmetric] adjoint_clauses[OF lf]) + by (rule adjoint_unique, simp add: adjoint_clauses [OF lf]) + +subsection {* Matrix operations *} text{* Matrix notation. NB: an MxN matrix is of type @{typ "'a^'n^'m"}, not @{typ "'a^'m^'n"} *} @@ -1832,7 +1246,7 @@ lemma mat_0[simp]: "mat 0 = 0" by (vector mat_def) lemma matrix_add_ldistrib: "(A ** (B + C)) = (A ** B) + (A ** C)" - by (vector matrix_matrix_mult_def setsum_addf[symmetric] ring_simps) + by (vector matrix_matrix_mult_def setsum_addf[symmetric] field_simps) lemma matrix_mul_lid: fixes A :: "'a::semiring_1 ^ 'm ^ 'n" @@ -1924,18 +1338,18 @@ by (vector Cart_eq setsum_component) lemma linear_componentwise: - fixes f:: "'a::ring_1 ^'m \ 'a ^ _" + fixes f:: "real ^'m \ real ^ _" assumes lf: "linear f" shows "(f x)$j = setsum (\i. (x$i) * (f (basis i)$j)) (UNIV :: 'm set)" (is "?lhs = ?rhs") proof- let ?M = "(UNIV :: 'm set)" let ?N = "(UNIV :: 'n set)" have fM: "finite ?M" by simp - have "?rhs = (setsum (\i.(x$i) *s f (basis i) ) ?M)$j" - unfolding vector_smult_component[symmetric] - unfolding setsum_component[of "(\i.(x$i) *s f (basis i :: 'a^'m))" ?M] + have "?rhs = (setsum (\i.(x$i) *\<^sub>R f (basis i) ) ?M)$j" + unfolding vector_smult_component[symmetric] smult_conv_scaleR + unfolding setsum_component[of "(\i.(x$i) *\<^sub>R f (basis i :: real^'m))" ?M] .. - then show ?thesis unfolding linear_setsum_mul[OF lf fM, symmetric] basis_expansion .. + then show ?thesis unfolding linear_setsum_mul[OF lf fM, symmetric] basis_expansion' .. qed text{* Inverse matrices (not necessarily square) *} @@ -1950,23 +1364,23 @@ definition matrix:: "('a::{plus,times, one, zero}^'m \ 'a ^ 'n) \ 'a^'m^'n" where "matrix f = (\ i j. (f(basis j))$i)" -lemma matrix_vector_mul_linear: "linear(\x. A *v (x::'a::comm_semiring_1 ^ _))" - by (simp add: linear_def matrix_vector_mult_def Cart_eq ring_simps setsum_right_distrib setsum_addf) - -lemma matrix_works: assumes lf: "linear f" shows "matrix f *v x = f (x::'a::comm_ring_1 ^ 'n)" +lemma matrix_vector_mul_linear: "linear(\x. A *v (x::real ^ _))" + by (simp add: linear_def matrix_vector_mult_def Cart_eq field_simps setsum_right_distrib setsum_addf) + +lemma matrix_works: assumes lf: "linear f" shows "matrix f *v x = f (x::real ^ 'n)" apply (simp add: matrix_def matrix_vector_mult_def Cart_eq mult_commute) apply clarify apply (rule linear_componentwise[OF lf, symmetric]) done -lemma matrix_vector_mul: "linear f ==> f = (\x. matrix f *v (x::'a::comm_ring_1 ^ 'n))" by (simp add: ext matrix_works) - -lemma matrix_of_matrix_vector_mul: "matrix(\x. A *v (x :: 'a:: comm_ring_1 ^ 'n)) = A" +lemma matrix_vector_mul: "linear f ==> f = (\x. matrix f *v (x::real ^ 'n))" by (simp add: ext matrix_works) + +lemma matrix_of_matrix_vector_mul: "matrix(\x. A *v (x :: real ^ 'n)) = A" by (simp add: matrix_eq matrix_vector_mul_linear matrix_works) lemma matrix_compose: - assumes lf: "linear (f::'a::comm_ring_1^'n \ 'a^'m)" - and lg: "linear (g::'a::comm_ring_1^'m \ 'a^_)" + assumes lf: "linear (f::real^'n \ real^'m)" + and lg: "linear (g::real^'m \ real^_)" shows "matrix (g o f) = matrix g ** matrix f" using lf lg linear_compose[OF lf lg] matrix_works[OF linear_compose[OF lf lg]] by (simp add: matrix_eq matrix_works matrix_vector_mul_assoc[symmetric] o_def) @@ -1975,8 +1389,7 @@ by (simp add: matrix_vector_mult_def transpose_def Cart_eq mult_commute) lemma adjoint_matrix: "adjoint(\x. (A::real^'n^'m) *v x) = (\x. 
transpose A *v x)" - apply (rule adjoint_unique[symmetric]) - apply (rule matrix_vector_mul_linear) + apply (rule adjoint_unique) apply (simp add: transpose_def inner_vector_def matrix_vector_mult_def setsum_left_distrib setsum_right_distrib) apply (subst setsum_commute) apply (auto simp add: mult_ac) @@ -1998,40 +1411,6 @@ done -lemma real_convex_bound_lt: - assumes xa: "(x::real) < a" and ya: "y < a" and u: "0 <= u" and v: "0 <= v" - and uv: "u + v = 1" - shows "u * x + v * y < a" -proof- - have uv': "u = 0 \ v \ 0" using u v uv by arith - have "a = a * (u + v)" unfolding uv by simp - hence th: "u * a + v * a = a" by (simp add: ring_simps) - from xa u have "u \ 0 \ u*x < u*a" by (simp add: mult_compare_simps) - from ya v have "v \ 0 \ v * y < v * a" by (simp add: mult_compare_simps) - from xa ya u v have "u * x + v * y < u * a + v * a" - apply (cases "u = 0", simp_all add: uv') - apply(rule mult_strict_left_mono) - using uv' apply simp_all - - apply (rule add_less_le_mono) - apply(rule mult_strict_left_mono) - apply simp_all - apply (rule mult_left_mono) - apply simp_all - done - thus ?thesis unfolding th . -qed - -lemma real_convex_bound_le: - assumes xa: "(x::real) \ a" and ya: "y \ a" and u: "0 <= u" and v: "0 <= v" - and uv: "u + v = 1" - shows "u * x + v * y \ a" -proof- - from xa ya u v have "u * x + v * y \ u * a + v * a" by (simp add: add_mono mult_left_mono) - also have "\ \ (u + v) * a" by (simp add: ring_simps) - finally show ?thesis unfolding uv by simp -qed - lemma infinite_enumerate: assumes fS: "infinite S" shows "\r. subseq r \ (\n. r n \ S)" unfolding subseq_def @@ -2048,8 +1427,8 @@ assumes x: "0 <= (x::real)" and y:"0 <= y" and z: "0 <= z" and xy: "x^2 <= y^2 + z^2" shows "x <= y + z" proof- - have "y^2 + z^2 \ y^2 + 2*y*z + z^2" using z y by (simp add: zero_compare_simps) - with xy have th: "x ^2 \ (y+z)^2" by (simp add: power2_eq_square ring_simps) + have "y^2 + z^2 \ y^2 + 2*y*z + z^2" using z y by (simp add: mult_nonneg_nonneg) + with xy have th: "x ^2 \ (y+z)^2" by (simp add: power2_eq_square field_simps) from y z have yz: "y + z \ 0" by arith from power2_le_imp_le[OF th yz] show ?thesis . qed @@ -2074,166 +1453,6 @@ ultimately show ?thesis by metis qed -subsection{* Operator norm. *} - -definition "onorm f = Sup {norm (f x)| x. norm x = 1}" - -lemma norm_bound_generalize: - fixes f:: "real ^'n \ real^'m" - assumes lf: "linear f" - shows "(\x. norm x = 1 \ norm (f x) \ b) \ (\x. 
norm (f x) \ b * norm x)" (is "?lhs \ ?rhs") -proof- - {assume H: ?rhs - {fix x :: "real^'n" assume x: "norm x = 1" - from H[rule_format, of x] x have "norm (f x) \ b" by simp} - then have ?lhs by blast } - - moreover - {assume H: ?lhs - from H[rule_format, of "basis arbitrary"] - have bp: "b \ 0" using norm_ge_zero[of "f (basis arbitrary)"] - by (auto simp add: norm_basis elim: order_trans [OF norm_ge_zero]) - {fix x :: "real ^'n" - {assume "x = 0" - then have "norm (f x) \ b * norm x" by (simp add: linear_0[OF lf] bp)} - moreover - {assume x0: "x \ 0" - hence n0: "norm x \ 0" by (metis norm_eq_zero) - let ?c = "1/ norm x" - have "norm (?c*s x) = 1" using x0 by (simp add: n0 norm_mul) - with H have "norm (f(?c*s x)) \ b" by blast - hence "?c * norm (f x) \ b" - by (simp add: linear_cmul[OF lf] norm_mul) - hence "norm (f x) \ b * norm x" - using n0 norm_ge_zero[of x] by (auto simp add: field_simps)} - ultimately have "norm (f x) \ b * norm x" by blast} - then have ?rhs by blast} - ultimately show ?thesis by blast -qed - -lemma onorm: - fixes f:: "real ^'n \ real ^'m" - assumes lf: "linear f" - shows "norm (f x) <= onorm f * norm x" - and "\x. norm (f x) <= b * norm x \ onorm f <= b" -proof- - { - let ?S = "{norm (f x) |x. norm x = 1}" - have Se: "?S \ {}" using norm_basis by auto - from linear_bounded[OF lf] have b: "\ b. ?S *<= b" - unfolding norm_bound_generalize[OF lf, symmetric] by (auto simp add: setle_def) - {from Sup[OF Se b, unfolded onorm_def[symmetric]] - show "norm (f x) <= onorm f * norm x" - apply - - apply (rule spec[where x = x]) - unfolding norm_bound_generalize[OF lf, symmetric] - by (auto simp add: isLub_def isUb_def leastP_def setge_def setle_def)} - { - show "\x. norm (f x) <= b * norm x \ onorm f <= b" - using Sup[OF Se b, unfolded onorm_def[symmetric]] - unfolding norm_bound_generalize[OF lf, symmetric] - by (auto simp add: isLub_def isUb_def leastP_def setge_def setle_def)} - } -qed - -lemma onorm_pos_le: assumes lf: "linear (f::real ^'n \ real ^'m)" shows "0 <= onorm f" - using order_trans[OF norm_ge_zero onorm(1)[OF lf, of "basis arbitrary"], unfolded norm_basis] by simp - -lemma onorm_eq_0: assumes lf: "linear (f::real ^'n \ real ^'m)" - shows "onorm f = 0 \ (\x. f x = 0)" - using onorm[OF lf] - apply (auto simp add: onorm_pos_le) - apply atomize - apply (erule allE[where x="0::real"]) - using onorm_pos_le[OF lf] - apply arith - done - -lemma onorm_const: "onorm(\x::real^'n. (y::real ^'m)) = norm y" -proof- - let ?f = "\x::real^'n. (y::real ^ 'm)" - have th: "{norm (?f x)| x. norm x = 1} = {norm y}" - by(auto intro: vector_choose_size set_ext) - show ?thesis - unfolding onorm_def th - apply (rule Sup_unique) by (simp_all add: setle_def) -qed - -lemma onorm_pos_lt: assumes lf: "linear (f::real ^ 'n \ real ^'m)" - shows "0 < onorm f \ ~(\x. f x = 0)" - unfolding onorm_eq_0[OF lf, symmetric] - using onorm_pos_le[OF lf] by arith - -lemma onorm_compose: - assumes lf: "linear (f::real ^'n \ real ^'m)" - and lg: "linear (g::real^'k \ real^'n)" - shows "onorm (f o g) <= onorm f * onorm g" - apply (rule onorm(2)[OF linear_compose[OF lg lf], rule_format]) - unfolding o_def - apply (subst mult_assoc) - apply (rule order_trans) - apply (rule onorm(1)[OF lf]) - apply (rule mult_mono1) - apply (rule onorm(1)[OF lg]) - apply (rule onorm_pos_le[OF lf]) - done - -lemma onorm_neg_lemma: assumes lf: "linear (f::real ^'n \ real^'m)" - shows "onorm (\x. 
- f x) \ onorm f" - using onorm[OF linear_compose_neg[OF lf]] onorm[OF lf] - unfolding norm_minus_cancel by metis - -lemma onorm_neg: assumes lf: "linear (f::real ^'n \ real^'m)" - shows "onorm (\x. - f x) = onorm f" - using onorm_neg_lemma[OF lf] onorm_neg_lemma[OF linear_compose_neg[OF lf]] - by simp - -lemma onorm_triangle: - assumes lf: "linear (f::real ^'n \ real ^'m)" and lg: "linear g" - shows "onorm (\x. f x + g x) <= onorm f + onorm g" - apply(rule onorm(2)[OF linear_compose_add[OF lf lg], rule_format]) - apply (rule order_trans) - apply (rule norm_triangle_ineq) - apply (simp add: distrib) - apply (rule add_mono) - apply (rule onorm(1)[OF lf]) - apply (rule onorm(1)[OF lg]) - done - -lemma onorm_triangle_le: "linear (f::real ^'n \ real ^'m) \ linear g \ onorm(f) + onorm(g) <= e - \ onorm(\x. f x + g x) <= e" - apply (rule order_trans) - apply (rule onorm_triangle) - apply assumption+ - done - -lemma onorm_triangle_lt: "linear (f::real ^'n \ real ^'m) \ linear g \ onorm(f) + onorm(g) < e - ==> onorm(\x. f x + g x) < e" - apply (rule order_le_less_trans) - apply (rule onorm_triangle) - by assumption+ - -(* "lift" from 'a to 'a^1 and "drop" from 'a^1 to 'a -- FIXME: potential use of transfer *) - -abbreviation vec1:: "'a \ 'a ^ 1" where "vec1 x \ vec x" - -abbreviation dest_vec1:: "'a ^1 \ 'a" - where "dest_vec1 x \ (x$1)" - -lemma vec1_component[simp]: "(vec1 x)$1 = x" - by (simp add: ) - -lemma vec1_dest_vec1[simp]: "vec1(dest_vec1 x) = x" "dest_vec1(vec1 y) = y" - by (simp_all add: Cart_eq forall_1) - -lemma forall_vec1: "(\x. P x) \ (\x. P (vec1 x))" by (metis vec1_dest_vec1) - -lemma exists_vec1: "(\x. P x) \ (\x. P(vec1 x))" by (metis vec1_dest_vec1) - -lemma vec1_eq[simp]: "vec1 x = vec1 y \ x = y" by (metis vec1_dest_vec1) - -lemma dest_vec1_eq[simp]: "dest_vec1 x = dest_vec1 y \ x = y" by (metis vec1_dest_vec1) - lemma vec_in_image_vec: "vec x \ (vec ` S) \ x \ S" by auto lemma vec_add: "vec(x + y) = vec x + vec y" by (vector vec_def) @@ -2241,9 +1460,6 @@ lemma vec_cmul: "vec(c* x) = c *s vec x " by (vector vec_def) lemma vec_neg: "vec(- x) = - vec x " by (vector vec_def) -lemma range_vec1[simp]:"range vec1 = UNIV" apply(rule set_ext,rule) unfolding image_iff defer - apply(rule_tac x="dest_vec1 x" in bexI) by auto - lemma vec_setsum: assumes fS: "finite S" shows "vec(setsum f S) = setsum (vec o f) S" apply (induct rule: finite_induct[OF fS]) @@ -2251,141 +1467,6 @@ apply (auto simp add: vec_add) done -lemma dest_vec1_lambda: "dest_vec1(\ i. 
x i) = x 1" - by (simp) - -lemma dest_vec1_vec: "dest_vec1(vec x) = x" - by (simp) - -lemma dest_vec1_sum: assumes fS: "finite S" - shows "dest_vec1(setsum f S) = setsum (dest_vec1 o f) S" - apply (induct rule: finite_induct[OF fS]) - apply (simp add: dest_vec1_vec) - apply (auto simp add:vector_minus_component) - done - -lemma norm_vec1: "norm(vec1 x) = abs(x)" - by (simp add: vec_def norm_real) - -lemma dist_vec1: "dist(vec1 x) (vec1 y) = abs(x - y)" - by (simp only: dist_real vec1_component) -lemma abs_dest_vec1: "norm x = \dest_vec1 x\" - by (metis vec1_dest_vec1 norm_vec1) - -lemmas vec1_dest_vec1_simps = forall_vec1 vec_add[THEN sym] dist_vec1 vec_sub[THEN sym] vec1_dest_vec1 norm_vec1 vector_smult_component - vec1_eq vec_cmul[THEN sym] smult_conv_scaleR[THEN sym] o_def dist_real_def norm_vec1 real_norm_def - -lemma bounded_linear_vec1:"bounded_linear (vec1::real\real^1)" - unfolding bounded_linear_def additive_def bounded_linear_axioms_def - unfolding smult_conv_scaleR[THEN sym] unfolding vec1_dest_vec1_simps - apply(rule conjI) defer apply(rule conjI) defer apply(rule_tac x=1 in exI) by auto - -lemma linear_vmul_dest_vec1: - fixes f:: "'a::semiring_1^_ \ 'a^1" - shows "linear f \ linear (\x. dest_vec1(f x) *s v)" - apply (rule linear_vmul_component) - by auto - -lemma linear_from_scalars: - assumes lf: "linear (f::'a::comm_ring_1 ^1 \ 'a^_)" - shows "f = (\x. dest_vec1 x *s column 1 (matrix f))" - apply (rule ext) - apply (subst matrix_works[OF lf, symmetric]) - apply (auto simp add: Cart_eq matrix_vector_mult_def column_def mult_commute UNIV_1) - done - -lemma linear_to_scalars: assumes lf: "linear (f::real ^'n \ real^1)" - shows "f = (\x. vec1(row 1 (matrix f) \ x))" - apply (rule ext) - apply (subst matrix_works[OF lf, symmetric]) - apply (simp add: Cart_eq matrix_vector_mult_def row_def inner_vector_def mult_commute forall_1) - done - -lemma dest_vec1_eq_0: "dest_vec1 x = 0 \ x = 0" - by (simp add: dest_vec1_eq[symmetric]) - -lemma setsum_scalars: assumes fS: "finite S" - shows "setsum f S = vec1 (setsum (dest_vec1 o f) S)" - unfolding vec_setsum[OF fS] by simp - -lemma dest_vec1_wlog_le: "(\(x::'a::linorder ^ 1) y. P x y \ P y x) \ (\x y. dest_vec1 x <= dest_vec1 y ==> P x y) \ P x y" - apply (cases "dest_vec1 x \ dest_vec1 y") - apply simp - apply (subgoal_tac "dest_vec1 y \ dest_vec1 x") - apply (auto) - done - -text{* Pasting vectors. *} - -lemma linear_fstcart[intro]: "linear fstcart" - by (auto simp add: linear_def Cart_eq) - -lemma linear_sndcart[intro]: "linear sndcart" - by (auto simp add: linear_def Cart_eq) - -lemma fstcart_vec[simp]: "fstcart(vec x) = vec x" - by (simp add: Cart_eq) - -lemma fstcart_add[simp]:"fstcart(x + y) = fstcart (x::'a::{plus,times}^('b::finite + 'c::finite)) + fstcart y" - by (simp add: Cart_eq) - -lemma fstcart_cmul[simp]:"fstcart(c*s x) = c*s fstcart (x::'a::{plus,times}^('b::finite + 'c::finite))" - by (simp add: Cart_eq) - -lemma fstcart_neg[simp]:"fstcart(- x) = - fstcart (x::'a::ring_1^(_ + _))" - by (simp add: Cart_eq) - -lemma fstcart_sub[simp]:"fstcart(x - y) = fstcart (x::'a::ring_1^(_ + _)) - fstcart y" - by (simp add: Cart_eq) - -lemma fstcart_setsum: - fixes f:: "'d \ 'a::semiring_1^_" - assumes fS: "finite S" - shows "fstcart (setsum f S) = setsum (\i. 
fstcart (f i)) S" - by (induct rule: finite_induct[OF fS], simp_all add: vec_0[symmetric] del: vec_0) - -lemma sndcart_vec[simp]: "sndcart(vec x) = vec x" - by (simp add: Cart_eq) - -lemma sndcart_add[simp]:"sndcart(x + y) = sndcart (x::'a::{plus,times}^(_ + _)) + sndcart y" - by (simp add: Cart_eq) - -lemma sndcart_cmul[simp]:"sndcart(c*s x) = c*s sndcart (x::'a::{plus,times}^(_ + _))" - by (simp add: Cart_eq) - -lemma sndcart_neg[simp]:"sndcart(- x) = - sndcart (x::'a::ring_1^(_ + _))" - by (simp add: Cart_eq) - -lemma sndcart_sub[simp]:"sndcart(x - y) = sndcart (x::'a::ring_1^(_ + _)) - sndcart y" - by (simp add: Cart_eq) - -lemma sndcart_setsum: - fixes f:: "'d \ 'a::semiring_1^_" - assumes fS: "finite S" - shows "sndcart (setsum f S) = setsum (\i. sndcart (f i)) S" - by (induct rule: finite_induct[OF fS], simp_all add: vec_0[symmetric] del: vec_0) - -lemma pastecart_vec[simp]: "pastecart (vec x) (vec x) = vec x" - by (simp add: pastecart_eq fstcart_pastecart sndcart_pastecart) - -lemma pastecart_add[simp]:"pastecart (x1::'a::{plus,times}^_) y1 + pastecart x2 y2 = pastecart (x1 + x2) (y1 + y2)" - by (simp add: pastecart_eq fstcart_pastecart sndcart_pastecart) - -lemma pastecart_cmul[simp]: "pastecart (c *s (x1::'a::{plus,times}^_)) (c *s y1) = c *s pastecart x1 y1" - by (simp add: pastecart_eq fstcart_pastecart sndcart_pastecart) - -lemma pastecart_neg[simp]: "pastecart (- (x::'a::ring_1^_)) (- y) = - pastecart x y" - unfolding vector_sneg_minus1 pastecart_cmul .. - -lemma pastecart_sub: "pastecart (x1::'a::ring_1^_) y1 - pastecart x2 y2 = pastecart (x1 - x2) (y1 - y2)" - by (simp add: diff_def pastecart_neg[symmetric] del: pastecart_neg) - -lemma pastecart_setsum: - fixes f:: "'d \ 'a::semiring_1^_" - assumes fS: "finite S" - shows "pastecart (setsum f S) (setsum g S) = setsum (\i. pastecart (f i) (g i)) S" - by (simp add: pastecart_eq fstcart_setsum[OF fS] sndcart_setsum[OF fS] fstcart_pastecart sndcart_pastecart) - lemma setsum_Plus: "\finite A; finite B\ \ (\x\A <+> B. g x) = (\x\A. g (Inl x)) + (\x\B. 
g (Inr x))" @@ -2399,39 +1480,6 @@ apply (rule setsum_Plus [OF finite finite]) done -lemma norm_fstcart: "norm(fstcart x) <= norm (x::real ^('n::finite + 'm::finite))" -proof- - have th0: "norm x = norm (pastecart (fstcart x) (sndcart x))" - by (simp add: pastecart_fst_snd) - have th1: "fstcart x \ fstcart x \ pastecart (fstcart x) (sndcart x) \ pastecart (fstcart x) (sndcart x)" - by (simp add: inner_vector_def setsum_UNIV_sum pastecart_def setsum_nonneg) - then show ?thesis - unfolding th0 - unfolding norm_eq_sqrt_inner real_sqrt_le_iff id_def - by (simp add: inner_vector_def) -qed - -lemma dist_fstcart: "dist(fstcart (x::real^_)) (fstcart y) <= dist x y" - unfolding dist_norm by (metis fstcart_sub[symmetric] norm_fstcart) - -lemma norm_sndcart: "norm(sndcart x) <= norm (x::real ^('n::finite + 'm::finite))" -proof- - have th0: "norm x = norm (pastecart (fstcart x) (sndcart x))" - by (simp add: pastecart_fst_snd) - have th1: "sndcart x \ sndcart x \ pastecart (fstcart x) (sndcart x) \ pastecart (fstcart x) (sndcart x)" - by (simp add: inner_vector_def setsum_UNIV_sum pastecart_def setsum_nonneg) - then show ?thesis - unfolding th0 - unfolding norm_eq_sqrt_inner real_sqrt_le_iff id_def - by (simp add: inner_vector_def) -qed - -lemma dist_sndcart: "dist(sndcart (x::real^_)) (sndcart y) <= dist x y" - unfolding dist_norm by (metis sndcart_sub[symmetric] norm_sndcart) - -lemma dot_pastecart: "(pastecart (x1::real^'n) (x2::real^'m)) \ (pastecart y1 y2) = x1 \ y1 + x2 \ y2" - by (simp add: inner_vector_def setsum_UNIV_sum pastecart_def) - text {* TODO: move to NthRoot *} lemma sqrt_add_le_add_sqrt: assumes x: "0 \ x" and y: "0 \ y" @@ -2442,10 +1490,6 @@ apply (simp add: add_nonneg_nonneg x y) done -lemma norm_pastecart: "norm (pastecart x y) <= norm x + norm y" - unfolding norm_vector_def setL2_def setsum_UNIV_sum - by (simp add: sqrt_add_le_add_sqrt setsum_nonneg) - subsection {* A generic notion of "hull" (convex, affine, conic hull and closure). *} definition hull :: "'a set set \ 'a set \ 'a set" (infixl "hull" 75) where @@ -2519,7 +1563,7 @@ lemma real_arch_inv: "0 < e \ (\n::nat. n \ 0 \ 0 < inverse (real n) \ inverse (real n) < e)" using reals_Archimedean - apply (auto simp add: field_simps inverse_positive_iff_positive) + apply (auto simp add: field_simps) apply (subgoal_tac "inverse (real n) > 0") apply arith apply simp @@ -2534,9 +1578,9 @@ from h have p: "1 \ (1 + x) ^ n" using Suc.prems by simp from h have "1 + real n * x + x \ (1 + x) ^ n + x" by simp also have "\ \ (1 + x) ^ Suc n" apply (subst diff_le_0_iff_le[symmetric]) - apply (simp add: ring_simps) + apply (simp add: field_simps) using mult_left_mono[OF p Suc.prems] by simp - finally show ?case by (simp add: real_of_nat_Suc ring_simps) + finally show ?case by (simp add: real_of_nat_Suc field_simps) qed lemma real_arch_pow: assumes x: "1 < (x::real)" shows "\n. y < x^n" @@ -2602,10 +1646,10 @@ from geometric_sum[OF x1, of "Suc n", unfolded x1'] have "(- (1 - x)) * setsum (\i. x^i) {0 .. 
n} = - (1 - x^(Suc n))" unfolding atLeastLessThanSuc_atLeastAtMost - using x1' apply (auto simp only: field_eq_simps) - apply (simp add: ring_simps) + using x1' apply (auto simp only: field_simps) + apply (simp add: field_simps) done - then have ?thesis by (simp add: ring_simps) } + then have ?thesis by (simp add: field_simps) } ultimately show ?thesis by metis qed @@ -2624,7 +1668,7 @@ from setsum_reindex[OF i, of "op ^ x", unfolded f th setsum_right_distrib[symmetric]] have "?lhs = x^m * ((1 - x) * setsum (op ^ x) {0..n - m})" by simp then show ?thesis unfolding sum_gp_basic using mn - by (simp add: ring_simps power_add[symmetric]) + by (simp add: field_simps power_add[symmetric]) qed lemma sum_gp: "setsum (op ^ (x::'a::{field})) {m .. n} = @@ -2637,7 +1681,7 @@ {assume x: "x = 1" hence ?thesis by simp} moreover {assume x: "x \ 1" hence nz: "1 - x \ 0" by simp - from sum_gp_multiplied[OF nm, of x] nz have ?thesis by (simp add: field_eq_simps)} + from sum_gp_multiplied[OF nm, of x] nz have ?thesis by (simp add: field_simps)} ultimately have ?thesis by metis } ultimately show ?thesis by metis @@ -2646,12 +1690,15 @@ lemma sum_gp_offset: "setsum (op ^ (x::'a::{field})) {m .. m+n} = (if x = 1 then of_nat n + 1 else x^m * (1 - x^Suc n) / (1 - x))" unfolding sum_gp[of x m "m + n"] power_Suc - by (simp add: ring_simps power_add) + by (simp add: field_simps power_add) subsection{* A bit of linear algebra. *} -definition "subspace S \ 0 \ S \ (\x\ S. \y \S. x + y \ S) \ (\c. \x \S. c *s x \S )" +definition + subspace :: "'a::real_vector set \ bool" where + "subspace S \ 0 \ S \ (\x\ S. \y \S. x + y \ S) \ (\c. \x \S. c *\<^sub>R x \S )" + definition "span S = (subspace hull S)" definition "dependent S \ (\a \ S. a \ span(S - {a}))" abbreviation "independent s == ~(dependent s)" @@ -2665,13 +1712,13 @@ lemma subspace_add: "subspace S \ x \ S \ y \ S ==> x + y \ S" by (metis subspace_def) -lemma subspace_mul: "subspace S \ x \ S \ c *s x \ S" +lemma subspace_mul: "subspace S \ x \ S \ c *\<^sub>R x \ S" by (metis subspace_def) -lemma subspace_neg: "subspace S \ (x::'a::ring_1^_) \ S \ - x \ S" - by (metis vector_sneg_minus1 subspace_mul) - -lemma subspace_sub: "subspace S \ (x::'a::ring_1^_) \ S \ y \ S \ x - y \ S" +lemma subspace_neg: "subspace S \ x \ S \ - x \ S" + by (metis scaleR_minus1_left subspace_mul) + +lemma subspace_sub: "subspace S \ x \ S \ y \ S \ x - y \ S" by (metis diff_def subspace_add subspace_neg) lemma subspace_setsum: @@ -2683,19 +1730,19 @@ by (simp add: subspace_def sA, auto simp add: sA subspace_add) lemma subspace_linear_image: - assumes lf: "linear (f::'a::semiring_1^_ \ _)" and sS: "subspace S" + assumes lf: "linear f" and sS: "subspace S" shows "subspace(f ` S)" using lf sS linear_0[OF lf] unfolding linear_def subspace_def apply (auto simp add: image_iff) apply (rule_tac x="x + y" in bexI, auto) - apply (rule_tac x="c*s x" in bexI, auto) + apply (rule_tac x="c *\<^sub>R x" in bexI, auto) done -lemma subspace_linear_preimage: "linear (f::'a::semiring_1^_ \ _) ==> subspace S ==> subspace {x. f x \ S}" +lemma subspace_linear_preimage: "linear f ==> subspace S ==> subspace {x. 
f x \ S}" by (auto simp add: subspace_def linear_def linear_0[of f]) -lemma subspace_trivial: "subspace {0::'a::semiring_1 ^_}" +lemma subspace_trivial: "subspace {0}" by (simp add: subspace_def) lemma subspace_inter: "subspace A \ subspace B ==> subspace (A \ B)" @@ -2731,8 +1778,9 @@ "a \ S ==> a \ span S" "0 \ span S" "x\ span S \ y \ span S ==> x + y \ span S" - "x \ span S \ c *s x \ span S" - by (metis span_def hull_subset subset_eq subspace_span subspace_def)+ + "x \ span S \ c *\<^sub>R x \ span S" + by (metis span_def hull_subset subset_eq) + (metis subspace_span subspace_def)+ lemma span_induct: assumes SP: "\x. x \ S ==> P x" and P: "subspace P" and x: "x \ span S" shows "P x" @@ -2743,11 +1791,11 @@ show "P x" by (metis mem_def subset_eq) qed -lemma span_empty: "span {} = {(0::'a::semiring_0 ^ _)}" +lemma span_empty: "span {} = {0}" apply (simp add: span_def) apply (rule hull_unique) apply (auto simp add: mem_def subspace_def) - unfolding mem_def[of "0::'a^_", symmetric] + unfolding mem_def[of "0::'a", symmetric] apply simp done @@ -2769,15 +1817,15 @@ and P: "subspace P" shows "\x \ span S. P x" using span_induct SP P by blast -inductive span_induct_alt_help for S:: "'a::semiring_1^_ \ bool" +inductive span_induct_alt_help for S:: "'a::real_vector \ bool" where span_induct_alt_help_0: "span_induct_alt_help S 0" - | span_induct_alt_help_S: "x \ S \ span_induct_alt_help S z \ span_induct_alt_help S (c *s x + z)" + | span_induct_alt_help_S: "x \ S \ span_induct_alt_help S z \ span_induct_alt_help S (c *\<^sub>R x + z)" lemma span_induct_alt': - assumes h0: "h (0::'a::semiring_1^'n)" and hS: "\c x y. x \ S \ h y \ h (c*s x + y)" shows "\x \ span S. h x" + assumes h0: "h 0" and hS: "\c x y. x \ S \ h y \ h (c *\<^sub>R x + y)" shows "\x \ span S. h x" proof- - {fix x:: "'a^'n" assume x: "span_induct_alt_help S x" + {fix x:: "'a" assume x: "span_induct_alt_help S x" have "h x" apply (rule span_induct_alt_help.induct[OF x]) apply (rule h0) @@ -2808,10 +1856,10 @@ done} moreover {fix c x assume xt: "span_induct_alt_help S x" - then have "span_induct_alt_help S (c*s x)" + then have "span_induct_alt_help S (c *\<^sub>R x)" apply (induct rule: span_induct_alt_help.induct) apply (simp add: span_induct_alt_help_0) - apply (simp add: vector_smult_assoc vector_add_ldistrib) + apply (simp add: scaleR_right_distrib) apply (rule span_induct_alt_help_S) apply assumption apply simp @@ -2824,40 +1872,39 @@ qed lemma span_induct_alt: - assumes h0: "h (0::'a::semiring_1^'n)" and hS: "\c x y. x \ S \ h y \ h (c*s x + y)" and x: "x \ span S" + assumes h0: "h 0" and hS: "\c x y. x \ S \ h y \ h (c *\<^sub>R x + y)" and x: "x \ span S" shows "h x" using span_induct_alt'[of h S] h0 hS x by blast (* Individual closure properties. 
*) -lemma span_superset: "x \ S ==> x \ span S" by (metis span_clauses) +lemma span_superset: "x \ S ==> x \ span S" by (metis span_clauses(1)) lemma span_0: "0 \ span S" by (metis subspace_span subspace_0) lemma span_add: "x \ span S \ y \ span S ==> x + y \ span S" by (metis subspace_add subspace_span) -lemma span_mul: "x \ span S ==> (c *s x) \ span S" +lemma span_mul: "x \ span S ==> (c *\<^sub>R x) \ span S" by (metis subspace_span subspace_mul) -lemma span_neg: "x \ span S ==> - (x::'a::ring_1^_) \ span S" +lemma span_neg: "x \ span S ==> - x \ span S" by (metis subspace_neg subspace_span) -lemma span_sub: "(x::'a::ring_1^_) \ span S \ y \ span S ==> x - y \ span S" +lemma span_sub: "x \ span S \ y \ span S ==> x - y \ span S" by (metis subspace_span subspace_sub) lemma span_setsum: "finite A \ \x \ A. f x \ span S ==> setsum f A \ span S" - apply (rule subspace_setsum) - by (metis subspace_span subspace_setsum)+ - -lemma span_add_eq: "(x::'a::ring_1^_) \ span S \ x + y \ span S \ y \ span S" + by (rule subspace_setsum, rule subspace_span) + +lemma span_add_eq: "x \ span S \ x + y \ span S \ y \ span S" apply (auto simp only: span_add span_sub) apply (subgoal_tac "(x + y) - x \ span S", simp) by (simp only: span_add span_sub) (* Mapping under linear image. *) -lemma span_linear_image: assumes lf: "linear (f::'a::semiring_1 ^ _ => _)" +lemma span_linear_image: assumes lf: "linear f" shows "span (f ` S) = f ` (span S)" proof- {fix x @@ -2890,8 +1937,8 @@ (* The key breakdown property. *) lemma span_breakdown: - assumes bS: "(b::'a::ring_1 ^ _) \ S" and aS: "a \ span S" - shows "\k. a - k*s b \ span (S - {b})" (is "?P a") + assumes bS: "b \ S" and aS: "a \ span S" + shows "\k. a - k *\<^sub>R b \ span (S - {b})" (is "?P a") proof- {fix x assume xS: "x \ S" {assume ab: "x = b" @@ -2916,23 +1963,23 @@ apply (simp add: mem_def) apply (clarsimp simp add: mem_def) apply (rule_tac x="k + ka" in exI) - apply (subgoal_tac "x + y - (k + ka) *s b = (x - k*s b) + (y - ka *s b)") + apply (subgoal_tac "x + y - (k + ka) *\<^sub>R b = (x - k*\<^sub>R b) + (y - ka *\<^sub>R b)") apply (simp only: ) apply (rule span_add[unfolded mem_def]) apply assumption+ - apply (vector ring_simps) + apply (simp add: algebra_simps) apply (clarsimp simp add: mem_def) apply (rule_tac x= "c*k" in exI) - apply (subgoal_tac "c *s x - (c * k) *s b = c*s (x - k*s b)") + apply (subgoal_tac "c *\<^sub>R x - (c * k) *\<^sub>R b = c*\<^sub>R (x - k*\<^sub>R b)") apply (simp only: ) apply (rule span_mul[unfolded mem_def]) apply assumption - by (vector ring_simps) + by (simp add: algebra_simps) ultimately show "?P a" using aS span_induct[where S=S and P= "?P"] by metis qed lemma span_breakdown_eq: - "(x::'a::ring_1^_) \ span (insert a S) \ (\k. (x - k *s a) \ span S)" (is "?lhs \ ?rhs") + "x \ span (insert a S) \ (\k. 
(x - k *\<^sub>R a) \ span S)" (is "?lhs \ ?rhs") proof- {assume x: "x \ span (insert a S)" from x span_breakdown[of "a" "insert a S" "x"] @@ -2944,9 +1991,9 @@ apply blast done} moreover - { fix k assume k: "x - k *s a \ span S" - have eq: "x = (x - k *s a) + k *s a" by vector - have "(x - k *s a) + k *s a \ span (insert a S)" + { fix k assume k: "x - k *\<^sub>R a \ span S" + have eq: "x = (x - k *\<^sub>R a) + k *\<^sub>R a" by vector + have "(x - k *\<^sub>R a) + k *\<^sub>R a \ span (insert a S)" apply (rule span_add) apply (rule set_rev_mp[of _ "span S" _]) apply (rule k) @@ -2963,11 +2010,11 @@ (* Hence some "reversal" results.*) lemma in_span_insert: - assumes a: "(a::'a::field^_) \ span (insert b S)" and na: "a \ span S" + assumes a: "a \ span (insert b S)" and na: "a \ span S" shows "b \ span (insert a S)" proof- from span_breakdown[of b "insert b S" a, OF insertI1 a] - obtain k where k: "a - k*s b \ span (S - {b})" by auto + obtain k where k: "a - k*\<^sub>R b \ span (S - {b})" by auto {assume k0: "k = 0" with k have "a \ span S" apply (simp) @@ -2979,12 +2026,12 @@ with na have ?thesis by blast} moreover {assume k0: "k \ 0" - have eq: "b = (1/k) *s a - ((1/k) *s a - b)" by vector - from k0 have eq': "(1/k) *s (a - k*s b) = (1/k) *s a - b" - by (vector field_simps) - from k have "(1/k) *s (a - k*s b) \ span (S - {b})" + have eq: "b = (1/k) *\<^sub>R a - ((1/k) *\<^sub>R a - b)" by vector + from k0 have eq': "(1/k) *\<^sub>R (a - k*\<^sub>R b) = (1/k) *\<^sub>R a - b" + by (simp add: algebra_simps) + from k have "(1/k) *\<^sub>R (a - k*\<^sub>R b) \ span (S - {b})" by (rule span_mul) - hence th: "(1/k) *s a - b \ span (S - {b})" + hence th: "(1/k) *\<^sub>R a - b \ span (S - {b})" unfolding eq' . from k @@ -3002,7 +2049,7 @@ qed lemma in_span_delete: - assumes a: "(a::'a::field^_) \ span S" + assumes a: "a \ span S" and na: "a \ span (S-{b})" shows "b \ span (insert a (S - {b}))" apply (rule in_span_insert) @@ -3016,12 +2063,12 @@ (* Transitivity property. *) lemma span_trans: - assumes x: "(x::'a::ring_1^_) \ span S" and y: "y \ span (insert x S)" + assumes x: "x \ span S" and y: "y \ span (insert x S)" shows "y \ span S" proof- from span_breakdown[of x "insert x S" y, OF insertI1 y] - obtain k where k: "y -k*s x \ span (S - {x})" by auto - have eq: "y = (y - k *s x) + k *s x" by vector + obtain k where k: "y -k*\<^sub>R x \ span (S - {x})" by auto + have eq: "y = (y - k *\<^sub>R x) + k *\<^sub>R x" by vector show ?thesis apply (subst eq) apply (rule span_add) @@ -3038,11 +2085,11 @@ (* ------------------------------------------------------------------------- *) lemma span_explicit: - "span P = {y::'a::semiring_1^_. \S u. finite S \ S \ P \ setsum (\v. u v *s v) S = y}" + "span P = {y. \S u. finite S \ S \ P \ setsum (\v. u v *\<^sub>R v) S = y}" (is "_ = ?E" is "_ = {y. ?h y}" is "_ = {y. \S u. ?Q S u y}") proof- {fix x assume x: "x \ ?E" - then obtain S u where fS: "finite S" and SP: "S\P" and u: "setsum (\v. u v *s v) S = x" + then obtain S u where fS: "finite S" and SP: "S\P" and u: "setsum (\v. u v *\<^sub>R v) S = x" by blast have "x \ span P" unfolding u[symmetric] @@ -3059,7 +2106,7 @@ fix c x y assume x: "x \ P" and hy: "?h y" from hy obtain S u where fS: "finite S" and SP: "S\P" - and u: "setsum (\v. u v *s v) S = y" by blast + and u: "setsum (\v. u v *\<^sub>R v) S = y" by blast let ?S = "insert x S" let ?u = "\y. 
if y = x then (if x \ S then u y + c else c) else u y" @@ -3067,28 +2114,28 @@ {assume xS: "x \ S" have S1: "S = (S - {x}) \ {x}" and Sss:"finite (S - {x})" "finite {x}" "(S -{x}) \ {x} = {}" using xS fS by auto - have "setsum (\v. ?u v *s v) ?S =(\v\S - {x}. u v *s v) + (u x + c) *s x" + have "setsum (\v. ?u v *\<^sub>R v) ?S =(\v\S - {x}. u v *\<^sub>R v) + (u x + c) *\<^sub>R x" using xS by (simp add: setsum_Un_disjoint[OF Sss, unfolded S1[symmetric]] setsum_clauses(2)[OF fS] cong del: if_weak_cong) - also have "\ = (\v\S. u v *s v) + c *s x" + also have "\ = (\v\S. u v *\<^sub>R v) + c *\<^sub>R x" apply (simp add: setsum_Un_disjoint[OF Sss, unfolded S1[symmetric]]) - by (vector ring_simps) - also have "\ = c*s x + y" + by (simp add: algebra_simps) + also have "\ = c*\<^sub>R x + y" by (simp add: add_commute u) - finally have "setsum (\v. ?u v *s v) ?S = c*s x + y" . - then have "?Q ?S ?u (c*s x + y)" using th0 by blast} + finally have "setsum (\v. ?u v *\<^sub>R v) ?S = c*\<^sub>R x + y" . + then have "?Q ?S ?u (c*\<^sub>R x + y)" using th0 by blast} moreover {assume xS: "x \ S" - have th00: "(\v\S. (if v = x then c else u v) *s v) = y" + have th00: "(\v\S. (if v = x then c else u v) *\<^sub>R v) = y" unfolding u[symmetric] apply (rule setsum_cong2) using xS by auto - have "?Q ?S ?u (c*s x + y)" using fS xS th0 + have "?Q ?S ?u (c*\<^sub>R x + y)" using fS xS th0 by (simp add: th00 setsum_clauses add_commute cong del: if_weak_cong)} - ultimately have "?Q ?S ?u (c*s x + y)" + ultimately have "?Q ?S ?u (c*\<^sub>R x + y)" by (cases "x \ S", simp, simp) - then show "?h (c*s x + y)" + then show "?h (c*\<^sub>R x + y)" apply - apply (rule exI[where x="?S"]) apply (rule exI[where x="?u"]) by metis @@ -3097,20 +2144,20 @@ qed lemma dependent_explicit: - "dependent P \ (\S u. finite S \ S \ P \ (\(v::'a::{idom,field}^_) \S. u v \ 0 \ setsum (\v. u v *s v) S = 0))" (is "?lhs = ?rhs") + "dependent P \ (\S u. finite S \ S \ P \ (\v\S. u v \ 0 \ setsum (\v. u v *\<^sub>R v) S = 0))" (is "?lhs = ?rhs") proof- {assume dP: "dependent P" then obtain a S u where aP: "a \ P" and fS: "finite S" - and SP: "S \ P - {a}" and ua: "setsum (\v. u v *s v) S = a" + and SP: "S \ P - {a}" and ua: "setsum (\v. u v *\<^sub>R v) S = a" unfolding dependent_def span_explicit by blast let ?S = "insert a S" let ?u = "\y. if y = a then - 1 else u y" let ?v = a from aP SP have aS: "a \ S" by blast from fS SP aP have th0: "finite ?S" "?S \ P" "?v \ ?S" "?u ?v \ 0" by auto - have s0: "setsum (\v. ?u v *s v) ?S = 0" + have s0: "setsum (\v. ?u v *\<^sub>R v) ?S = 0" using fS aS - apply (simp add: vector_smult_lneg vector_smult_lid setsum_clauses ring_simps ) + apply (simp add: vector_smult_lneg setsum_clauses field_simps) apply (subst (2) ua[symmetric]) apply (rule setsum_cong2) by auto @@ -3122,47 +2169,47 @@ moreover {fix S u v assume fS: "finite S" and SP: "S \ P" and vS: "v \ S" and uv: "u v \ 0" - and u: "setsum (\v. u v *s v) S = 0" + and u: "setsum (\v. u v *\<^sub>R v) S = 0" let ?a = v let ?S = "S - {v}" let ?u = "\i. (- u i) / u v" have th0: "?a \ P" "finite ?S" "?S \ P" using fS SP vS by auto - have "setsum (\v. ?u v *s v) ?S = setsum (\v. (- (inverse (u ?a))) *s (u v *s v)) S - ?u v *s v" + have "setsum (\v. ?u v *\<^sub>R v) ?S = setsum (\v. 
(- (inverse (u ?a))) *\<^sub>R (u v *\<^sub>R v)) S - ?u v *\<^sub>R v" using fS vS uv by (simp add: setsum_diff1 vector_smult_lneg divide_inverse vector_smult_assoc field_simps) also have "\ = ?a" - unfolding setsum_cmul u - using uv by (simp add: vector_smult_lneg) - finally have "setsum (\v. ?u v *s v) ?S = ?a" . + unfolding scaleR_right.setsum [symmetric] u + using uv by simp + finally have "setsum (\v. ?u v *\<^sub>R v) ?S = ?a" . with th0 have ?lhs unfolding dependent_def span_explicit apply - apply (rule bexI[where x= "?a"]) - apply simp_all + apply (simp_all del: scaleR_minus_left) apply (rule exI[where x= "?S"]) - by auto} + by (auto simp del: scaleR_minus_left)} ultimately show ?thesis by blast qed lemma span_finite: assumes fS: "finite S" - shows "span S = {(y::'a::semiring_1^_). \u. setsum (\v. u v *s v) S = y}" + shows "span S = {y. \u. setsum (\v. u v *\<^sub>R v) S = y}" (is "_ = ?rhs") proof- {fix y assume y: "y \ span S" from y obtain S' u where fS': "finite S'" and SS': "S' \ S" and - u: "setsum (\v. u v *s v) S' = y" unfolding span_explicit by blast + u: "setsum (\v. u v *\<^sub>R v) S' = y" unfolding span_explicit by blast let ?u = "\x. if x \ S' then u x else 0" - from setsum_restrict_set[OF fS, of "\v. u v *s v" S', symmetric] SS' - have "setsum (\v. ?u v *s v) S = setsum (\v. u v *s v) S'" + from setsum_restrict_set[OF fS, of "\v. u v *\<^sub>R v" S', symmetric] SS' + have "setsum (\v. ?u v *\<^sub>R v) S = setsum (\v. u v *\<^sub>R v) S'" unfolding cond_value_iff cond_application_beta by (simp add: cond_value_iff inf_absorb2 cong del: if_weak_cong) - hence "setsum (\v. ?u v *s v) S = y" by (metis u) + hence "setsum (\v. ?u v *\<^sub>R v) S = y" by (metis u) hence "y \ ?rhs" by auto} moreover - {fix y u assume u: "setsum (\v. u v *s v) S = y" + {fix y u assume u: "setsum (\v. u v *\<^sub>R v) S = y" then have "y \ span S" using fS unfolding span_explicit by auto} ultimately show ?thesis by blast qed @@ -3170,10 +2217,10 @@ (* Standard bases are a spanning set, and obviously finite. *) -lemma span_stdbasis:"span {basis i :: 'a::ring_1^'n | i. i \ (UNIV :: 'n set)} = UNIV" +lemma span_stdbasis:"span {basis i :: real^'n | i. i \ (UNIV :: 'n set)} = UNIV" apply (rule set_ext) apply auto -apply (subst basis_expansion[symmetric]) +apply (subst basis_expansion'[symmetric]) apply (rule span_setsum) apply simp apply auto @@ -3196,14 +2243,14 @@ lemma independent_stdbasis_lemma: - assumes x: "(x::'a::semiring_1 ^ _) \ span (basis ` S)" + assumes x: "(x::real ^ 'n) \ span (basis ` S)" and iS: "i \ S" shows "(x$i) = 0" proof- let ?U = "UNIV :: 'n set" let ?B = "basis ` S" - let ?P = "\(x::'a^_). \i\ ?U. i \ S \ x$i =0" - {fix x::"'a^_" assume xS: "x\ ?B" + let ?P = "\(x::real^_). \i\ ?U. i \ S \ x$i =0" + {fix x::"real^_" assume xS: "x\ ?B" from xS have "?P x" by auto} moreover have "subspace ?P" @@ -3236,7 +2283,7 @@ (* This is useful for building a basis step-by-step. *) lemma independent_insert: - "independent(insert (a::'a::field ^_) S) \ + "independent(insert a S) \ (if a \ S then independent S else independent S \ a \ span S)" (is "?lhs \ ?rhs") proof- @@ -3285,7 +2332,7 @@ by (metis subset_eq span_superset) lemma spanning_subset_independent: - assumes BA: "B \ A" and iA: "independent (A::('a::field ^_) set)" + assumes BA: "B \ A" and iA: "independent A" and AsB: "A \ span B" shows "A = B" proof @@ -3312,7 +2359,7 @@ (* The general case of the Exchange Lemma, the key to what follows. 
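span_explicit and span_finite above unfold span membership into the existence of a finite linear combination: y lies in span S iff some coefficient function u satisfies setsum (%v. u v *R v) S = y. Over a concrete finite set of real vectors this is just solvability of a linear system, which the following NumPy sketch tests via least squares (an illustration of the statement, not of the Isabelle proof):

    import numpy as np

    S = [np.array([1.0, 0.0, 2.0]), np.array([0.0, 1.0, -1.0])]
    y = 3.0 * S[0] - 2.0 * S[1]                  # built to lie in span S

    def in_span(vecs, target, tol=1e-9):
        # target is in span vecs  iff  the least-squares residual of  M u = target  is ~ 0
        M = np.column_stack(vecs)
        u, *_ = np.linalg.lstsq(M, target, rcond=None)
        return np.linalg.norm(M @ u - target) < tol

    assert in_span(S, y)                               # a coefficient witness u exists
    assert not in_span(S, np.array([0.0, 0.0, 1.0]))   # no combination reaches this vector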
*) lemma exchange_lemma: - assumes f:"finite (t:: ('a::field^'n) set)" and i: "independent s" + assumes f:"finite t" and i: "independent s" and sp:"s \ span t" shows "\t'. (card t' = card t) \ finite t' \ s \ t' \ t' \ s \ t \ s \ span t'" using f i sp @@ -3388,7 +2435,7 @@ (* This implies corresponding size bounds. *) lemma independent_span_bound: - assumes f: "finite t" and i: "independent (s::('a::field^_) set)" and sp:"s \ span t" + assumes f: "finite t" and i: "independent s" and sp:"s \ span t" shows "finite s \ card s \ card t" by (metis exchange_lemma[OF f i sp] finite_subset card_mono) @@ -3479,7 +2526,7 @@ lemma basis_card_eq_dim: "B \ (V:: (real ^'n) set) \ V \ span B \ independent B \ finite B \ card B = dim V" - by (metis order_eq_iff independent_card_le_dim span_card_ge_dim independent_mono independent_bound) + by (metis order_eq_iff independent_card_le_dim span_card_ge_dim independent_bound) lemma dim_unique: "(B::(real ^'n) set) \ V \ V \ span B \ independent B \ card B = n \ dim V = n" by (metis basis_card_eq_dim) @@ -3571,7 +2618,7 @@ by (metis dim_span) lemma spans_image: - assumes lf: "linear (f::'a::semiring_1^_ \ _)" and VB: "V \ span B" + assumes lf: "linear f" and VB: "V \ span B" shows "f ` V \ span (f ` B)" unfolding span_linear_image[OF lf] by (metis VB image_mono) @@ -3593,7 +2640,7 @@ (* Relation between bases and injectivity/surjectivity of map. *) lemma spanning_surjective_image: - assumes us: "UNIV \ span (S:: ('a::semiring_1 ^_) set)" + assumes us: "UNIV \ span S" and lf: "linear f" and sf: "surj f" shows "UNIV \ span (f ` S)" proof- @@ -3603,7 +2650,7 @@ qed lemma independent_injective_image: - assumes iS: "independent (S::('a::semiring_1^_) set)" and lf: "linear f" and fi: "inj f" + assumes iS: "independent S" and lf: "linear f" and fi: "inj f" shows "independent (f ` S)" proof- {fix a assume a: "a \ S" "f a \ span (f ` S - {f a})" @@ -3638,14 +2685,14 @@ from `\C. finite C \ card C \ card B \ span C = span B \ pairwise orthogonal C` obtain C where C: "finite C" "card C \ card B" "span C = span B" "pairwise orthogonal C" by blast - let ?a = "a - setsum (\x. (x \ a / (x \ x)) *s x) C" + let ?a = "a - setsum (\x. (x \ a / (x \ x)) *\<^sub>R x) C" let ?C = "insert ?a C" from C(1) have fC: "finite ?C" by simp from fB aB C(1,2) have cC: "card ?C \ card (insert a B)" by (simp add: card_insert_if) {fix x k - have th0: "\(a::'b::comm_ring) b c. a - (b - c) = c + (a - b)" by (simp add: ring_simps) - have "x - k *s (a - (\x\C. (x \ a / (x \ x)) *s x)) \ span C \ x - k *s a \ span C" - apply (simp only: vector_ssub_ldistrib th0) + have th0: "\(a::'b::comm_ring) b c. a - (b - c) = c + (a - b)" by (simp add: field_simps) + have "x - k *\<^sub>R (a - (\x\C. 
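The exchange lemma and independent_span_bound above supply the counting principle behind dim: an independent set contained in the span of a finite set t has at most card t elements, so all bases of a subspace have the same size (basis_card_eq_dim, dim_unique). Numerically this is the familiar rank bound; a quick NumPy sanity check (illustrative only; the vectors are arbitrary):

    import numpy as np

    t = [np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 1.0])]   # spanning set, card t = 2
    s = [t[0] + t[1], t[0] - t[1], 2.0 * t[0] + 3.0 * t[1]]      # three vectors in span t

    # independent_span_bound: s cannot be independent, since card s = 3 > card t = 2
    assert np.linalg.matrix_rank(np.column_stack(s)) < len(s)
    # t itself is independent, so dim (span t) = card t = 2
    assert np.linalg.matrix_rank(np.column_stack(t)) == len(t)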
(x \ a / (x \ x)) *\<^sub>R x)) \ span C \ x - k *\<^sub>R a \ span C" + apply (simp only: scaleR_right_diff_distrib th0) apply (rule span_add_eq) apply (rule span_mul) apply (rule span_setsum[OF C(1)]) @@ -3669,7 +2716,7 @@ apply (subst Cy) using C(1) fth apply (simp only: setsum_clauses) unfolding smult_conv_scaleR - apply (auto simp add: inner_simps inner_eq_zero_iff inner_commute[of y a] dot_lsum[OF fth]) + apply (auto simp add: inner_simps inner_commute[of y a] dot_lsum[OF fth]) apply (rule setsum_0') apply clarsimp apply (rule C(4)[unfolded pairwise_def orthogonal_def, rule_format]) @@ -3686,7 +2733,7 @@ using C(1) fth apply (simp only: setsum_clauses) unfolding smult_conv_scaleR apply (subst inner_commute[of x]) - apply (auto simp add: inner_simps inner_eq_zero_iff inner_commute[of x a] dot_rsum[OF fth]) + apply (auto simp add: inner_simps inner_commute[of x a] dot_rsum[OF fth]) apply (rule setsum_0') apply clarsimp apply (rule C(4)[unfolded pairwise_def orthogonal_def, rule_format]) @@ -3739,8 +2786,8 @@ from B have fB: "finite B" "card B = dim S" using independent_bound by auto from span_mono[OF B(2)] span_mono[OF B(3)] have sSB: "span S = span B" by (simp add: span_span) - let ?a = "a - setsum (\b. (a \ b / (b \ b)) *s b) B" - have "setsum (\b. (a \ b / (b \ b)) *s b) B \ span S" + let ?a = "a - setsum (\b. (a \ b / (b \ b)) *\<^sub>R b) B" + have "setsum (\b. (a \ b / (b \ b)) *\<^sub>R b) B \ span S" unfolding sSB apply (rule span_setsum[OF fB(1)]) apply clarsimp @@ -3759,10 +2806,10 @@ apply (subst B') using fB fth unfolding setsum_clauses(2)[OF fth] apply simp unfolding inner_simps smult_conv_scaleR - apply (clarsimp simp add: inner_simps inner_eq_zero_iff smult_conv_scaleR dot_lsum) + apply (clarsimp simp add: inner_simps smult_conv_scaleR dot_lsum) apply (rule setsum_0', rule ballI) unfolding inner_commute - by (auto simp add: x field_simps inner_eq_zero_iff intro: B(5)[unfolded pairwise_def orthogonal_def, rule_format])} + by (auto simp add: x field_simps intro: B(5)[unfolded pairwise_def orthogonal_def, rule_format])} then show "\x \ B. ?a \ x = 0" by blast qed with a0 show ?thesis unfolding sSB by (auto intro: exI[where x="?a"]) @@ -3791,7 +2838,7 @@ assumes lf: "linear f" and fB: "finite B" and ifB: "independent (f ` B)" and fi: "inj_on f B" and xsB: "x \ span B" - and fx: "f (x::'a::field^_) = 0" + and fx: "f x = 0" shows "x = 0" using fB ifB fi xsB fx proof(induct arbitrary: x rule: finite_induct[OF fB]) @@ -3807,14 +2854,14 @@ apply (rule subset_inj_on [OF "2.prems"(3)]) by blast from span_breakdown[of a "insert a b", simplified, OF "2.prems"(4)] - obtain k where k: "x - k*s a \ span (b -{a})" by blast - have "f (x - k*s a) \ span (f ` b)" + obtain k where k: "x - k*\<^sub>R a \ span (b -{a})" by blast + have "f (x - k*\<^sub>R a) \ span (f ` b)" unfolding span_linear_image[OF lf] apply (rule imageI) using k span_mono[of "b-{a}" b] by blast - hence "f x - k*s f a \ span (f ` b)" + hence "f x - k*\<^sub>R f a \ span (f ` b)" by (simp add: linear_sub[OF lf] linear_cmul[OF lf]) - hence th: "-k *s f a \ span (f ` b)" + hence th: "-k *\<^sub>R f a \ span (f ` b)" using "2.prems"(5) by (simp add: vector_smult_lneg) {assume k0: "k = 0" from k0 k have "x \ span (b -{a})" by simp @@ -3841,9 +2888,10 @@ (* We can extend a linear mapping from basis. *) lemma linear_independent_extend_lemma: + fixes f :: "'a::real_vector \ 'b::real_vector" assumes fi: "finite B" and ib: "independent B" - shows "\g. (\x\ span B. \y\ span B. g ((x::'a::field^'n) + y) = g x + g y) - \ (\x\ span B. 
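The construction carried through this hunk is the classical projection step: from a vector a and a pairwise-orthogonal set C, form ?a = a - setsum (%x. (x . a / (x . x)) *R x) C; the remainder is orthogonal to every element of C, and span_not_univ_orthogonal uses that it is nonzero whenever a lies outside span C. The same computation in NumPy, for that step only (the concrete vectors are arbitrary examples):

    import numpy as np

    C = [np.array([1.0, 1.0, 0.0]), np.array([1.0, -1.0, 0.0])]   # pairwise orthogonal
    a = np.array([2.0, 0.5, 3.0])                                 # not in span C

    resid = a - sum((np.dot(x, a) / np.dot(x, x)) * x for x in C) # the ?a of the proof

    assert all(abs(np.dot(resid, x)) < 1e-12 for x in C)   # orthogonal to every element of C
    assert np.linalg.norm(resid) > 0                       # nonzero, since a is outside span C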
\c. g (c*s x) = c *s g x) + shows "\g. (\x\ span B. \y\ span B. g (x + y) = g x + g y) + \ (\x\ span B. \c. g (c*\<^sub>R x) = c *\<^sub>R g x) \ (\x\ B. g x = f x)" using ib fi proof(induct rule: finite_induct[OF fi]) @@ -3854,30 +2902,30 @@ by (simp_all add: independent_insert) from "2.hyps"(3)[OF ibf] obtain g where g: "\x\span b. \y\span b. g (x + y) = g x + g y" - "\x\span b. \c. g (c *s x) = c *s g x" "\x\b. g x = f x" by blast - let ?h = "\z. SOME k. (z - k *s a) \ span b" + "\x\span b. \c. g (c *\<^sub>R x) = c *\<^sub>R g x" "\x\b. g x = f x" by blast + let ?h = "\z. SOME k. (z - k *\<^sub>R a) \ span b" {fix z assume z: "z \ span (insert a b)" - have th0: "z - ?h z *s a \ span b" + have th0: "z - ?h z *\<^sub>R a \ span b" apply (rule someI_ex) unfolding span_breakdown_eq[symmetric] using z . - {fix k assume k: "z - k *s a \ span b" - have eq: "z - ?h z *s a - (z - k*s a) = (k - ?h z) *s a" - by (simp add: ring_simps vector_sadd_rdistrib[symmetric]) + {fix k assume k: "z - k *\<^sub>R a \ span b" + have eq: "z - ?h z *\<^sub>R a - (z - k*\<^sub>R a) = (k - ?h z) *\<^sub>R a" + by (simp add: field_simps scaleR_left_distrib [symmetric]) from span_sub[OF th0 k] - have khz: "(k - ?h z) *s a \ span b" by (simp add: eq) + have khz: "(k - ?h z) *\<^sub>R a \ span b" by (simp add: eq) {assume "k \ ?h z" hence k0: "k - ?h z \ 0" by simp from k0 span_mul[OF khz, of "1 /(k - ?h z)"] have "a \ span b" by (simp add: vector_smult_assoc) with "2.prems"(1) "2.hyps"(2) have False by (auto simp add: dependent_def)} then have "k = ?h z" by blast} - with th0 have "z - ?h z *s a \ span b \ (\k. z - k *s a \ span b \ k = ?h z)" by blast} + with th0 have "z - ?h z *\<^sub>R a \ span b \ (\k. z - k *\<^sub>R a \ span b \ k = ?h z)" by blast} note h = this - let ?g = "\z. ?h z *s f a + g (z - ?h z *s a)" + let ?g = "\z. ?h z *\<^sub>R f a + g (z - ?h z *\<^sub>R a)" {fix x y assume x: "x \ span (insert a b)" and y: "y \ span (insert a b)" - have tha: "\(x::'a^'n) y a k l. (x + y) - (k + l) *s a = (x - k *s a) + (y - l *s a)" - by (vector ring_simps) + have tha: "\(x::'a) y a k l. (x + y) - (k + l) *\<^sub>R a = (x - k *\<^sub>R a) + (y - l *\<^sub>R a)" + by (simp add: algebra_simps) have addh: "?h (x + y) = ?h x + ?h y" apply (rule conjunct2[OF h, rule_format, symmetric]) apply (rule span_add[OF x y]) @@ -3886,18 +2934,18 @@ have "?g (x + y) = ?g x + ?g y" unfolding addh tha g(1)[rule_format,OF conjunct1[OF h, OF x] conjunct1[OF h, OF y]] - by (simp add: vector_sadd_rdistrib)} + by (simp add: scaleR_left_distrib)} moreover - {fix x:: "'a^'n" and c:: 'a assume x: "x \ span (insert a b)" - have tha: "\(x::'a^'n) c k a. c *s x - (c * k) *s a = c *s (x - k *s a)" - by (vector ring_simps) - have hc: "?h (c *s x) = c * ?h x" + {fix x:: "'a" and c:: real assume x: "x \ span (insert a b)" + have tha: "\(x::'a) c k a. 
c *\<^sub>R x - (c * k) *\<^sub>R a = c *\<^sub>R (x - k *\<^sub>R a)" + by (simp add: algebra_simps) + have hc: "?h (c *\<^sub>R x) = c * ?h x" apply (rule conjunct2[OF h, rule_format, symmetric]) apply (metis span_mul x) by (metis tha span_mul x conjunct1[OF h]) - have "?g (c *s x) = c*s ?g x" + have "?g (c *\<^sub>R x) = c*\<^sub>R ?g x" unfolding hc tha g(2)[rule_format, OF conjunct1[OF h, OF x]] - by (vector ring_simps)} + by (simp add: algebra_simps)} moreover {fix x assume x: "x \ (insert a b)" {assume xa: "x = a" @@ -3915,7 +2963,7 @@ {assume xb: "x \ b" have h0: "0 = ?h x" apply (rule conjunct2[OF h, rule_format]) - apply (metis span_superset insertI1 xb x) + apply (metis span_superset x) apply simp apply (metis span_superset xb) done @@ -3934,7 +2982,7 @@ from C(2) independent_bound[of C] linear_independent_extend_lemma[of C f] obtain g where g: "(\x\ span C. \y\ span C. g (x + y) = g x + g y) - \ (\x\ span C. \c. g (c*s x) = c *s g x) + \ (\x\ span C. \c. g (c*\<^sub>R x) = c *\<^sub>R g x) \ (\x\ C. g x = f x)" by blast from g show ?thesis unfolding linear_def using C apply clarsimp by blast @@ -4021,14 +3069,14 @@ (* linear functions are equal on a subspace if they are on a spanning set. *) lemma subspace_kernel: - assumes lf: "linear (f::'a::semiring_1 ^_ \ _)" + assumes lf: "linear f" shows "subspace {x. f x = 0}" apply (simp add: subspace_def) by (simp add: linear_add[OF lf] linear_cmul[OF lf] linear_0[OF lf]) lemma linear_eq_0_span: assumes lf: "linear f" and f0: "\x\B. f x = 0" - shows "\x \ span B. f x = (0::'a::semiring_1 ^_)" + shows "\x \ span B. f x = 0" proof fix x assume x: "x \ span B" let ?P = "\x. f x = 0" @@ -4038,11 +3086,11 @@ lemma linear_eq_0: assumes lf: "linear f" and SB: "S \ span B" and f0: "\x\B. f x = 0" - shows "\x \ S. f x = (0::'a::semiring_1^_)" + shows "\x \ S. f x = 0" by (metis linear_eq_0_span[OF lf] subset_eq SB f0) lemma linear_eq: - assumes lf: "linear (f::'a::ring_1^_ \ _)" and lg: "linear g" and S: "S \ span B" + assumes lf: "linear f" and lg: "linear g" and S: "S \ span B" and fg: "\ x\ B. f x = g x" shows "\x\ S. f x = g x" proof- @@ -4053,15 +3101,15 @@ qed lemma linear_eq_stdbasis: - assumes lf: "linear (f::'a::ring_1^'m \ 'a^'n)" and lg: "linear g" + assumes lf: "linear (f::real^'m \ _)" and lg: "linear g" and fg: "\i. f (basis i) = g(basis i)" shows "f = g" proof- let ?U = "UNIV :: 'm set" - let ?I = "{basis i:: 'a^'m|i. i \ ?U}" - {fix x assume x: "x \ (UNIV :: ('a^'m) set)" + let ?I = "{basis i:: real^'m|i. i \ ?U}" + {fix x assume x: "x \ (UNIV :: (real^'m) set)" from equalityD2[OF span_stdbasis] - have IU: " (UNIV :: ('a^'m) set) \ span ?I" by blast + have IU: " (UNIV :: (real^'m) set) \ span ?I" by blast from linear_eq[OF lf lg IU] fg x have "f x = g x" unfolding Collect_def Ball_def mem_def by metis} then show ?thesis by (auto intro: ext) @@ -4070,7 +3118,7 @@ (* Similar results for bilinear functions. *) lemma bilinear_eq: - assumes bf: "bilinear (f:: 'a::ring^_ \ 'a^_ \ 'a^_)" + assumes bf: "bilinear f" and bg: "bilinear g" and SB: "S \ span B" and TC: "T \ span C" and fg: "\x\ B. \y\ C. f x y = g x y" @@ -4098,7 +3146,7 @@ qed lemma bilinear_eq_stdbasis: - assumes bf: "bilinear (f:: 'a::ring_1^'m \ 'a^'n \ 'a^_)" + assumes bf: "bilinear (f:: real^'m \ real^'n \ _)" and bg: "bilinear g" and fg: "\i j. 
f (basis i) (basis j) = g (basis i) (basis j)" shows "f = g" @@ -4251,6 +3299,7 @@ unfolding y[symmetric] apply (rule span_setsum[OF fU]) apply clarify + unfolding smult_conv_scaleR apply (rule span_mul) apply (rule span_superset) unfolding columns_def @@ -4260,10 +3309,9 @@ {assume h:?rhs let ?P = "\(y::real ^'n). \(x::real^'m). setsum (\i. (x$i) *s column i A) ?U = y" {fix y have "?P y" - proof(rule span_induct_alt[of ?P "columns A"]) + proof(rule span_induct_alt[of ?P "columns A", folded smult_conv_scaleR]) show "\x\real ^ 'm. setsum (\i. (x$i) *s column i A) ?U = 0" - apply (rule exI[where x=0]) - by (simp add: zero_index vector_smult_lzero) + by (rule exI[where x=0], simp) next fix c y1 y2 assume y1: "y1 \ columns A" and y2: "?P y2" from y1 obtain i where i: "i \ ?U" "y1 = column i A" @@ -4276,7 +3324,7 @@ fix j have th: "\xa \ ?U. (if xa = i then (c + (x$i)) * ((column xa A)$j) else (x$xa) * ((column xa A$j))) = (if xa = i then c * ((column i A)$j) else 0) + ((x$xa) * ((column xa A)$j))" using i(1) - by (simp add: ring_simps) + by (simp add: field_simps) have "setsum (\xa. if xa = i then (c + (x$i)) * ((column xa A)$j) else (x$xa) * ((column xa A$j))) ?U = setsum (\xa. (if xa = i then c * ((column i A)$j) else 0) + ((x$xa) * ((column xa A)$j))) ?U" apply (rule setsum_cong[OF refl]) @@ -4619,7 +3667,7 @@ from infnorm_triangle[of "x - y" " y"] infnorm_triangle[of "x - y" "-x"] have ths: "infnorm x \ infnorm (x - y) + infnorm y" "infnorm y \ infnorm (x - y) + infnorm x" - by (simp_all add: ring_simps infnorm_neg diff_def[symmetric]) + by (simp_all add: field_simps infnorm_neg diff_def[symmetric]) from th[OF ths] show ?thesis . qed @@ -4687,7 +3735,7 @@ hence d2: "(sqrt (real ?d))^2 = real ?d" by (auto intro: real_sqrt_pow2) have th: "sqrt (real ?d) * infnorm x \ 0" - by (simp add: zero_le_mult_iff real_sqrt_ge_0_iff infnorm_pos_le) + by (simp add: zero_le_mult_iff infnorm_pos_le) have th1: "x \ x \ (sqrt (real ?d) * infnorm x)^2" unfolding power_mult_distrib d2 unfolding real_of_nat_def inner_vector_def @@ -4704,7 +3752,7 @@ (* Equality in Cauchy-Schwarz and triangle inequalities. 
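The infnorm material above culminates in the bound norm x <= sqrt (real ?d) * infnorm x (the th1 step with the dimension ?d); together with the standard bound infnorm x <= norm x, the two norms are equivalent up to a sqrt(dimension) factor. A direct numerical check (Python; the vector is arbitrary):

    import numpy as np

    x = np.array([3.0, -4.0, 1.0, 0.5])
    d = x.size
    infnorm = np.max(np.abs(x))                    # maximum absolute component
    norm = np.linalg.norm(x)                       # Euclidean norm

    assert infnorm <= norm + 1e-12                 # infnorm x <= norm x
    assert norm <= np.sqrt(d) * infnorm + 1e-12    # norm x <= sqrt d * infnorm x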
*) -lemma norm_cauchy_schwarz_eq: "(x::real ^'n) \ y = norm x * norm y \ norm x *s y = norm y *s x" (is "?lhs \ ?rhs") +lemma norm_cauchy_schwarz_eq: "x \ y = norm x * norm y \ norm x *\<^sub>R y = norm y *\<^sub>R x" (is "?lhs \ ?rhs") proof- {assume h: "x = 0" hence ?thesis by simp} @@ -4713,14 +3761,14 @@ hence ?thesis by simp} moreover {assume x: "x \ 0" and y: "y \ 0" - from inner_eq_zero_iff[of "norm y *s x - norm x *s y"] + from inner_eq_zero_iff[of "norm y *\<^sub>R x - norm x *\<^sub>R y"] have "?rhs \ (norm y * (norm y * norm x * norm x - norm x * (x \ y)) - norm x * (norm y * (y \ x) - norm x * norm y * norm y) = 0)" using x y unfolding inner_simps smult_conv_scaleR unfolding power2_norm_eq_inner[symmetric] power2_eq_square diff_eq_0_iff_eq apply (simp add: inner_commute) - apply (simp add: ring_simps) by metis + apply (simp add: field_simps) by metis also have "\ \ (2 * norm x * norm y * (norm x * norm y - x \ y) = 0)" using x y - by (simp add: ring_simps inner_commute) + by (simp add: field_simps inner_commute) also have "\ \ ?lhs" using x y apply simp by metis @@ -4729,26 +3777,24 @@ qed lemma norm_cauchy_schwarz_abs_eq: - fixes x y :: "real ^ 'n" shows "abs(x \ y) = norm x * norm y \ - norm x *s y = norm y *s x \ norm(x) *s y = - norm y *s x" (is "?lhs \ ?rhs") + norm x *\<^sub>R y = norm y *\<^sub>R x \ norm(x) *\<^sub>R y = - norm y *\<^sub>R x" (is "?lhs \ ?rhs") proof- have th: "\(x::real) a. a \ 0 \ abs x = a \ x = a \ x = - a" by arith - have "?rhs \ norm x *s y = norm y *s x \ norm (- x) *s y = norm y *s (- x)" - apply simp by vector + have "?rhs \ norm x *\<^sub>R y = norm y *\<^sub>R x \ norm (- x) *\<^sub>R y = norm y *\<^sub>R (- x)" + by simp also have "\ \(x \ y = norm x * norm y \ (-x) \ y = norm x * norm y)" unfolding norm_cauchy_schwarz_eq[symmetric] - unfolding norm_minus_cancel - norm_mul by blast + unfolding norm_minus_cancel norm_scaleR .. also have "\ \ ?lhs" unfolding th[OF mult_nonneg_nonneg, OF norm_ge_zero[of x] norm_ge_zero[of y]] inner_simps by auto finally show ?thesis .. qed lemma norm_triangle_eq: - fixes x y :: "real ^ 'n" - shows "norm(x + y) = norm x + norm y \ norm x *s y = norm y *s x" + fixes x y :: "'a::real_inner" + shows "norm(x + y) = norm x + norm y \ norm x *\<^sub>R y = norm y *\<^sub>R x" proof- {assume x: "x =0 \ y =0" hence ?thesis by (cases "x=0", simp_all)} @@ -4763,72 +3809,69 @@ have "norm(x + y) = norm x + norm y \ norm(x + y)^ 2 = (norm x + norm y) ^2" apply (rule th) using n norm_ge_zero[of "x + y"] by arith - also have "\ \ norm x *s y = norm y *s x" + also have "\ \ norm x *\<^sub>R y = norm y *\<^sub>R x" unfolding norm_cauchy_schwarz_eq[symmetric] unfolding power2_norm_eq_inner inner_simps - by (simp add: power2_norm_eq_inner[symmetric] power2_eq_square inner_commute ring_simps) + by (simp add: power2_norm_eq_inner[symmetric] power2_eq_square inner_commute field_simps) finally have ?thesis .} ultimately show ?thesis by blast qed (* Collinearity.*) -definition "collinear S \ (\u. \x \ S. \ y \ S. \c. x - y = c *s u)" +definition + collinear :: "'a::real_vector set \ bool" where + "collinear S \ (\u. \x \ S. \ y \ S. \c. 
x - y = c *\<^sub>R u)" lemma collinear_empty: "collinear {}" by (simp add: collinear_def) -lemma collinear_sing: "collinear {(x::'a::ring_1^_)}" - apply (simp add: collinear_def) - apply (rule exI[where x=0]) - by simp - -lemma collinear_2: "collinear {(x::'a::ring_1^_),y}" +lemma collinear_sing: "collinear {x}" + by (simp add: collinear_def) + +lemma collinear_2: "collinear {x, y}" apply (simp add: collinear_def) apply (rule exI[where x="x - y"]) apply auto - apply (rule exI[where x=0], simp) apply (rule exI[where x=1], simp) - apply (rule exI[where x="- 1"], simp add: vector_sneg_minus1[symmetric]) - apply (rule exI[where x=0], simp) + apply (rule exI[where x="- 1"], simp) done -lemma collinear_lemma: "collinear {(0::real^_),x,y} \ x = 0 \ y = 0 \ (\c. y = c *s x)" (is "?lhs \ ?rhs") +lemma collinear_lemma: "collinear {0,x,y} \ x = 0 \ y = 0 \ (\c. y = c *\<^sub>R x)" (is "?lhs \ ?rhs") proof- {assume "x=0 \ y = 0" hence ?thesis by (cases "x = 0", simp_all add: collinear_2 insert_commute)} moreover {assume x: "x \ 0" and y: "y \ 0" {assume h: "?lhs" - then obtain u where u: "\ x\ {0,x,y}. \y\ {0,x,y}. \c. x - y = c *s u" unfolding collinear_def by blast + then obtain u where u: "\ x\ {0,x,y}. \y\ {0,x,y}. \c. x - y = c *\<^sub>R u" unfolding collinear_def by blast from u[rule_format, of x 0] u[rule_format, of y 0] obtain cx and cy where - cx: "x = cx*s u" and cy: "y = cy*s u" + cx: "x = cx *\<^sub>R u" and cy: "y = cy *\<^sub>R u" by auto from cx x have cx0: "cx \ 0" by auto from cy y have cy0: "cy \ 0" by auto let ?d = "cy / cx" - from cx cy cx0 have "y = ?d *s x" + from cx cy cx0 have "y = ?d *\<^sub>R x" by (simp add: vector_smult_assoc) hence ?rhs using x y by blast} moreover {assume h: "?rhs" - then obtain c where c: "y = c*s x" using x y by blast + then obtain c where c: "y = c *\<^sub>R x" using x y by blast have ?lhs unfolding collinear_def c apply (rule exI[where x=x]) apply auto - apply (rule exI[where x="- 1"], simp only: vector_smult_lneg vector_smult_lid) - apply (rule exI[where x= "-c"], simp only: vector_smult_lneg) + apply (rule exI[where x="- 1"], simp) + apply (rule exI[where x= "-c"], simp) apply (rule exI[where x=1], simp) - apply (rule exI[where x="1 - c"], simp add: vector_smult_lneg vector_sub_rdistrib) - apply (rule exI[where x="c - 1"], simp add: vector_smult_lneg vector_sub_rdistrib) + apply (rule exI[where x="1 - c"], simp add: scaleR_left_diff_distrib) + apply (rule exI[where x="c - 1"], simp add: scaleR_left_diff_distrib) done} ultimately have ?thesis by blast} ultimately show ?thesis by blast qed lemma norm_cauchy_schwarz_equal: - fixes x y :: "real ^ 'n" - shows "abs(x \ y) = norm x * norm y \ collinear {(0::real^'n),x,y}" + shows "abs(x \ y) = norm x * norm y \ collinear {0,x,y}" unfolding norm_cauchy_schwarz_abs_eq apply (cases "x=0", simp_all add: collinear_2) apply (cases "y=0", simp_all add: collinear_2 insert_commute) @@ -4837,28 +3880,27 @@ apply (subgoal_tac "norm x \ 0") apply (subgoal_tac "norm y \ 0") apply (rule iffI) -apply (cases "norm x *s y = norm y *s x") +apply (cases "norm x *\<^sub>R y = norm y *\<^sub>R x") apply (rule exI[where x="(1/norm x) * norm y"]) apply (drule sym) -unfolding vector_smult_assoc[symmetric] +unfolding scaleR_scaleR[symmetric] apply (simp add: vector_smult_assoc field_simps) apply (rule exI[where x="(1/norm x) * - norm y"]) apply clarify apply (drule sym) -unfolding vector_smult_assoc[symmetric] +unfolding scaleR_scaleR[symmetric] apply (simp add: vector_smult_assoc field_simps) apply (erule exE) apply (erule 
ssubst) -unfolding vector_smult_assoc -unfolding norm_mul +unfolding scaleR_scaleR +unfolding norm_scaleR apply (subgoal_tac "norm x * c = \c\ * norm x \ norm x * c = - \c\ * norm x") -apply (case_tac "c <= 0", simp add: ring_simps) -apply (simp add: ring_simps) -apply (case_tac "c <= 0", simp add: ring_simps) -apply (simp add: ring_simps) +apply (case_tac "c <= 0", simp add: field_simps) +apply (simp add: field_simps) +apply (case_tac "c <= 0", simp add: field_simps) +apply (simp add: field_simps) apply simp apply simp done end - \ No newline at end of file diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Multivariate_Analysis/Fashoda.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Multivariate_Analysis/Fashoda.thy Tue May 04 20:30:22 2010 +0200 @@ -0,0 +1,556 @@ +(* Author: John Harrison + Translation from HOL light: Robert Himmelmann, TU Muenchen *) + +header {* Fashoda meet theorem. *} + +theory Fashoda +imports Brouwer_Fixpoint Vec1 Path_Connected +begin + +subsection {*Fashoda meet theorem. *} + +lemma infnorm_2: "infnorm (x::real^2) = max (abs(x$1)) (abs(x$2))" + unfolding infnorm_def UNIV_2 apply(rule Sup_eq) by auto + +lemma infnorm_eq_1_2: "infnorm (x::real^2) = 1 \ + (abs(x$1) \ 1 \ abs(x$2) \ 1 \ (x$1 = -1 \ x$1 = 1 \ x$2 = -1 \ x$2 = 1))" + unfolding infnorm_2 by auto + +lemma infnorm_eq_1_imp: assumes "infnorm (x::real^2) = 1" shows "abs(x$1) \ 1" "abs(x$2) \ 1" + using assms unfolding infnorm_eq_1_2 by auto + +lemma fashoda_unit: fixes f g::"real \ real^2" + assumes "f ` {- 1..1} \ {- 1..1}" "g ` {- 1..1} \ {- 1..1}" + "continuous_on {- 1..1} f" "continuous_on {- 1..1} g" + "f (- 1)$1 = - 1" "f 1$1 = 1" "g (- 1) $2 = -1" "g 1 $2 = 1" + shows "\s\{- 1..1}. \t\{- 1..1}. f s = g t" proof(rule ccontr) + case goal1 note as = this[unfolded bex_simps,rule_format] + def sqprojection \ "\z::real^2. (inverse (infnorm z)) *\<^sub>R z" + def negatex \ "\x::real^2. (vector [-(x$1), x$2])::real^2" + have lem1:"\z::real^2. infnorm(negatex z) = infnorm z" + unfolding negatex_def infnorm_2 vector_2 by auto + have lem2:"\z. z\0 \ infnorm(sqprojection z) = 1" unfolding sqprojection_def + unfolding infnorm_mul[unfolded smult_conv_scaleR] unfolding abs_inverse real_abs_infnorm + unfolding infnorm_eq_0[THEN sym] by auto + let ?F = "(\w::real^2. (f \ (\x. x$1)) w - (g \ (\x. x$2)) w)" + have *:"\i. (\x::real^2. x $ i) ` {- 1..1} = {- 1..1::real}" + apply(rule set_ext) unfolding image_iff Bex_def mem_interval apply rule defer + apply(rule_tac x="vec x" in exI) by auto + { fix x assume "x \ (\w. (f \ (\x. x $ 1)) w - (g \ (\x. x $ 2)) w) ` {- 1..1::real^2}" + then guess w unfolding image_iff .. note w = this + hence "x \ 0" using as[of "w$1" "w$2"] unfolding mem_interval by auto} note x0=this + have 21:"\i::2. i\1 \ i=2" using UNIV_2 by auto + have 1:"{- 1<..<1::real^2} \ {}" unfolding interval_eq_empty by auto + have 2:"continuous_on {- 1..1} (negatex \ sqprojection \ ?F)" apply(rule continuous_on_intros continuous_on_component continuous_on_vec1)+ + prefer 2 apply(rule continuous_on_intros continuous_on_component continuous_on_vec1)+ unfolding * + apply(rule assms)+ apply(rule continuous_on_compose,subst sqprojection_def) + apply(rule continuous_on_mul ) apply(rule continuous_at_imp_continuous_on,rule) apply(rule continuous_at_inv[unfolded o_def]) + apply(rule continuous_at_infnorm) unfolding infnorm_eq_0 defer apply(rule continuous_on_id) apply(rule linear_continuous_on) proof- + show "bounded_linear negatex" apply(rule bounded_linearI') unfolding Cart_eq proof(rule_tac[!] 
allI) fix i::2 and x y::"real^2" and c::real + show "negatex (x + y) $ i = (negatex x + negatex y) $ i" "negatex (c *\<^sub>R x) $ i = (c *\<^sub>R negatex x) $ i" + apply-apply(case_tac[!] "i\1") prefer 3 apply(drule_tac[1-2] 21) + unfolding negatex_def by(auto simp add:vector_2 ) qed qed(insert x0, auto) + have 3:"(negatex \ sqprojection \ ?F) ` {- 1..1} \ {- 1..1}" unfolding subset_eq apply rule proof- + case goal1 then guess y unfolding image_iff .. note y=this have "?F y \ 0" apply(rule x0) using y(1) by auto + hence *:"infnorm (sqprojection (?F y)) = 1" unfolding y o_def apply- by(rule lem2[rule_format]) + have "infnorm x = 1" unfolding *[THEN sym] y o_def by(rule lem1[rule_format]) + thus "x\{- 1..1}" unfolding mem_interval infnorm_2 apply- apply rule + proof-case goal1 thus ?case apply(cases "i=1") defer apply(drule 21) by auto qed qed + guess x apply(rule brouwer_weak[of "{- 1..1::real^2}" "negatex \ sqprojection \ ?F"]) + apply(rule compact_interval convex_interval)+ unfolding interior_closed_interval + apply(rule 1 2 3)+ . note x=this + have "?F x \ 0" apply(rule x0) using x(1) by auto + hence *:"infnorm (sqprojection (?F x)) = 1" unfolding o_def by(rule lem2[rule_format]) + have nx:"infnorm x = 1" apply(subst x(2)[THEN sym]) unfolding *[THEN sym] o_def by(rule lem1[rule_format]) + have "\x i. x \ 0 \ (0 < (sqprojection x)$i \ 0 < x$i)" "\x i. x \ 0 \ ((sqprojection x)$i < 0 \ x$i < 0)" + apply- apply(rule_tac[!] allI impI)+ proof- fix x::"real^2" and i::2 assume x:"x\0" + have "inverse (infnorm x) > 0" using x[unfolded infnorm_pos_lt[THEN sym]] by auto + thus "(0 < sqprojection x $ i) = (0 < x $ i)" "(sqprojection x $ i < 0) = (x $ i < 0)" + unfolding sqprojection_def vector_component_simps Cart_nth.scaleR real_scaleR_def + unfolding zero_less_mult_iff mult_less_0_iff by(auto simp add:field_simps) qed + note lem3 = this[rule_format] + have x1:"x $ 1 \ {- 1..1::real}" "x $ 2 \ {- 1..1::real}" using x(1) unfolding mem_interval by auto + hence nz:"f (x $ 1) - g (x $ 2) \ 0" unfolding right_minus_eq apply-apply(rule as) by auto + have "x $ 1 = -1 \ x $ 1 = 1 \ x $ 2 = -1 \ x $ 2 = 1" using nx unfolding infnorm_eq_1_2 by auto + thus False proof- fix P Q R S + presume "P \ Q \ R \ S" "P\False" "Q\False" "R\False" "S\False" thus False by auto + next assume as:"x$1 = 1" + hence *:"f (x $ 1) $ 1 = 1" using assms(6) by auto + have "sqprojection (f (x$1) - g (x$2)) $ 1 < 0" + using x(2)[unfolded o_def Cart_eq,THEN spec[where x=1]] + unfolding as negatex_def vector_2 by auto moreover + from x1 have "g (x $ 2) \ {- 1..1}" apply-apply(rule assms(2)[unfolded subset_eq,rule_format]) by auto + ultimately show False unfolding lem3[OF nz] vector_component_simps * mem_interval + apply(erule_tac x=1 in allE) by auto + next assume as:"x$1 = -1" + hence *:"f (x $ 1) $ 1 = - 1" using assms(5) by auto + have "sqprojection (f (x$1) - g (x$2)) $ 1 > 0" + using x(2)[unfolded o_def Cart_eq,THEN spec[where x=1]] + unfolding as negatex_def vector_2 by auto moreover + from x1 have "g (x $ 2) \ {- 1..1}" apply-apply(rule assms(2)[unfolded subset_eq,rule_format]) by auto + ultimately show False unfolding lem3[OF nz] vector_component_simps * mem_interval + apply(erule_tac x=1 in allE) by auto + next assume as:"x$2 = 1" + hence *:"g (x $ 2) $ 2 = 1" using assms(8) by auto + have "sqprojection (f (x$1) - g (x$2)) $ 2 > 0" + using x(2)[unfolded o_def Cart_eq,THEN spec[where x=2]] + unfolding as negatex_def vector_2 by auto moreover + from x1 have "f (x $ 1) \ {- 1..1}" apply-apply(rule assms(1)[unfolded 
subset_eq,rule_format]) by auto + ultimately show False unfolding lem3[OF nz] vector_component_simps * mem_interval + apply(erule_tac x=2 in allE) by auto + next assume as:"x$2 = -1" + hence *:"g (x $ 2) $ 2 = - 1" using assms(7) by auto + have "sqprojection (f (x$1) - g (x$2)) $ 2 < 0" + using x(2)[unfolded o_def Cart_eq,THEN spec[where x=2]] + unfolding as negatex_def vector_2 by auto moreover + from x1 have "f (x $ 1) \ {- 1..1}" apply-apply(rule assms(1)[unfolded subset_eq,rule_format]) by auto + ultimately show False unfolding lem3[OF nz] vector_component_simps * mem_interval + apply(erule_tac x=2 in allE) by auto qed(auto) qed + +lemma fashoda_unit_path: fixes f ::"real \ real^2" and g ::"real \ real^2" + assumes "path f" "path g" "path_image f \ {- 1..1}" "path_image g \ {- 1..1}" + "(pathstart f)$1 = -1" "(pathfinish f)$1 = 1" "(pathstart g)$2 = -1" "(pathfinish g)$2 = 1" + obtains z where "z \ path_image f" "z \ path_image g" proof- + note assms=assms[unfolded path_def pathstart_def pathfinish_def path_image_def] + def iscale \ "\z::real. inverse 2 *\<^sub>R (z + 1)" + have isc:"iscale ` {- 1..1} \ {0..1}" unfolding iscale_def by(auto) + have "\s\{- 1..1}. \t\{- 1..1}. (f \ iscale) s = (g \ iscale) t" proof(rule fashoda_unit) + show "(f \ iscale) ` {- 1..1} \ {- 1..1}" "(g \ iscale) ` {- 1..1} \ {- 1..1}" + using isc and assms(3-4) unfolding image_compose by auto + have *:"continuous_on {- 1..1} iscale" unfolding iscale_def by(rule continuous_on_intros)+ + show "continuous_on {- 1..1} (f \ iscale)" "continuous_on {- 1..1} (g \ iscale)" + apply-apply(rule_tac[!] continuous_on_compose[OF *]) apply(rule_tac[!] continuous_on_subset[OF _ isc]) + by(rule assms)+ have *:"(1 / 2) *\<^sub>R (1 + (1::real^1)) = 1" unfolding Cart_eq by auto + show "(f \ iscale) (- 1) $ 1 = - 1" "(f \ iscale) 1 $ 1 = 1" "(g \ iscale) (- 1) $ 2 = -1" "(g \ iscale) 1 $ 2 = 1" + unfolding o_def iscale_def using assms by(auto simp add:*) qed + then guess s .. from this(2) guess t .. note st=this + show thesis apply(rule_tac z="f (iscale s)" in that) + using st `s\{- 1..1}` unfolding o_def path_image_def image_iff apply- + apply(rule_tac x="iscale s" in bexI) prefer 3 apply(rule_tac x="iscale t" in bexI) + using isc[unfolded subset_eq, rule_format] by auto qed + +lemma fashoda: fixes b::"real^2" + assumes "path f" "path g" "path_image f \ {a..b}" "path_image g \ {a..b}" + "(pathstart f)$1 = a$1" "(pathfinish f)$1 = b$1" + "(pathstart g)$2 = a$2" "(pathfinish g)$2 = b$2" + obtains z where "z \ path_image f" "z \ path_image g" proof- + fix P Q S presume "P \ Q \ S" "P \ thesis" "Q \ thesis" "S \ thesis" thus thesis by auto +next have "{a..b} \ {}" using assms(3) using path_image_nonempty by auto + hence "a \ b" unfolding interval_eq_empty vector_le_def by(auto simp add: not_less) + thus "a$1 = b$1 \ a$2 = b$2 \ (a$1 < b$1 \ a$2 < b$2)" unfolding vector_le_def forall_2 by auto +next assume as:"a$1 = b$1" have "\z\path_image g. z$2 = (pathstart f)$2" apply(rule connected_ivt_component) + apply(rule connected_path_image assms)+apply(rule pathstart_in_path_image,rule pathfinish_in_path_image) + unfolding assms using assms(3)[unfolded path_image_def subset_eq,rule_format,of "f 0"] + unfolding pathstart_def by(auto simp add: vector_le_def) then guess z .. 
note z=this + have "z \ {a..b}" using z(1) assms(4) unfolding path_image_def by blast + hence "z = f 0" unfolding Cart_eq forall_2 unfolding z(2) pathstart_def + using assms(3)[unfolded path_image_def subset_eq mem_interval,rule_format,of "f 0" 1] + unfolding mem_interval apply(erule_tac x=1 in allE) using as by auto + thus thesis apply-apply(rule that[OF _ z(1)]) unfolding path_image_def by auto +next assume as:"a$2 = b$2" have "\z\path_image f. z$1 = (pathstart g)$1" apply(rule connected_ivt_component) + apply(rule connected_path_image assms)+apply(rule pathstart_in_path_image,rule pathfinish_in_path_image) + unfolding assms using assms(4)[unfolded path_image_def subset_eq,rule_format,of "g 0"] + unfolding pathstart_def by(auto simp add: vector_le_def) then guess z .. note z=this + have "z \ {a..b}" using z(1) assms(3) unfolding path_image_def by blast + hence "z = g 0" unfolding Cart_eq forall_2 unfolding z(2) pathstart_def + using assms(4)[unfolded path_image_def subset_eq mem_interval,rule_format,of "g 0" 2] + unfolding mem_interval apply(erule_tac x=2 in allE) using as by auto + thus thesis apply-apply(rule that[OF z(1)]) unfolding path_image_def by auto +next assume as:"a $ 1 < b $ 1 \ a $ 2 < b $ 2" + have int_nem:"{- 1..1::real^2} \ {}" unfolding interval_eq_empty by auto + guess z apply(rule fashoda_unit_path[of "interval_bij (a,b) (- 1,1) \ f" "interval_bij (a,b) (- 1,1) \ g"]) + unfolding path_def path_image_def pathstart_def pathfinish_def + apply(rule_tac[1-2] continuous_on_compose) apply(rule assms[unfolded path_def] continuous_on_interval_bij)+ + unfolding subset_eq apply(rule_tac[1-2] ballI) + proof- fix x assume "x \ (interval_bij (a, b) (- 1, 1) \ f) ` {0..1}" + then guess y unfolding image_iff .. note y=this + show "x \ {- 1..1}" unfolding y o_def apply(rule in_interval_interval_bij) + using y(1) using assms(3)[unfolded path_image_def subset_eq] int_nem by auto + next fix x assume "x \ (interval_bij (a, b) (- 1, 1) \ g) ` {0..1}" + then guess y unfolding image_iff .. note y=this + show "x \ {- 1..1}" unfolding y o_def apply(rule in_interval_interval_bij) + using y(1) using assms(4)[unfolded path_image_def subset_eq] int_nem by auto + next show "(interval_bij (a, b) (- 1, 1) \ f) 0 $ 1 = -1" + "(interval_bij (a, b) (- 1, 1) \ f) 1 $ 1 = 1" + "(interval_bij (a, b) (- 1, 1) \ g) 0 $ 2 = -1" + "(interval_bij (a, b) (- 1, 1) \ g) 1 $ 2 = 1" unfolding interval_bij_def Cart_lambda_beta vector_component_simps o_def split_conv + unfolding assms[unfolded pathstart_def pathfinish_def] using as by auto qed note z=this + from z(1) guess zf unfolding image_iff .. note zf=this + from z(2) guess zg unfolding image_iff .. note zg=this + have *:"\i. (- 1) $ i < (1::real^2) $ i \ a $ i < b $ i" unfolding forall_2 using as by auto + show thesis apply(rule_tac z="interval_bij (- 1,1) (a,b) z" in that) + apply(subst zf) defer apply(subst zg) unfolding o_def interval_bij_bij[OF *] path_image_def + using zf(1) zg(1) by auto qed + +subsection {*Some slightly ad hoc lemmas I use below*} + +lemma segment_vertical: fixes a::"real^2" assumes "a$1 = b$1" + shows "x \ closed_segment a b \ (x$1 = a$1 \ x$1 = b$1 \ + (a$2 \ x$2 \ x$2 \ b$2 \ b$2 \ x$2 \ x$2 \ a$2))" (is "_ = ?R") +proof- + let ?L = "\u. 
(x $ 1 = (1 - u) * a $ 1 + u * b $ 1 \ x $ 2 = (1 - u) * a $ 2 + u * b $ 2) \ 0 \ u \ u \ 1" + { presume "?L \ ?R" "?R \ ?L" thus ?thesis unfolding closed_segment_def mem_Collect_eq + unfolding Cart_eq forall_2 smult_conv_scaleR[THEN sym] vector_component_simps by blast } + { assume ?L then guess u apply-apply(erule exE)apply(erule conjE)+ . note u=this + { fix b a assume "b + u * a > a + u * b" + hence "(1 - u) * b > (1 - u) * a" by(auto simp add:field_simps) + hence "b \ a" apply(drule_tac mult_less_imp_less_left) using u by auto + hence "u * a \ u * b" apply-apply(rule mult_left_mono[OF _ u(3)]) + using u(3-4) by(auto simp add:field_simps) } note * = this + { fix a b assume "u * b > u * a" hence "(1 - u) * a \ (1 - u) * b" apply-apply(rule mult_left_mono) + apply(drule mult_less_imp_less_left) using u by auto + hence "a + u * b \ b + u * a" by(auto simp add:field_simps) } note ** = this + thus ?R unfolding u assms using u by(auto simp add:field_simps not_le intro:* **) } + { assume ?R thus ?L proof(cases "x$2 = b$2") + case True thus ?L apply(rule_tac x="(x$2 - a$2) / (b$2 - a$2)" in exI) unfolding assms True + using `?R` by(auto simp add:field_simps) + next case False thus ?L apply(rule_tac x="1 - (x$2 - b$2) / (a$2 - b$2)" in exI) unfolding assms using `?R` + by(auto simp add:field_simps) + qed } qed + +lemma segment_horizontal: fixes a::"real^2" assumes "a$2 = b$2" + shows "x \ closed_segment a b \ (x$2 = a$2 \ x$2 = b$2 \ + (a$1 \ x$1 \ x$1 \ b$1 \ b$1 \ x$1 \ x$1 \ a$1))" (is "_ = ?R") +proof- + let ?L = "\u. (x $ 1 = (1 - u) * a $ 1 + u * b $ 1 \ x $ 2 = (1 - u) * a $ 2 + u * b $ 2) \ 0 \ u \ u \ 1" + { presume "?L \ ?R" "?R \ ?L" thus ?thesis unfolding closed_segment_def mem_Collect_eq + unfolding Cart_eq forall_2 smult_conv_scaleR[THEN sym] vector_component_simps by blast } + { assume ?L then guess u apply-apply(erule exE)apply(erule conjE)+ . note u=this + { fix b a assume "b + u * a > a + u * b" + hence "(1 - u) * b > (1 - u) * a" by(auto simp add:field_simps) + hence "b \ a" apply(drule_tac mult_less_imp_less_left) using u by auto + hence "u * a \ u * b" apply-apply(rule mult_left_mono[OF _ u(3)]) + using u(3-4) by(auto simp add:field_simps) } note * = this + { fix a b assume "u * b > u * a" hence "(1 - u) * a \ (1 - u) * b" apply-apply(rule mult_left_mono) + apply(drule mult_less_imp_less_left) using u by auto + hence "a + u * b \ b + u * a" by(auto simp add:field_simps) } note ** = this + thus ?R unfolding u assms using u by(auto simp add:field_simps not_le intro:* **) } + { assume ?R thus ?L proof(cases "x$1 = b$1") + case True thus ?L apply(rule_tac x="(x$1 - a$1) / (b$1 - a$1)" in exI) unfolding assms True + using `?R` by(auto simp add:field_simps) + next case False thus ?L apply(rule_tac x="1 - (x$1 - b$1) / (a$1 - b$1)" in exI) unfolding assms using `?R` + by(auto simp add:field_simps) + qed } qed + +subsection {*useful Fashoda corollary pointed out to me by Tom Hales. 
*} + +lemma fashoda_interlace: fixes a::"real^2" + assumes "path f" "path g" + "path_image f \ {a..b}" "path_image g \ {a..b}" + "(pathstart f)$2 = a$2" "(pathfinish f)$2 = a$2" + "(pathstart g)$2 = a$2" "(pathfinish g)$2 = a$2" + "(pathstart f)$1 < (pathstart g)$1" "(pathstart g)$1 < (pathfinish f)$1" + "(pathfinish f)$1 < (pathfinish g)$1" + obtains z where "z \ path_image f" "z \ path_image g" +proof- + have "{a..b} \ {}" using path_image_nonempty using assms(3) by auto + note ab=this[unfolded interval_eq_empty not_ex forall_2 not_less] + have "pathstart f \ {a..b}" "pathfinish f \ {a..b}" "pathstart g \ {a..b}" "pathfinish g \ {a..b}" + using pathstart_in_path_image pathfinish_in_path_image using assms(3-4) by auto + note startfin = this[unfolded mem_interval forall_2] + let ?P1 = "linepath (vector[a$1 - 2, a$2 - 2]) (vector[(pathstart f)$1,a$2 - 2]) +++ + linepath(vector[(pathstart f)$1,a$2 - 2])(pathstart f) +++ f +++ + linepath(pathfinish f)(vector[(pathfinish f)$1,a$2 - 2]) +++ + linepath(vector[(pathfinish f)$1,a$2 - 2])(vector[b$1 + 2,a$2 - 2])" + let ?P2 = "linepath(vector[(pathstart g)$1, (pathstart g)$2 - 3])(pathstart g) +++ g +++ + linepath(pathfinish g)(vector[(pathfinish g)$1,a$2 - 1]) +++ + linepath(vector[(pathfinish g)$1,a$2 - 1])(vector[b$1 + 1,a$2 - 1]) +++ + linepath(vector[b$1 + 1,a$2 - 1])(vector[b$1 + 1,b$2 + 3])" + let ?a = "vector[a$1 - 2, a$2 - 3]" + let ?b = "vector[b$1 + 2, b$2 + 3]" + have P1P2:"path_image ?P1 = path_image (linepath (vector[a$1 - 2, a$2 - 2]) (vector[(pathstart f)$1,a$2 - 2])) \ + path_image (linepath(vector[(pathstart f)$1,a$2 - 2])(pathstart f)) \ path_image f \ + path_image (linepath(pathfinish f)(vector[(pathfinish f)$1,a$2 - 2])) \ + path_image (linepath(vector[(pathfinish f)$1,a$2 - 2])(vector[b$1 + 2,a$2 - 2]))" + "path_image ?P2 = path_image(linepath(vector[(pathstart g)$1, (pathstart g)$2 - 3])(pathstart g)) \ path_image g \ + path_image(linepath(pathfinish g)(vector[(pathfinish g)$1,a$2 - 1])) \ + path_image(linepath(vector[(pathfinish g)$1,a$2 - 1])(vector[b$1 + 1,a$2 - 1])) \ + path_image(linepath(vector[b$1 + 1,a$2 - 1])(vector[b$1 + 1,b$2 + 3]))" using assms(1-2) + by(auto simp add: path_image_join path_linepath) + have abab: "{a..b} \ {?a..?b}" by(auto simp add:vector_le_def forall_2 vector_2) + guess z apply(rule fashoda[of ?P1 ?P2 ?a ?b]) + unfolding pathstart_join pathfinish_join pathstart_linepath pathfinish_linepath vector_2 proof- + show "path ?P1" "path ?P2" using assms by auto + have "path_image ?P1 \ {?a .. ?b}" unfolding P1P2 path_image_linepath apply(rule Un_least)+ defer 3 + apply(rule_tac[1-4] convex_interval(1)[unfolded convex_contains_segment,rule_format]) + unfolding mem_interval forall_2 vector_2 using ab startfin abab assms(3) + using assms(9-) unfolding assms by(auto simp add:field_simps) + thus "path_image ?P1 \ {?a .. ?b}" . + have "path_image ?P2 \ {?a .. ?b}" unfolding P1P2 path_image_linepath apply(rule Un_least)+ defer 2 + apply(rule_tac[1-4] convex_interval(1)[unfolded convex_contains_segment,rule_format]) + unfolding mem_interval forall_2 vector_2 using ab startfin abab assms(4) + using assms(9-) unfolding assms by(auto simp add:field_simps) + thus "path_image ?P2 \ {?a .. ?b}" . 
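Informally, the hypotheses of fashoda_interlace above say that all four endpoints of f and g lie on the bottom edge of the rectangle and that their first coordinates interlace (same notational conventions as before, a sketch only):
\[
f(0)_2 = f(1)_2 = g(0)_2 = g(1)_2 = a_2,
\qquad
f(0)_1 < g(0)_1 < f(1)_1 < g(1)_1 .
\]
The proof extends f and g by the line segments ?P1 and ?P2 into the larger rectangle [?a,?b] so that the extended paths satisfy the crossing hypotheses of fashoda; the meeting point obtained there is then shown to lie on the original images.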
+ show "a $ 1 - 2 = a $ 1 - 2" "b $ 1 + 2 = b $ 1 + 2" "pathstart g $ 2 - 3 = a $ 2 - 3" "b $ 2 + 3 = b $ 2 + 3" + by(auto simp add: assms) + qed note z=this[unfolded P1P2 path_image_linepath] + show thesis apply(rule that[of z]) proof- + have "(z \ closed_segment (vector [a $ 1 - 2, a $ 2 - 2]) (vector [pathstart f $ 1, a $ 2 - 2]) \ + z \ closed_segment (vector [pathstart f $ 1, a $ 2 - 2]) (pathstart f)) \ + z \ closed_segment (pathfinish f) (vector [pathfinish f $ 1, a $ 2 - 2]) \ + z \ closed_segment (vector [pathfinish f $ 1, a $ 2 - 2]) (vector [b $ 1 + 2, a $ 2 - 2]) \ + (((z \ closed_segment (vector [pathstart g $ 1, pathstart g $ 2 - 3]) (pathstart g)) \ + z \ closed_segment (pathfinish g) (vector [pathfinish g $ 1, a $ 2 - 1])) \ + z \ closed_segment (vector [pathfinish g $ 1, a $ 2 - 1]) (vector [b $ 1 + 1, a $ 2 - 1])) \ + z \ closed_segment (vector [b $ 1 + 1, a $ 2 - 1]) (vector [b $ 1 + 1, b $ 2 + 3]) \ False" + apply(simp only: segment_vertical segment_horizontal vector_2) proof- case goal1 note as=this + have "pathfinish f \ {a..b}" using assms(3) pathfinish_in_path_image[of f] by auto + hence "1 + b $ 1 \ pathfinish f $ 1 \ False" unfolding mem_interval forall_2 by auto + hence "z$1 \ pathfinish f$1" using as(2) using assms ab by(auto simp add:field_simps) + moreover have "pathstart f \ {a..b}" using assms(3) pathstart_in_path_image[of f] by auto + hence "1 + b $ 1 \ pathstart f $ 1 \ False" unfolding mem_interval forall_2 by auto + hence "z$1 \ pathstart f$1" using as(2) using assms ab by(auto simp add:field_simps) + ultimately have *:"z$2 = a$2 - 2" using goal1(1) by auto + have "z$1 \ pathfinish g$1" using as(2) using assms ab by(auto simp add:field_simps *) + moreover have "pathstart g \ {a..b}" using assms(4) pathstart_in_path_image[of g] by auto + note this[unfolded mem_interval forall_2] + hence "z$1 \ pathstart g$1" using as(1) using assms ab by(auto simp add:field_simps *) + ultimately have "a $ 2 - 1 \ z $ 2 \ z $ 2 \ b $ 2 + 3 \ b $ 2 + 3 \ z $ 2 \ z $ 2 \ a $ 2 - 1" + using as(2) unfolding * assms by(auto simp add:field_simps) + thus False unfolding * using ab by auto + qed hence "z \ path_image f \ z \ path_image g" using z unfolding Un_iff by blast + hence z':"z\{a..b}" using assms(3-4) by auto + have "a $ 2 = z $ 2 \ (z $ 1 = pathstart f $ 1 \ z $ 1 = pathfinish f $ 1) \ (z = pathstart f \ z = pathfinish f)" + unfolding Cart_eq forall_2 assms by auto + with z' show "z\path_image f" using z(1) unfolding Un_iff mem_interval forall_2 apply- + apply(simp only: segment_vertical segment_horizontal vector_2) unfolding assms by auto + have "a $ 2 = z $ 2 \ (z $ 1 = pathstart g $ 1 \ z $ 1 = pathfinish g $ 1) \ (z = pathstart g \ z = pathfinish g)" + unfolding Cart_eq forall_2 assms by auto + with z' show "z\path_image g" using z(2) unfolding Un_iff mem_interval forall_2 apply- + apply(simp only: segment_vertical segment_horizontal vector_2) unfolding assms by auto + qed qed + +(** The Following still needs to be translated. Maybe I will do that later. + +(* ------------------------------------------------------------------------- *) +(* Complement in dimension N >= 2 of set homeomorphic to any interval in *) +(* any dimension is (path-)connected. This naively generalizes the argument *) +(* in Ryuji Maehara's paper "The Jordan curve theorem via the Brouwer *) +(* fixed point theorem", American Mathematical Monthly 1984. 
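For reference, the headline statements of the untranslated HOL Light material below (PATH_CONNECTED_COMPLEMENT_HOMEOMORPHIC_INTERVAL and its corollaries) read, in ordinary notation:
\[
N \ge 2,\;\; S \subseteq \mathbb{R}^N,\;\; S \ \text{homeomorphic to}\ [a,b] \subseteq \mathbb{R}^M
\ \Longrightarrow\ \mathbb{R}^N \setminus S \ \text{is path-connected},
\]
and in particular the complement of the image of an arc in \(\mathbb{R}^N\) with \(N \ge 2\) is path-connected, hence connected (PATH_CONNECTED_ARC_COMPLEMENT, CONNECTED_ARC_COMPLEMENT).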
*) +(* ------------------------------------------------------------------------- *) + +let RETRACTION_INJECTIVE_IMAGE_INTERVAL = prove + (`!p:real^M->real^N a b. + ~(interval[a,b] = {}) /\ + p continuous_on interval[a,b] /\ + (!x y. x IN interval[a,b] /\ y IN interval[a,b] /\ p x = p y ==> x = y) + ==> ?f. f continuous_on (:real^N) /\ + IMAGE f (:real^N) SUBSET (IMAGE p (interval[a,b])) /\ + (!x. x IN (IMAGE p (interval[a,b])) ==> f x = x)`, + REPEAT STRIP_TAC THEN + FIRST_ASSUM(MP_TAC o GEN_REWRITE_RULE I [INJECTIVE_ON_LEFT_INVERSE]) THEN + DISCH_THEN(X_CHOOSE_TAC `q:real^N->real^M`) THEN + SUBGOAL_THEN `(q:real^N->real^M) continuous_on + (IMAGE p (interval[a:real^M,b]))` + ASSUME_TAC THENL + [MATCH_MP_TAC CONTINUOUS_ON_INVERSE THEN ASM_REWRITE_TAC[COMPACT_INTERVAL]; + ALL_TAC] THEN + MP_TAC(ISPECL [`q:real^N->real^M`; + `IMAGE (p:real^M->real^N) + (interval[a,b])`; + `a:real^M`; `b:real^M`] + TIETZE_CLOSED_INTERVAL) THEN + ASM_SIMP_TAC[COMPACT_INTERVAL; COMPACT_CONTINUOUS_IMAGE; + COMPACT_IMP_CLOSED] THEN + ANTS_TAC THENL [ASM SET_TAC[]; ALL_TAC] THEN + DISCH_THEN(X_CHOOSE_THEN `r:real^N->real^M` STRIP_ASSUME_TAC) THEN + EXISTS_TAC `(p:real^M->real^N) o (r:real^N->real^M)` THEN + REWRITE_TAC[SUBSET; FORALL_IN_IMAGE; o_THM; IN_UNIV] THEN + CONJ_TAC THENL [ALL_TAC; ASM SET_TAC[]] THEN + MATCH_MP_TAC CONTINUOUS_ON_COMPOSE THEN ASM_REWRITE_TAC[] THEN + FIRST_X_ASSUM(MATCH_MP_TAC o MATCH_MP(REWRITE_RULE[IMP_CONJ] + CONTINUOUS_ON_SUBSET)) THEN ASM SET_TAC[]);; + +let UNBOUNDED_PATH_COMPONENTS_COMPLEMENT_HOMEOMORPHIC_INTERVAL = prove + (`!s:real^N->bool a b:real^M. + s homeomorphic (interval[a,b]) + ==> !x. ~(x IN s) ==> ~bounded(path_component((:real^N) DIFF s) x)`, + REPEAT GEN_TAC THEN REWRITE_TAC[homeomorphic; homeomorphism] THEN + REWRITE_TAC[LEFT_IMP_EXISTS_THM] THEN + MAP_EVERY X_GEN_TAC [`p':real^N->real^M`; `p:real^M->real^N`] THEN + DISCH_TAC THEN + SUBGOAL_THEN + `!x y. x IN interval[a,b] /\ y IN interval[a,b] /\ + (p:real^M->real^N) x = p y ==> x = y` + ASSUME_TAC THENL [ASM_MESON_TAC[]; ALL_TAC] THEN + FIRST_X_ASSUM(MP_TAC o funpow 4 CONJUNCT2) THEN + DISCH_THEN(CONJUNCTS_THEN2 (SUBST1_TAC o SYM) ASSUME_TAC) THEN + ASM_CASES_TAC `interval[a:real^M,b] = {}` THEN + ASM_REWRITE_TAC[IMAGE_CLAUSES; DIFF_EMPTY; PATH_COMPONENT_UNIV; + NOT_BOUNDED_UNIV] THEN + ABBREV_TAC `s = (:real^N) DIFF (IMAGE p (interval[a:real^M,b]))` THEN + X_GEN_TAC `c:real^N` THEN REPEAT STRIP_TAC THEN + SUBGOAL_THEN `(c:real^N) IN s` ASSUME_TAC THENL [ASM SET_TAC[]; ALL_TAC] THEN + SUBGOAL_THEN `bounded((path_component s c) UNION + (IMAGE (p:real^M->real^N) (interval[a,b])))` + MP_TAC THENL + [ASM_SIMP_TAC[BOUNDED_UNION; COMPACT_IMP_BOUNDED; COMPACT_IMP_BOUNDED; + COMPACT_CONTINUOUS_IMAGE; COMPACT_INTERVAL]; + ALL_TAC] THEN + DISCH_THEN(MP_TAC o SPEC `c:real^N` o MATCH_MP BOUNDED_SUBSET_BALL) THEN + REWRITE_TAC[UNION_SUBSET] THEN + DISCH_THEN(X_CHOOSE_THEN `B:real` STRIP_ASSUME_TAC) THEN + MP_TAC(ISPECL [`p:real^M->real^N`; `a:real^M`; `b:real^M`] + RETRACTION_INJECTIVE_IMAGE_INTERVAL) THEN + ASM_REWRITE_TAC[SUBSET; IN_UNIV] THEN + DISCH_THEN(X_CHOOSE_THEN `r:real^N->real^N` MP_TAC) THEN + DISCH_THEN(CONJUNCTS_THEN2 ASSUME_TAC + (CONJUNCTS_THEN2 MP_TAC ASSUME_TAC)) THEN + REWRITE_TAC[FORALL_IN_IMAGE; IN_UNIV] THEN DISCH_TAC THEN + ABBREV_TAC `q = \z:real^N. 
if z IN path_component s c then r(z) else z` THEN + SUBGOAL_THEN + `(q:real^N->real^N) continuous_on + (closure(path_component s c) UNION ((:real^N) DIFF (path_component s c)))` + MP_TAC THENL + [EXPAND_TAC "q" THEN MATCH_MP_TAC CONTINUOUS_ON_CASES THEN + REWRITE_TAC[CLOSED_CLOSURE; CONTINUOUS_ON_ID; GSYM OPEN_CLOSED] THEN + REPEAT CONJ_TAC THENL + [MATCH_MP_TAC OPEN_PATH_COMPONENT THEN EXPAND_TAC "s" THEN + ASM_SIMP_TAC[GSYM CLOSED_OPEN; COMPACT_IMP_CLOSED; + COMPACT_CONTINUOUS_IMAGE; COMPACT_INTERVAL]; + ASM_MESON_TAC[CONTINUOUS_ON_SUBSET; SUBSET_UNIV]; + ALL_TAC] THEN + X_GEN_TAC `z:real^N` THEN + REWRITE_TAC[SET_RULE `~(z IN (s DIFF t) /\ z IN t)`] THEN + STRIP_TAC THEN FIRST_X_ASSUM MATCH_MP_TAC THEN + MP_TAC(ISPECL + [`path_component s (z:real^N)`; `path_component s (c:real^N)`] + OPEN_INTER_CLOSURE_EQ_EMPTY) THEN + ASM_REWRITE_TAC[GSYM DISJOINT; PATH_COMPONENT_DISJOINT] THEN ANTS_TAC THENL + [MATCH_MP_TAC OPEN_PATH_COMPONENT THEN EXPAND_TAC "s" THEN + ASM_SIMP_TAC[GSYM CLOSED_OPEN; COMPACT_IMP_CLOSED; + COMPACT_CONTINUOUS_IMAGE; COMPACT_INTERVAL]; + REWRITE_TAC[DISJOINT; EXTENSION; IN_INTER; NOT_IN_EMPTY] THEN + DISCH_THEN(MP_TAC o SPEC `z:real^N`) THEN ASM_REWRITE_TAC[] THEN + GEN_REWRITE_TAC (LAND_CONV o ONCE_DEPTH_CONV) [IN] THEN + REWRITE_TAC[PATH_COMPONENT_REFL_EQ] THEN ASM SET_TAC[]]; + ALL_TAC] THEN + SUBGOAL_THEN + `closure(path_component s c) UNION ((:real^N) DIFF (path_component s c)) = + (:real^N)` + SUBST1_TAC THENL + [MATCH_MP_TAC(SET_RULE `s SUBSET t ==> t UNION (UNIV DIFF s) = UNIV`) THEN + REWRITE_TAC[CLOSURE_SUBSET]; + DISCH_TAC] THEN + MP_TAC(ISPECL + [`(\x. &2 % c - x) o + (\x. c + B / norm(x - c) % (x - c)) o (q:real^N->real^N)`; + `cball(c:real^N,B)`] + BROUWER) THEN + REWRITE_TAC[NOT_IMP; GSYM CONJ_ASSOC; COMPACT_CBALL; CONVEX_CBALL] THEN + ASM_SIMP_TAC[CBALL_EQ_EMPTY; REAL_LT_IMP_LE; REAL_NOT_LT] THEN + SUBGOAL_THEN `!x. ~((q:real^N->real^N) x = c)` ASSUME_TAC THENL + [X_GEN_TAC `x:real^N` THEN EXPAND_TAC "q" THEN + REWRITE_TAC[NORM_EQ_0; VECTOR_SUB_EQ] THEN COND_CASES_TAC THEN + ASM SET_TAC[PATH_COMPONENT_REFL_EQ]; + ALL_TAC] THEN + REPEAT CONJ_TAC THENL + [MATCH_MP_TAC CONTINUOUS_ON_COMPOSE THEN + SIMP_TAC[CONTINUOUS_ON_SUB; CONTINUOUS_ON_ID; CONTINUOUS_ON_CONST] THEN + MATCH_MP_TAC CONTINUOUS_ON_COMPOSE THEN CONJ_TAC THENL + [ASM_MESON_TAC[CONTINUOUS_ON_SUBSET; SUBSET_UNIV]; ALL_TAC] THEN + MATCH_MP_TAC CONTINUOUS_ON_ADD THEN REWRITE_TAC[CONTINUOUS_ON_CONST] THEN + MATCH_MP_TAC CONTINUOUS_ON_MUL THEN + SIMP_TAC[CONTINUOUS_ON_SUB; CONTINUOUS_ON_ID; CONTINUOUS_ON_CONST] THEN + REWRITE_TAC[o_DEF; real_div; LIFT_CMUL] THEN + MATCH_MP_TAC CONTINUOUS_ON_CMUL THEN + MATCH_MP_TAC(REWRITE_RULE[o_DEF] CONTINUOUS_ON_INV) THEN + ASM_REWRITE_TAC[FORALL_IN_IMAGE; NORM_EQ_0; VECTOR_SUB_EQ] THEN + SUBGOAL_THEN + `(\x:real^N. lift(norm(x - c))) = (lift o norm) o (\x. 
x - c)` + SUBST1_TAC THENL [REWRITE_TAC[o_DEF]; ALL_TAC] THEN + MATCH_MP_TAC CONTINUOUS_ON_COMPOSE THEN + ASM_SIMP_TAC[CONTINUOUS_ON_SUB; CONTINUOUS_ON_ID; CONTINUOUS_ON_CONST; + CONTINUOUS_ON_LIFT_NORM]; + REWRITE_TAC[SUBSET; FORALL_IN_IMAGE; IN_CBALL; o_THM; dist] THEN + X_GEN_TAC `x:real^N` THEN DISCH_TAC THEN + REWRITE_TAC[VECTOR_ARITH `c - (&2 % c - (c + x)) = x`] THEN + REWRITE_TAC[NORM_MUL; REAL_ABS_DIV; REAL_ABS_NORM] THEN + ASM_SIMP_TAC[REAL_DIV_RMUL; NORM_EQ_0; VECTOR_SUB_EQ] THEN + ASM_REAL_ARITH_TAC; + REWRITE_TAC[NOT_EXISTS_THM; TAUT `~(c /\ b) <=> c ==> ~b`] THEN + REWRITE_TAC[IN_CBALL; o_THM; dist] THEN + X_GEN_TAC `x:real^N` THEN DISCH_TAC THEN + REWRITE_TAC[VECTOR_ARITH `&2 % c - (c + x') = x <=> --x' = x - c`] THEN + ASM_CASES_TAC `(x:real^N) IN path_component s c` THENL + [MATCH_MP_TAC(NORM_ARITH `norm(y) < B /\ norm(x) = B ==> ~(--x = y)`) THEN + REWRITE_TAC[NORM_MUL; REAL_ABS_DIV; REAL_ABS_NORM] THEN + ASM_SIMP_TAC[REAL_DIV_RMUL; NORM_EQ_0; VECTOR_SUB_EQ] THEN + ASM_SIMP_TAC[REAL_ARITH `&0 < B ==> abs B = B`] THEN + UNDISCH_TAC `path_component s c SUBSET ball(c:real^N,B)` THEN + REWRITE_TAC[SUBSET; IN_BALL] THEN ASM_MESON_TAC[dist; NORM_SUB]; + EXPAND_TAC "q" THEN REWRITE_TAC[] THEN ASM_REWRITE_TAC[] THEN + REWRITE_TAC[VECTOR_ARITH `--(c % x) = x <=> (&1 + c) % x = vec 0`] THEN + ASM_REWRITE_TAC[DE_MORGAN_THM; VECTOR_SUB_EQ; VECTOR_MUL_EQ_0] THEN + SUBGOAL_THEN `~(x:real^N = c)` ASSUME_TAC THENL + [ASM_MESON_TAC[PATH_COMPONENT_REFL; IN]; ALL_TAC] THEN + ASM_REWRITE_TAC[] THEN + MATCH_MP_TAC(REAL_ARITH `&0 < x ==> ~(&1 + x = &0)`) THEN + ASM_SIMP_TAC[REAL_LT_DIV; NORM_POS_LT; VECTOR_SUB_EQ]]]);; + +let PATH_CONNECTED_COMPLEMENT_HOMEOMORPHIC_INTERVAL = prove + (`!s:real^N->bool a b:real^M. + 2 <= dimindex(:N) /\ s homeomorphic interval[a,b] + ==> path_connected((:real^N) DIFF s)`, + REPEAT STRIP_TAC THEN REWRITE_TAC[PATH_CONNECTED_IFF_PATH_COMPONENT] THEN + FIRST_ASSUM(MP_TAC o MATCH_MP + UNBOUNDED_PATH_COMPONENTS_COMPLEMENT_HOMEOMORPHIC_INTERVAL) THEN + ASM_REWRITE_TAC[SET_RULE `~(x IN s) <=> x IN (UNIV DIFF s)`] THEN + ABBREV_TAC `t = (:real^N) DIFF s` THEN + DISCH_TAC THEN MAP_EVERY X_GEN_TAC [`x:real^N`; `y:real^N`] THEN + STRIP_TAC THEN FIRST_ASSUM(MP_TAC o MATCH_MP HOMEOMORPHIC_COMPACTNESS) THEN + REWRITE_TAC[COMPACT_INTERVAL] THEN + DISCH_THEN(MP_TAC o MATCH_MP COMPACT_IMP_BOUNDED) THEN + REWRITE_TAC[BOUNDED_POS; LEFT_IMP_EXISTS_THM] THEN + X_GEN_TAC `B:real` THEN STRIP_TAC THEN + SUBGOAL_THEN `(?u:real^N. u IN path_component t x /\ B < norm(u)) /\ + (?v:real^N. 
v IN path_component t y /\ B < norm(v))` + STRIP_ASSUME_TAC THENL + [ASM_MESON_TAC[BOUNDED_POS; REAL_NOT_LE]; ALL_TAC] THEN + MATCH_MP_TAC PATH_COMPONENT_TRANS THEN EXISTS_TAC `u:real^N` THEN + CONJ_TAC THENL [ASM_MESON_TAC[IN]; ALL_TAC] THEN + MATCH_MP_TAC PATH_COMPONENT_SYM THEN + MATCH_MP_TAC PATH_COMPONENT_TRANS THEN EXISTS_TAC `v:real^N` THEN + CONJ_TAC THENL [ASM_MESON_TAC[IN]; ALL_TAC] THEN + MATCH_MP_TAC PATH_COMPONENT_OF_SUBSET THEN + EXISTS_TAC `(:real^N) DIFF cball(vec 0,B)` THEN CONJ_TAC THENL + [EXPAND_TAC "t" THEN MATCH_MP_TAC(SET_RULE + `s SUBSET t ==> (u DIFF t) SUBSET (u DIFF s)`) THEN + ASM_REWRITE_TAC[SUBSET; IN_CBALL_0]; + MP_TAC(ISPEC `cball(vec 0:real^N,B)` + PATH_CONNECTED_COMPLEMENT_BOUNDED_CONVEX) THEN + ASM_REWRITE_TAC[BOUNDED_CBALL; CONVEX_CBALL] THEN + REWRITE_TAC[PATH_CONNECTED_IFF_PATH_COMPONENT] THEN + DISCH_THEN MATCH_MP_TAC THEN + ASM_REWRITE_TAC[IN_DIFF; IN_UNIV; IN_CBALL_0; REAL_NOT_LE]]);; + +(* ------------------------------------------------------------------------- *) +(* In particular, apply all these to the special case of an arc. *) +(* ------------------------------------------------------------------------- *) + +let RETRACTION_ARC = prove + (`!p. arc p + ==> ?f. f continuous_on (:real^N) /\ + IMAGE f (:real^N) SUBSET path_image p /\ + (!x. x IN path_image p ==> f x = x)`, + REWRITE_TAC[arc; path; path_image] THEN REPEAT STRIP_TAC THEN + MATCH_MP_TAC RETRACTION_INJECTIVE_IMAGE_INTERVAL THEN + ASM_REWRITE_TAC[INTERVAL_EQ_EMPTY_1; DROP_VEC; REAL_NOT_LT; REAL_POS]);; + +let PATH_CONNECTED_ARC_COMPLEMENT = prove + (`!p. 2 <= dimindex(:N) /\ arc p + ==> path_connected((:real^N) DIFF path_image p)`, + REWRITE_TAC[arc; path] THEN REPEAT STRIP_TAC THEN SIMP_TAC[path_image] THEN + MP_TAC(ISPECL [`path_image p:real^N->bool`; `vec 0:real^1`; `vec 1:real^1`] + PATH_CONNECTED_COMPLEMENT_HOMEOMORPHIC_INTERVAL) THEN + ASM_REWRITE_TAC[path_image] THEN DISCH_THEN MATCH_MP_TAC THEN + ONCE_REWRITE_TAC[HOMEOMORPHIC_SYM] THEN + MATCH_MP_TAC HOMEOMORPHIC_COMPACT THEN + EXISTS_TAC `p:real^1->real^N` THEN ASM_REWRITE_TAC[COMPACT_INTERVAL]);; + +let CONNECTED_ARC_COMPLEMENT = prove + (`!p. 2 <= dimindex(:N) /\ arc p + ==> connected((:real^N) DIFF path_image p)`, + SIMP_TAC[PATH_CONNECTED_ARC_COMPLEMENT; PATH_CONNECTED_IMP_CONNECTED]);; *) + +end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Multivariate_Analysis/Finite_Cartesian_Product.thy --- a/src/HOL/Multivariate_Analysis/Finite_Cartesian_Product.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Multivariate_Analysis/Finite_Cartesian_Product.thy Tue May 04 20:30:22 2010 +0200 @@ -5,7 +5,7 @@ header {* Definition of finite Cartesian product types. *} theory Finite_Cartesian_Product -imports Main +imports Inner_Product L2_Norm Numeral_Type begin subsection {* Finite Cartesian products, with indexing and lambdas. *} @@ -53,46 +53,410 @@ lemma Cart_lambda_eta: "(\ i. (g$i)) = g" by (simp add: Cart_eq) -text{* A non-standard sum to "paste" Cartesian products. *} + +subsection {* Group operations and class instances *} + +instantiation cart :: (zero,finite) zero +begin + definition vector_zero_def : "0 \ (\ i. 0)" + instance .. +end -definition "pastecart f g = (\ i. case i of Inl a \ f$a | Inr b \ g$b)" -definition "fstcart f = (\ i. (f$(Inl i)))" -definition "sndcart f = (\ i. (f$(Inr i)))" +instantiation cart :: (plus,finite) plus +begin + definition vector_add_def : "op + \ (\ x y. (\ i. (x$i) + (y$i)))" + instance .. 
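The instantiations in this hunk all follow the same componentwise pattern, and further down the same file the metric, norm and inner product are taken as the Euclidean (setL2) combinations of the component values. As a sketch, writing x_i for x$i:
\[
0_i = 0,\qquad (x+y)_i = x_i + y_i,\qquad (r \cdot x)_i = r\,x_i,
\]
\[
\operatorname{dist}(x,y) = \Bigl(\sum_i \operatorname{dist}(x_i,y_i)^2\Bigr)^{1/2},\qquad
\lVert x\rVert = \Bigl(\sum_i \lVert x_i\rVert^2\Bigr)^{1/2},\qquad
\langle x,y\rangle = \sum_i \langle x_i,y_i\rangle .
\]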
+end -lemma nth_pastecart_Inl [simp]: "pastecart f g $ Inl a = f$a" - unfolding pastecart_def by simp +instantiation cart :: (minus,finite) minus +begin + definition vector_minus_def : "op - \ (\ x y. (\ i. (x$i) - (y$i)))" + instance .. +end + +instantiation cart :: (uminus,finite) uminus +begin + definition vector_uminus_def : "uminus \ (\ x. (\ i. - (x$i)))" + instance .. +end -lemma nth_pastecart_Inr [simp]: "pastecart f g $ Inr b = g$b" - unfolding pastecart_def by simp +lemma zero_index [simp]: "0 $ i = 0" + unfolding vector_zero_def by simp + +lemma vector_add_component [simp]: "(x + y)$i = x$i + y$i" + unfolding vector_add_def by simp + +lemma vector_minus_component [simp]: "(x - y)$i = x$i - y$i" + unfolding vector_minus_def by simp + +lemma vector_uminus_component [simp]: "(- x)$i = - (x$i)" + unfolding vector_uminus_def by simp + +instance cart :: (semigroup_add, finite) semigroup_add + by default (simp add: Cart_eq add_assoc) -lemma nth_fstcart [simp]: "fstcart f $ i = f $ Inl i" - unfolding fstcart_def by simp +instance cart :: (ab_semigroup_add, finite) ab_semigroup_add + by default (simp add: Cart_eq add_commute) + +instance cart :: (monoid_add, finite) monoid_add + by default (simp_all add: Cart_eq) -lemma nth_sndtcart [simp]: "sndcart f $ i = f $ Inr i" - unfolding sndcart_def by simp +instance cart :: (comm_monoid_add, finite) comm_monoid_add + by default (simp add: Cart_eq) + +instance cart :: (cancel_semigroup_add, finite) cancel_semigroup_add + by default (simp_all add: Cart_eq) + +instance cart :: (cancel_ab_semigroup_add, finite) cancel_ab_semigroup_add + by default (simp add: Cart_eq) + +instance cart :: (cancel_comm_monoid_add, finite) cancel_comm_monoid_add .. -lemma finite_sum_image: "(UNIV::('a + 'b) set) = range Inl \ range Inr" - apply auto - apply (case_tac x) - apply auto - done +instance cart :: (group_add, finite) group_add + by default (simp_all add: Cart_eq diff_minus) + +instance cart :: (ab_group_add, finite) ab_group_add + by default (simp_all add: Cart_eq) + + +subsection {* Real vector space *} + +instantiation cart :: (real_vector, finite) real_vector +begin + +definition vector_scaleR_def: "scaleR = (\ r x. (\ i. scaleR r (x$i)))" -lemma fstcart_pastecart[simp]: "fstcart (pastecart x y) = x" - by (simp add: Cart_eq) +lemma vector_scaleR_component [simp]: "(scaleR r x)$i = scaleR r (x$i)" + unfolding vector_scaleR_def by simp + +instance + by default (simp_all add: Cart_eq scaleR_left_distrib scaleR_right_distrib) -lemma sndcart_pastecart[simp]: "sndcart (pastecart x y) = y" - by (simp add: Cart_eq) +end + + +subsection {* Topological space *} + +instantiation cart :: (topological_space, finite) topological_space +begin -lemma pastecart_fst_snd[simp]: "pastecart (fstcart z) (sndcart z) = z" - by (simp add: Cart_eq pastecart_def fstcart_def sndcart_def split: sum.split) - -lemma pastecart_eq: "(x = y) \ (fstcart x = fstcart y) \ (sndcart x = sndcart y)" - using pastecart_fst_snd[of x] pastecart_fst_snd[of y] by metis +definition open_vector_def: + "open (S :: ('a ^ 'b) set) \ + (\x\S. \A. (\i. open (A i) \ x$i \ A i) \ + (\y. (\i. y$i \ A i) \ y \ S))" -lemma forall_pastecart: "(\p. P p) \ (\x y. P (pastecart x y))" - by (metis pastecart_fst_snd) - -lemma exists_pastecart: "(\p. P p) \ (\x y. 
P (pastecart x y))" - by (metis pastecart_fst_snd) +instance proof + show "open (UNIV :: ('a ^ 'b) set)" + unfolding open_vector_def by auto +next + fix S T :: "('a ^ 'b) set" + assume "open S" "open T" thus "open (S \ T)" + unfolding open_vector_def + apply clarify + apply (drule (1) bspec)+ + apply (clarify, rename_tac Sa Ta) + apply (rule_tac x="\i. Sa i \ Ta i" in exI) + apply (simp add: open_Int) + done +next + fix K :: "('a ^ 'b) set set" + assume "\S\K. open S" thus "open (\K)" + unfolding open_vector_def + apply clarify + apply (drule (1) bspec) + apply (drule (1) bspec) + apply clarify + apply (rule_tac x=A in exI) + apply fast + done +qed end + +lemma open_vector_box: "\i. open (S i) \ open {x. \i. x $ i \ S i}" +unfolding open_vector_def by auto + +lemma open_vimage_Cart_nth: "open S \ open ((\x. x $ i) -` S)" +unfolding open_vector_def +apply clarify +apply (rule_tac x="\k. if k = i then S else UNIV" in exI, simp) +done + +lemma closed_vimage_Cart_nth: "closed S \ closed ((\x. x $ i) -` S)" +unfolding closed_open vimage_Compl [symmetric] +by (rule open_vimage_Cart_nth) + +lemma closed_vector_box: "\i. closed (S i) \ closed {x. \i. x $ i \ S i}" +proof - + have "{x. \i. x $ i \ S i} = (\i. (\x. x $ i) -` S i)" by auto + thus "\i. closed (S i) \ closed {x. \i. x $ i \ S i}" + by (simp add: closed_INT closed_vimage_Cart_nth) +qed + +lemma tendsto_Cart_nth [tendsto_intros]: + assumes "((\x. f x) ---> a) net" + shows "((\x. f x $ i) ---> a $ i) net" +proof (rule topological_tendstoI) + fix S assume "open S" "a $ i \ S" + then have "open ((\y. y $ i) -` S)" "a \ ((\y. y $ i) -` S)" + by (simp_all add: open_vimage_Cart_nth) + with assms have "eventually (\x. f x \ (\y. y $ i) -` S) net" + by (rule topological_tendstoD) + then show "eventually (\x. f x $ i \ S) net" + by simp +qed + +lemma eventually_Ball_finite: (* TODO: move *) + assumes "finite A" and "\y\A. eventually (\x. P x y) net" + shows "eventually (\x. \y\A. P x y) net" +using assms by (induct set: finite, simp, simp add: eventually_conj) + +lemma eventually_all_finite: (* TODO: move *) + fixes P :: "'a \ 'b::finite \ bool" + assumes "\y. eventually (\x. P x y) net" + shows "eventually (\x. \y. P x y) net" +using eventually_Ball_finite [of UNIV P] assms by simp + +lemma tendsto_vector: + assumes "\i. ((\x. f x $ i) ---> a $ i) net" + shows "((\x. f x) ---> a) net" +proof (rule topological_tendstoI) + fix S assume "open S" and "a \ S" + then obtain A where A: "\i. open (A i)" "\i. a $ i \ A i" + and S: "\y. \i. y $ i \ A i \ y \ S" + unfolding open_vector_def by metis + have "\i. eventually (\x. f x $ i \ A i) net" + using assms A by (rule topological_tendstoD) + hence "eventually (\x. \i. f x $ i \ A i) net" + by (rule eventually_all_finite) + thus "eventually (\x. f x \ S) net" + by (rule eventually_elim1, simp add: S) +qed + +lemma tendsto_Cart_lambda [tendsto_intros]: + assumes "\i. ((\x. f x i) ---> a i) net" + shows "((\x. \ i. f x i) ---> (\ i. a i)) net" +using assms by (simp add: tendsto_vector) + + +subsection {* Metric *} + +(* TODO: move somewhere else *) +lemma finite_choice: "finite A \ \x\A. \y. P x y \ \f. \x\A. P x (f x)" +apply (induct set: finite, simp_all) +apply (clarify, rename_tac y) +apply (rule_tac x="f(x:=y)" in exI, simp) +done + +instantiation cart :: (metric_space, finite) metric_space +begin + +definition dist_vector_def: + "dist x y = setL2 (\i. 
dist (x$i) (y$i)) UNIV" + +lemma dist_nth_le: "dist (x $ i) (y $ i) \ dist x y" +unfolding dist_vector_def +by (rule member_le_setL2) simp_all + +instance proof + fix x y :: "'a ^ 'b" + show "dist x y = 0 \ x = y" + unfolding dist_vector_def + by (simp add: setL2_eq_0_iff Cart_eq) +next + fix x y z :: "'a ^ 'b" + show "dist x y \ dist x z + dist y z" + unfolding dist_vector_def + apply (rule order_trans [OF _ setL2_triangle_ineq]) + apply (simp add: setL2_mono dist_triangle2) + done +next + (* FIXME: long proof! *) + fix S :: "('a ^ 'b) set" + show "open S \ (\x\S. \e>0. \y. dist y x < e \ y \ S)" + unfolding open_vector_def open_dist + apply safe + apply (drule (1) bspec) + apply clarify + apply (subgoal_tac "\e>0. \i y. dist y (x$i) < e \ y \ A i") + apply clarify + apply (rule_tac x=e in exI, clarify) + apply (drule spec, erule mp, clarify) + apply (drule spec, drule spec, erule mp) + apply (erule le_less_trans [OF dist_nth_le]) + apply (subgoal_tac "\i\UNIV. \e>0. \y. dist y (x$i) < e \ y \ A i") + apply (drule finite_choice [OF finite], clarify) + apply (rule_tac x="Min (range f)" in exI, simp) + apply clarify + apply (drule_tac x=i in spec, clarify) + apply (erule (1) bspec) + apply (drule (1) bspec, clarify) + apply (subgoal_tac "\r. (\i::'b. 0 < r i) \ e = setL2 r UNIV") + apply clarify + apply (rule_tac x="\i. {y. dist y (x$i) < r i}" in exI) + apply (rule conjI) + apply clarify + apply (rule conjI) + apply (clarify, rename_tac y) + apply (rule_tac x="r i - dist y (x$i)" in exI, rule conjI, simp) + apply clarify + apply (simp only: less_diff_eq) + apply (erule le_less_trans [OF dist_triangle]) + apply simp + apply clarify + apply (drule spec, erule mp) + apply (simp add: dist_vector_def setL2_strict_mono) + apply (rule_tac x="\i. e / sqrt (of_nat CARD('b))" in exI) + apply (simp add: divide_pos_pos setL2_constant) + done +qed + +end + +lemma LIMSEQ_Cart_nth: + "(X ----> a) \ (\n. X n $ i) ----> a $ i" +unfolding LIMSEQ_conv_tendsto by (rule tendsto_Cart_nth) + +lemma LIM_Cart_nth: + "(f -- x --> y) \ (\x. f x $ i) -- x --> y $ i" +unfolding LIM_conv_tendsto by (rule tendsto_Cart_nth) + +lemma Cauchy_Cart_nth: + "Cauchy (\n. X n) \ Cauchy (\n. X n $ i)" +unfolding Cauchy_def by (fast intro: le_less_trans [OF dist_nth_le]) + +lemma LIMSEQ_vector: + assumes "\i. (\n. X n $ i) ----> (a $ i)" + shows "X ----> a" +using assms unfolding LIMSEQ_conv_tendsto by (rule tendsto_vector) + +lemma Cauchy_vector: + fixes X :: "nat \ 'a::metric_space ^ 'n" + assumes X: "\i. Cauchy (\n. X n $ i)" + shows "Cauchy (\n. X n)" +proof (rule metric_CauchyI) + fix r :: real assume "0 < r" + then have "0 < r / of_nat CARD('n)" (is "0 < ?s") + by (simp add: divide_pos_pos) + def N \ "\i. LEAST N. \m\N. \n\N. dist (X m $ i) (X n $ i) < ?s" + def M \ "Max (range N)" + have "\i. \N. \m\N. \n\N. dist (X m $ i) (X n $ i) < ?s" + using X `0 < ?s` by (rule metric_CauchyD) + hence "\i. \m\N i. \n\N i. dist (X m $ i) (X n $ i) < ?s" + unfolding N_def by (rule LeastI_ex) + hence M: "\i. \m\M. \n\M. dist (X m $ i) (X n $ i) < ?s" + unfolding M_def by simp + { + fix m n :: nat + assume "M \ m" "M \ n" + have "dist (X m) (X n) = setL2 (\i. dist (X m $ i) (X n $ i)) UNIV" + unfolding dist_vector_def .. + also have "\ \ setsum (\i. dist (X m $ i) (X n $ i)) UNIV" + by (rule setL2_le_setsum [OF zero_le_dist]) + also have "\ < setsum (\i::'n. ?s) UNIV" + by (rule setsum_strict_mono, simp_all add: M `M \ m` `M \ n`) + also have "\ = r" + by simp + finally have "dist (X m) (X n) < r" . + } + hence "\m\M. \n\M. 
dist (X m) (X n) < r" + by simp + then show "\M. \m\M. \n\M. dist (X m) (X n) < r" .. +qed + +instance cart :: (complete_space, finite) complete_space +proof + fix X :: "nat \ 'a ^ 'b" assume "Cauchy X" + have "\i. (\n. X n $ i) ----> lim (\n. X n $ i)" + using Cauchy_Cart_nth [OF `Cauchy X`] + by (simp add: Cauchy_convergent_iff convergent_LIMSEQ_iff) + hence "X ----> Cart_lambda (\i. lim (\n. X n $ i))" + by (simp add: LIMSEQ_vector) + then show "convergent X" + by (rule convergentI) +qed + + +subsection {* Normed vector space *} + +instantiation cart :: (real_normed_vector, finite) real_normed_vector +begin + +definition norm_vector_def: + "norm x = setL2 (\i. norm (x$i)) UNIV" + +definition vector_sgn_def: + "sgn (x::'a^'b) = scaleR (inverse (norm x)) x" + +instance proof + fix a :: real and x y :: "'a ^ 'b" + show "0 \ norm x" + unfolding norm_vector_def + by (rule setL2_nonneg) + show "norm x = 0 \ x = 0" + unfolding norm_vector_def + by (simp add: setL2_eq_0_iff Cart_eq) + show "norm (x + y) \ norm x + norm y" + unfolding norm_vector_def + apply (rule order_trans [OF _ setL2_triangle_ineq]) + apply (simp add: setL2_mono norm_triangle_ineq) + done + show "norm (scaleR a x) = \a\ * norm x" + unfolding norm_vector_def + by (simp add: setL2_right_distrib) + show "sgn x = scaleR (inverse (norm x)) x" + by (rule vector_sgn_def) + show "dist x y = norm (x - y)" + unfolding dist_vector_def norm_vector_def + by (simp add: dist_norm) +qed + +end + +lemma norm_nth_le: "norm (x $ i) \ norm x" +unfolding norm_vector_def +by (rule member_le_setL2) simp_all + +interpretation Cart_nth: bounded_linear "\x. x $ i" +apply default +apply (rule vector_add_component) +apply (rule vector_scaleR_component) +apply (rule_tac x="1" in exI, simp add: norm_nth_le) +done + +instance cart :: (banach, finite) banach .. + + +subsection {* Inner product space *} + +instantiation cart :: (real_inner, finite) real_inner +begin + +definition inner_vector_def: + "inner x y = setsum (\i. inner (x$i) (y$i)) UNIV" + +instance proof + fix r :: real and x y z :: "'a ^ 'b" + show "inner x y = inner y x" + unfolding inner_vector_def + by (simp add: inner_commute) + show "inner (x + y) z = inner x z + inner y z" + unfolding inner_vector_def + by (simp add: inner_add_left setsum_addf) + show "inner (scaleR r x) y = r * inner x y" + unfolding inner_vector_def + by (simp add: setsum_right_distrib) + show "0 \ inner x x" + unfolding inner_vector_def + by (simp add: setsum_nonneg) + show "inner x x = 0 \ x = 0" + unfolding inner_vector_def + by (simp add: Cart_eq setsum_nonneg_eq_0_iff) + show "norm x = sqrt (inner x x)" + unfolding inner_vector_def norm_vector_def setL2_def + by (simp add: power2_norm_eq_inner) +qed + +end + +end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Multivariate_Analysis/Integration.thy --- a/src/HOL/Multivariate_Analysis/Integration.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Multivariate_Analysis/Integration.thy Tue May 04 20:30:22 2010 +0200 @@ -137,27 +137,27 @@ hence "\x. 
ball x (e/2) \ s \ (\f)" proof(erule_tac disjE) let ?z = "x - (e/2) *\<^sub>R basis k" assume as:"x$k = a$k" have "ball ?z (e / 2) \ i = {}" apply(rule ccontr) unfolding ex_in_conv[THEN sym] proof(erule exE) fix y assume "y \ ball ?z (e / 2) \ i" hence "dist ?z y < e/2" and yi:"y\i" by auto - hence "\(?z - y) $ k\ < e/2" using component_le_norm[of "?z - y" k] unfolding vector_dist_norm by auto + hence "\(?z - y) $ k\ < e/2" using component_le_norm[of "?z - y" k] unfolding dist_norm by auto hence "y$k < a$k" unfolding vector_component_simps vector_scaleR_component as using e[THEN conjunct1] by(auto simp add:field_simps) hence "y \ i" unfolding ab mem_interval not_all by(rule_tac x=k in exI,auto) thus False using yi by auto qed moreover have "ball ?z (e/2) \ s \ (\insert i f)" apply(rule order_trans[OF _ e[THEN conjunct2, unfolded lem1]]) proof fix y assume as:"y\ ball ?z (e/2)" have "norm (x - y) \ \e\ / 2 + norm (x - y - (e / 2) *\<^sub>R basis k)" apply-apply(rule order_trans,rule norm_triangle_sub[of "x - y" "(e/2) *\<^sub>R basis k"]) unfolding norm_scaleR norm_basis by auto - also have "\ < \e\ / 2 + \e\ / 2" apply(rule add_strict_left_mono) using as unfolding mem_ball vector_dist_norm using e by(auto simp add:field_simps) - finally show "y\ball x e" unfolding mem_ball vector_dist_norm using e by(auto simp add:field_simps) qed + also have "\ < \e\ / 2 + \e\ / 2" apply(rule add_strict_left_mono) using as unfolding mem_ball dist_norm using e by(auto simp add:field_simps) + finally show "y\ball x e" unfolding mem_ball dist_norm using e by(auto simp add:field_simps) qed ultimately show ?thesis apply(rule_tac x="?z" in exI) unfolding Union_insert by auto next let ?z = "x + (e/2) *\<^sub>R basis k" assume as:"x$k = b$k" have "ball ?z (e / 2) \ i = {}" apply(rule ccontr) unfolding ex_in_conv[THEN sym] proof(erule exE) fix y assume "y \ ball ?z (e / 2) \ i" hence "dist ?z y < e/2" and yi:"y\i" by auto - hence "\(?z - y) $ k\ < e/2" using component_le_norm[of "?z - y" k] unfolding vector_dist_norm by auto + hence "\(?z - y) $ k\ < e/2" using component_le_norm[of "?z - y" k] unfolding dist_norm by auto hence "y$k > b$k" unfolding vector_component_simps vector_scaleR_component as using e[THEN conjunct1] by(auto simp add:field_simps) hence "y \ i" unfolding ab mem_interval not_all by(rule_tac x=k in exI,auto) thus False using yi by auto qed moreover have "ball ?z (e/2) \ s \ (\insert i f)" apply(rule order_trans[OF _ e[THEN conjunct2, unfolded lem1]]) proof fix y assume as:"y\ ball ?z (e/2)" have "norm (x - y) \ \e\ / 2 + norm (x - y + (e / 2) *\<^sub>R basis k)" apply-apply(rule order_trans,rule norm_triangle_sub[of "x - y" "- (e/2) *\<^sub>R basis k"]) unfolding norm_scaleR norm_basis by auto - also have "\ < \e\ / 2 + \e\ / 2" apply(rule add_strict_left_mono) using as unfolding mem_ball vector_dist_norm using e by(auto simp add:field_simps) - finally show "y\ball x e" unfolding mem_ball vector_dist_norm using e by(auto simp add:field_simps) qed + also have "\ < \e\ / 2 + \e\ / 2" apply(rule add_strict_left_mono) using as unfolding mem_ball dist_norm using e by(auto simp add:field_simps) + finally show "y\ball x e" unfolding mem_ball dist_norm using e by(auto simp add:field_simps) qed ultimately show ?thesis apply(rule_tac x="?z" in exI) unfolding Union_insert by auto qed then guess x .. 
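The estimate used twice in the hunk above, once for each shifted centre ?z = x \pm (e/2) * basis k, is the plain triangle inequality (with e > 0, and writing e_k for basis k): for y in the ball of radius e/2 around ?z,
\[
\lVert x - y\rVert \;\le\; \tfrac{e}{2} + \bigl\lVert x - y \pm \tfrac{e}{2}\,e_k \bigr\rVert
\;<\; \tfrac{e}{2} + \tfrac{e}{2} \;=\; e ,
\]
so y lies in ball x e.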
hence "x \ s \ interior (\f)" unfolding lem1[where U="\f",THEN sym] using centre_in_ball e[THEN conjunct1] by auto thus ?thesis apply-apply(rule lem2,rule insert(3)) using insert(4) by auto qed qed qed qed note * = this @@ -933,7 +933,7 @@ lemma has_integral_vec1: assumes "(f has_integral k) {a..b}" shows "((\x. vec1 (f x)) has_integral (vec1 k)) {a..b}" proof- have *:"\p. (\(x, k)\p. content k *\<^sub>R vec1 (f x)) - vec1 k = vec1 ((\(x, k)\p. content k *\<^sub>R f x) - k)" - unfolding vec_sub Cart_eq by(auto simp add:vec1_dest_vec1_simps split_beta) + unfolding vec_sub Cart_eq by(auto simp add: split_beta) show ?thesis using assms unfolding has_integral apply safe apply(erule_tac x=e in allE,safe) apply(rule_tac x=d in exI,safe) apply(erule_tac x=p in allE,safe) unfolding * norm_vector_1 by auto qed @@ -1067,7 +1067,7 @@ proof- case goal1 guess n using real_arch_pow2[of "(setsum (\i. b$i - a$i) UNIV) / e"] .. note n=this show ?case apply(rule_tac x=n in exI) proof(rule,rule) fix x y assume xy:"x\{A n..B n}" "y\{A n..B n}" - have "dist x y \ setsum (\i. abs((x - y)$i)) UNIV" unfolding vector_dist_norm by(rule norm_le_l1) + have "dist x y \ setsum (\i. abs((x - y)$i)) UNIV" unfolding dist_norm by(rule norm_le_l1) also have "\ \ setsum (\i. B n$i - A n$i) UNIV" proof(rule setsum_mono) fix i show "\(x - y) $ i\ \ B n $ i - A n $ i" using xy[unfolded mem_interval,THEN spec[where x=i]] @@ -1131,7 +1131,7 @@ guess d2 by(rule has_integralD[OF goal1(2) e]) note d2=this guess p by(rule fine_division_exists[OF gauge_inter[OF d1(1) d2(1)],of a b]) note p=this let ?c = "(\(x, k)\p. content k *\<^sub>R f x)" have "norm (k1 - k2) \ norm (?c - k2) + norm (?c - k1)" - using norm_triangle_ineq4[of "k1 - ?c" "k2 - ?c"] by(auto simp add:group_simps norm_minus_commute) + using norm_triangle_ineq4[of "k1 - ?c" "k2 - ?c"] by(auto simp add:algebra_simps norm_minus_commute) also have "\ < norm (k1 - k2) / 2 + norm (k1 - k2) / 2" apply(rule add_strict_mono) apply(rule_tac[!] d2(2) d1(2)) using p unfolding fine_def by auto finally show False by auto @@ -1244,7 +1244,7 @@ unfolding scaleR_right_distrib setsum_addf[of "\(x,k). content k *\<^sub>R f x" "\(x,k). content k *\<^sub>R g x" p,THEN sym] by(rule setsum_cong2,auto) have "norm ((\(x, k)\p. content k *\<^sub>R (f x + g x)) - (k + l)) = norm (((\(x, k)\p. content k *\<^sub>R f x) - k) + ((\(x, k)\p. content k *\<^sub>R g x) - l))" - unfolding * by(auto simp add:group_simps) also let ?res = "\" + unfolding * by(auto simp add:algebra_simps) also let ?res = "\" from as have *:"d1 fine p" "d2 fine p" unfolding fine_inter by auto have "?res < e/2 + e/2" apply(rule le_less_trans[OF norm_triangle_ineq]) apply(rule add_strict_mono) using d1(2)[OF as(1) *(1)] and d2(2)[OF as(1) *(2)] by auto @@ -1268,7 +1268,7 @@ lemma has_integral_sub: shows "(f has_integral k) s \ (g has_integral l) s \ ((\x. f(x) - g(x)) has_integral (k - l)) s" - using has_integral_add[OF _ has_integral_neg,of f k s g l] unfolding group_simps by auto + using has_integral_add[OF _ has_integral_neg,of f k s g l] unfolding algebra_simps by auto lemma integral_0: "integral s (\x::real^'n. 0::real^'m) = 0" by(rule integral_unique has_integral_0)+ @@ -1356,7 +1356,7 @@ lemma has_integral_eq_eq: shows "\x\s. 
f x = g x \ ((f has_integral k) s \ (g has_integral k) s)" - using has_integral_eq[of s f g] has_integral_eq[of s g f] by auto + using has_integral_eq[of s f g] has_integral_eq[of s g f] by rule auto lemma has_integral_null[dest]: assumes "content({a..b}) = 0" shows "(f has_integral 0) ({a..b})" @@ -1417,7 +1417,7 @@ show ?case apply(rule_tac x=d in exI,rule,rule d) apply(rule,rule,rule,(erule conjE)+) proof- fix p1 p2 assume as:"p1 tagged_division_of {a..b}" "d fine p1" "p2 tagged_division_of {a..b}" "d fine p2" show "norm ((\(x, k)\p1. content k *\<^sub>R f x) - (\(x, k)\p2. content k *\<^sub>R f x)) < e" - apply(rule dist_triangle_half_l[where y=y,unfolded vector_dist_norm]) + apply(rule dist_triangle_half_l[where y=y,unfolded dist_norm]) using d(2)[OF conjI[OF as(1-2)]] d(2)[OF conjI[OF as(3-4)]] . qed qed next assume "\e>0. \d. ?P e d" hence "\n::nat. \d. ?P (inverse(real (n + 1))) d" by auto @@ -1447,7 +1447,7 @@ have *:"inverse (real (N1 + N2 + 1)) < e / 2" apply(rule less_trans) using N1 by auto show "norm ((\(x, k)\q. content k *\<^sub>R f x) - y) < e" apply(rule norm_triangle_half_r) apply(rule less_trans[OF _ *]) apply(subst N1', rule d(2)[of "p (N1+N2)"]) defer - using N2[rule_format,unfolded vector_dist_norm,of "N1+N2"] + using N2[rule_format,unfolded dist_norm,of "N1+N2"] using as dp[of "N1 - 1 + 1 + N2" "N1 + N2"] using p(1)[of "N1 + N2"] using N1 by auto qed qed qed subsection {* Additivity of integral on abutting intervals. *} @@ -1554,7 +1554,7 @@ using p(2)[unfolded fine_def,rule_format,OF as,unfolded split_conv] by auto hence "\y. y \ ball x \x $ k - c\ \ {x. x $ k \ c}" using goal1(1) by blast then guess y .. hence "\x $ k - y $ k\ < \x $ k - c\" "y$k \ c" apply-apply(rule le_less_trans) - using component_le_norm[of "x - y" k,unfolded vector_minus_component] by(auto simp add:vector_dist_norm) + using component_le_norm[of "x - y" k,unfolded vector_minus_component] by(auto simp add:dist_norm) thus False using goal1(2)[unfolded not_le] by(auto simp add:field_simps) qed show "~(kk \ {x. x$k \ c} = {}) \ x$k \ c" @@ -1563,7 +1563,7 @@ using p(2)[unfolded fine_def,rule_format,OF as,unfolded split_conv] by auto hence "\y. y \ ball x \x $ k - c\ \ {x. x $ k \ c}" using goal1(1) by blast then guess y .. hence "\x $ k - y $ k\ < \x $ k - c\" "y$k \ c" apply-apply(rule le_less_trans) - using component_le_norm[of "x - y" k,unfolded vector_minus_component] by(auto simp add:vector_dist_norm) + using component_le_norm[of "x - y" k,unfolded vector_minus_component] by(auto simp add:dist_norm) thus False using goal1(2)[unfolded not_le] by(auto simp add:field_simps) qed qed @@ -1653,7 +1653,7 @@ proof- have *:"{a..b} = ({a..b} \ {x. x$k \ c}) \ ({a..b} \ {x. x$k \ c})" by auto show ?thesis apply(subst *) apply(rule tagged_division_union[OF assms]) unfolding interval_split interior_closed_interval - by(auto simp add: vector_less_def Cart_lambda_beta elim!:allE[where x=k]) qed + by(auto simp add: vector_less_def elim!:allE[where x=k]) qed lemma has_integral_separate_sides: fixes f::"real^'m \ 'a::real_normed_vector" assumes "(f has_integral i) ({a..b})" "e>0" @@ -1676,7 +1676,7 @@ then guess e unfolding mem_interior .. note e=this have x:"x$k = c" using x interior_subset by fastsimp have *:"\i. \(x - (x + (\ i. if i = k then e / 2 else 0))) $ i\ = (if i = k then e/2 else 0)" using e by auto - have "x + (\ i. if i = k then e/2 else 0) \ ball x e" unfolding mem_ball vector_dist_norm + have "x + (\ i. 
if i = k then e/2 else 0) \ ball x e" unfolding mem_ball dist_norm apply(rule le_less_trans[OF norm_le_l1]) unfolding * unfolding setsum_delta[OF finite_UNIV] using e by auto hence "x + (\ i. if i = k then e/2 else 0) \ {x. x$k = c}" using e by auto @@ -1703,7 +1703,7 @@ proof- guess p using fine_division_exists[OF d(1), of a' b] . note p=this show ?thesis using norm_triangle_half_l[OF d(2)[of p1 p] d(2)[of p2 p]] using as unfolding interval_split b'_def[symmetric] a'_def[symmetric] - using p using assms by(auto simp add:group_simps) + using p using assms by(auto simp add:algebra_simps) qed qed show "?P {x. x $ k \ c}" apply(rule_tac x=d in exI) apply(rule,rule d) apply(rule,rule,rule) proof- fix p1 p2 assume as:"p1 tagged_division_of {a..b} \ {x. x $ k \ c} \ d fine p1 \ p2 tagged_division_of {a..b} \ {x. x $ k \ c} \ d fine p2" @@ -1711,7 +1711,7 @@ proof- guess p using fine_division_exists[OF d(1), of a b'] . note p=this show ?thesis using norm_triangle_half_l[OF d(2)[of p p1] d(2)[of p p2]] using as unfolding interval_split b'_def[symmetric] a'_def[symmetric] - using p using assms by(auto simp add:group_simps) qed qed qed qed + using p using assms by(auto simp add:algebra_simps) qed qed qed qed subsection {* Generalized notion of additivity. *} @@ -1743,11 +1743,11 @@ subsection {* Using additivity of lifted function to encode definedness. *} lemma forall_option: "(\x. P x) \ P None \ (\x. P(Some x))" - by (metis map_of.simps option.nchotomy) + by (metis option.nchotomy) lemma exists_option: "(\x. P x) \ P None \ (\x. P(Some x))" - by (metis map_of.simps option.nchotomy) + by (metis option.nchotomy) fun lifted where "lifted (opp::'a\'a\'b) (Some x) (Some y) = Some(opp x y)" | @@ -1842,13 +1842,12 @@ lemma operative_content[intro]: "operative (op +) content" unfolding operative_def content_split[THEN sym] neutral_add by auto -lemma neutral_monoid[simp]: "neutral ((op +)::('a::comm_monoid_add) \ 'a \ 'a) = 0" - unfolding neutral_def apply(rule some_equality) defer - apply(erule_tac x=0 in allE) by auto +lemma neutral_monoid: "neutral ((op +)::('a::comm_monoid_add) \ 'a \ 'a) = 0" + by (rule neutral_add) (* FIXME: duplicate *) lemma monoidal_monoid[intro]: shows "monoidal ((op +)::('a::comm_monoid_add) \ 'a \ 'a)" - unfolding monoidal_def neutral_monoid by(auto simp add: group_simps) + unfolding monoidal_def neutral_monoid by(auto simp add: algebra_simps) lemma operative_integral: fixes f::"real^'n \ 'a::banach" shows "operative (lifted(op +)) (\i. if f integrable_on i then Some(integral i f) else None)" @@ -1941,7 +1940,7 @@ apply(rule_tac x="(k,(interval_lowerbound l)$k)" in exI) defer apply(rule_tac x="(k,(interval_upperbound l)$k)" in exI) unfolding division_points_def unfolding interval_bounds[OF ab] - apply (auto simp add:interval_bounds) unfolding * by auto + apply auto unfolding * by auto thus "?D1 \ ?D" apply-apply(rule,rule division_points_subset[OF assms(1-4)]) by auto have *:"interval_lowerbound ({a..b} \ {x. x $ k \ interval_lowerbound l $ k}) $ k = interval_lowerbound l $ k" @@ -1952,7 +1951,7 @@ apply(rule_tac x="(k,(interval_lowerbound l)$k)" in exI) defer apply(rule_tac x="(k,(interval_upperbound l)$k)" in exI) unfolding division_points_def unfolding interval_bounds[OF ab] - apply (auto simp add:interval_bounds) unfolding * by auto + apply auto unfolding * by auto thus "?D2 \ ?D" apply-apply(rule,rule division_points_subset[OF assms(1-4)]) by auto qed subsection {* Preservation by divisions and tagged divisions. 
*} @@ -2254,7 +2253,7 @@ assumes "p tagged_division_of {a..b}" "\x\{a..b}. (f x)$i \ (g x)$i" shows "(setsum (\(x,k). content k *\<^sub>R f x) p)$i \ (setsum (\(x,k). content k *\<^sub>R g x) p)$i" unfolding setsum_component apply(rule setsum_mono) - apply(rule mp) defer apply assumption apply(induct_tac x,rule) unfolding split_conv + apply(rule mp) defer apply assumption unfolding split_paired_all apply rule unfolding split_conv proof- fix a b assume ab:"(a,b) \ p" note assm = tagged_division_ofD(2-4)[OF assms(1) ab] from this(3) guess u v apply-by(erule exE)+ note b=this show "(content b *\<^sub>R f a) $ i \ (content b *\<^sub>R g a) $ i" unfolding b @@ -2381,11 +2380,11 @@ have lem2:"\s1 s2 i1 i2. norm(s2 - s1) \ e/2 \ norm(s1 - i1) < e / 4 \ norm(s2 - i2) < e / 4 \norm(i1 - i2) < e" proof- case goal1 have "norm (i1 - i2) \ norm (i1 - s1) + norm (s1 - s2) + norm (s2 - i2)" using norm_triangle_ineq[of "i1 - s1" "s1 - i2"] - using norm_triangle_ineq[of "s1 - s2" "s2 - i2"] by(auto simp add:group_simps) - also have "\ < e" using goal1 unfolding norm_minus_commute by(auto simp add:group_simps) + using norm_triangle_ineq[of "s1 - s2" "s2 - i2"] by(auto simp add:algebra_simps) + also have "\ < e" using goal1 unfolding norm_minus_commute by(auto simp add:algebra_simps) finally show ?case . qed - show ?case unfolding vector_dist_norm apply(rule lem2) defer + show ?case unfolding dist_norm apply(rule lem2) defer apply(rule gm(2)[OF conjI[OF p(1)]],rule_tac[2] gn(2)[OF conjI[OF p(1)]]) using conjunctD2[OF p(2)[unfolded fine_inter]] apply- apply assumption+ apply(rule order_trans) apply(rule rsum_diff_bound[OF p(1), where e="2 / real M"]) @@ -2399,7 +2398,7 @@ also have "\ = 2 / real M" unfolding real_divide_def by auto finally show "norm (g n x - g m x) \ 2 / real M" using norm_triangle_le[of "g n x - f x" "f x - g m x" "2 / real M"] - by(auto simp add:group_simps simp add:norm_minus_commute) + by(auto simp add:algebra_simps simp add:norm_minus_commute) qed qed qed from this[unfolded convergent_eq_cauchy[THEN sym]] guess s .. note s=this @@ -2413,8 +2412,8 @@ have lem:"\sf sg i. norm(sf - sg) \ e / 3 \ norm(i - s) < e / 3 \ norm(sg - i) < e / 3 \ norm(sf - s) < e" proof- case goal1 have "norm (sf - s) \ norm (sf - sg) + norm (sg - i) + norm (i - s)" using norm_triangle_ineq[of "sf - sg" "sg - s"] - using norm_triangle_ineq[of "sg - i" " i - s"] by(auto simp add:group_simps) - also have "\ < e" using goal1 unfolding norm_minus_commute by(auto simp add:group_simps) + using norm_triangle_ineq[of "sg - i" " i - s"] by(auto simp add:algebra_simps) + also have "\ < e" using goal1 unfolding norm_minus_commute by(auto simp add:algebra_simps) finally show ?case . qed show ?case apply(rule_tac x=g' in exI) apply(rule,rule g') @@ -2427,7 +2426,7 @@ apply-apply(rule less_le_trans,assumption) using `e>0` by auto thus "inverse (real (N1 + N2) + 1) * content {a..b} \ e / 3" unfolding inverse_eq_divide by(auto simp add:field_simps) - show "norm (i (N1 + N2) - s) < e / 3" by(rule N1[rule_format,unfolded vector_dist_norm],auto) + show "norm (i (N1 + N2) - s) < e / 3" by(rule N1[rule_format,unfolded dist_norm],auto) qed qed qed qed subsection {* Negligible sets. *} @@ -2511,7 +2510,7 @@ show "content l = content (l \ {x. 
\x $ k - c\ \ d})" apply(rule arg_cong[where f=content]) apply(rule set_ext,rule,rule) unfolding mem_Collect_eq proof- fix y assume y:"y\l" note p[THEN conjunct2,unfolded fine_def,rule_format,OF as(1),unfolded split_conv] - note this[unfolded subset_eq mem_ball vector_dist_norm,rule_format,OF y] note le_less_trans[OF component_le_norm[of _ k] this] + note this[unfolded subset_eq mem_ball dist_norm,rule_format,OF y] note le_less_trans[OF component_le_norm[of _ k] this] thus "\y $ k - c\ \ d" unfolding Cart_nth.diff xk by auto qed auto qed note p'= tagged_division_ofD[OF p[THEN conjunct1]] and p''=division_of_tagged_division[OF p[THEN conjunct1]] @@ -2838,7 +2837,7 @@ proof safe show "(\y. f x) integrable_on l" unfolding integrable_on_def l by(rule,rule has_integral_const) fix y assume y:"y\l" note fineD[OF p(2) as,unfolded subset_eq,rule_format,OF this] note d(2)[OF _ _ this[unfolded mem_ball]] - thus "norm (f y - f x) \ e" using y p'(2-3)[OF as] unfolding vector_dist_norm l norm_minus_commute by fastsimp qed qed + thus "norm (f y - f x) \ e" using y p'(2-3)[OF as] unfolding dist_norm l norm_minus_commute by fastsimp qed qed from e have "0 \ e" by auto from approximable_on_division[OF this division_of_tagged_division[OF p(1)] *] guess g . thus "\g. (\x\{a..b}. norm (f x - g x) \ e) \ g integrable_on {a..b}" by auto qed @@ -2903,7 +2902,7 @@ shows "setsum (\(x,k). f(interval_upperbound k) - f(interval_lowerbound k)) p = f b - f a" proof- let ?f = "(\k::(real^1) set. if k = {} then 0 else f(interval_upperbound k) - f(interval_lowerbound k))" have *:"operative op + ?f" unfolding operative_1_lt[OF monoidal_monoid] interval_eq_empty_1 - by(auto simp add:not_less interval_bound_1 vector_less_def) + by(auto simp add:not_less vector_less_def) have **:"{a..b} \ {}" using assms(1) by auto note operative_tagged_division[OF monoidal_monoid * assms(2)] note * = this[unfolded if_not_P[OF **] interval_bound_1[OF assms(1)],THEN sym ] show ?thesis unfolding * apply(subst setsum_iterate[THEN sym]) defer @@ -2956,14 +2955,14 @@ have ball:"\xa\k. xa \ ball x (d (dest_vec1 x))" using as(2)[unfolded fine_def,rule_format,OF `(x,k)\p`,unfolded split_conv subset_eq] . have "norm ((v$1 - u$1) *\<^sub>R f' x - (f v - f u)) \ norm (f u - f x - (u$1 - x$1) *\<^sub>R f' x) + norm (f v - f x - (v$1 - x$1) *\<^sub>R f' x)" apply(rule order_trans[OF _ norm_triangle_ineq4]) apply(rule eq_refl) apply(rule arg_cong[where f=norm]) - unfolding scaleR.diff_left by(auto simp add:group_simps) + unfolding scaleR.diff_left by(auto simp add:algebra_simps) also have "... \ e * norm (dest_vec1 u - dest_vec1 x) + e * norm (dest_vec1 v - dest_vec1 x)" apply(rule add_mono) apply(rule d(2)[of "x$1" "u$1",unfolded o_def vec1_dest_vec1]) prefer 4 apply(rule d(2)[of "x$1" "v$1",unfolded o_def vec1_dest_vec1]) using ball[rule_format,of u] ball[rule_format,of v] - using xk(1-2) unfolding k subset_eq by(auto simp add:vector_dist_norm norm_real) + using xk(1-2) unfolding k subset_eq by(auto simp add:dist_norm norm_real) also have "... \ e * dest_vec1 (interval_upperbound k - interval_lowerbound k)" - unfolding k interval_bound_1[OF *] using xk(1) unfolding k by(auto simp add:vector_dist_norm norm_real field_simps) + unfolding k interval_bound_1[OF *] using xk(1) unfolding k by(auto simp add:dist_norm norm_real field_simps) finally show "norm (content k *\<^sub>R f' x - (f (interval_upperbound k) - f (interval_lowerbound k))) \ e * dest_vec1 (interval_upperbound k - interval_lowerbound k)" unfolding k interval_bound_1[OF *] content_1[OF *] . 
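The chain of inequalities just above (in the hunk from the proof of the fundamental theorem of calculus) is, stripped of the vec1/dest_vec1 coercions and writing [u,v] for the tagged interval k, the usual tagged-division estimate: for a tagged interval (x,[u,v]) fine for the gauge coming from differentiability at x,
\[
\lVert (v-u)\,f'(x) - (f(v)-f(u))\rVert
\;\le\; \lVert f(u)-f(x)-(u-x)\,f'(x)\rVert + \lVert f(v)-f(x)-(v-x)\,f'(x)\rVert
\;\le\; e\,|u-x| + e\,|v-x| \;\le\; e\,(v-u),
\]
using u \le x \le v in the last step; summed over the division, the left-hand sides telescope to f(b) - f(a) (the setsum lemma earlier in the hunk) while the right-hand sides add up to e\,(b-a).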
qed(insert as, auto) qed qed @@ -3012,7 +3011,7 @@ proof- show "\(y - x) $ i\ < e" unfolding y_def Cart_lambda_beta vector_minus_component if_P[OF refl] apply(cases) apply(subst if_P,assumption) unfolding if_not_P unfolding i xi using di as(2) by auto show "(\i\UNIV - {i}. \(y - x) $ i\) \ (\i\UNIV. 0)" unfolding y_def by auto - qed auto thus "dist y x < e" unfolding vector_dist_norm by auto + qed auto thus "dist y x < e" unfolding dist_norm by auto have "y\k" unfolding k mem_interval apply rule apply(erule_tac x=i in allE) using xyi unfolding k i xi by auto moreover have "y \ \s" unfolding s mem_interval proof note simps = y_def Cart_lambda_beta if_not_P @@ -3098,7 +3097,7 @@ proof(rule,rule,rule d,safe) case goal1 show ?case proof(cases "y < x") case False have "f \ dest_vec1 integrable_on {vec1 a..vec1 y}" apply(rule integrable_subinterval,rule integrable_continuous) apply(rule continuous_on_o_dest_vec1 assms)+ unfolding not_less using assms(2) goal1 by auto - hence *:"?I a y - ?I a x = ?I x y" unfolding group_simps apply(subst eq_commute) apply(rule integral_combine) + hence *:"?I a y - ?I a x = ?I x y" unfolding algebra_simps apply(subst eq_commute) apply(rule integral_combine) using False unfolding not_less using assms(2) goal1 by auto have **:"norm (y - x) = content {vec1 x..vec1 y}" apply(subst content_1) using False unfolding not_less by auto show ?thesis unfolding ** apply(rule has_integral_bound[where f="(\u. f u - f x) o dest_vec1"]) unfolding * unfolding o_def @@ -3108,11 +3107,11 @@ have *:"y - x = norm(y - x)" using False by auto show "((\xa. f x) has_integral (y - x) *\<^sub>R f x) {vec1 x..vec1 y}" apply(subst *) unfolding ** by auto show "\xa\{vec1 x..vec1 y}. norm (f (dest_vec1 xa) - f x) \ e" apply safe apply(rule less_imp_le) - apply(rule d(2)[unfolded vector_dist_norm]) using assms(2) using goal1 by auto + apply(rule d(2)[unfolded dist_norm]) using assms(2) using goal1 by auto qed(insert e,auto) next case True have "f \ dest_vec1 integrable_on {vec1 a..vec1 x}" apply(rule integrable_subinterval,rule integrable_continuous) apply(rule continuous_on_o_dest_vec1 assms)+ unfolding not_less using assms(2) goal1 by auto - hence *:"?I a x - ?I a y = ?I y x" unfolding group_simps apply(subst eq_commute) apply(rule integral_combine) + hence *:"?I a x - ?I a y = ?I y x" unfolding algebra_simps apply(subst eq_commute) apply(rule integral_combine) using True using assms(2) goal1 by auto have **:"norm (y - x) = content {vec1 y..vec1 x}" apply(subst content_1) using True unfolding not_less by auto have ***:"\fy fx c::'a. fx - fy - (y - x) *\<^sub>R c = -(fy - fx - (x - y) *\<^sub>R c)" unfolding scaleR_left.diff by auto @@ -3125,7 +3124,7 @@ have *:"x - y = norm(y - x)" using True by auto show "((\xa. f x) has_integral (x - y) *\<^sub>R f x) {vec1 y..vec1 x}" apply(subst *) unfolding ** by auto show "\xa\{vec1 y..vec1 x}. norm (f (dest_vec1 xa) - f x) \ e" apply safe apply(rule less_imp_le) - apply(rule d(2)[unfolded vector_dist_norm]) using assms(2) using goal1 by auto + apply(rule d(2)[unfolded dist_norm]) using assms(2) using goal1 by auto qed(insert e,auto) qed qed qed lemma integral_has_vector_derivative': fixes f::"real^1 \ 'a::banach" @@ -3194,7 +3193,7 @@ apply(rule_tac X="g ` X" in UnionI) defer apply(rule_tac x="h x" in image_eqI) using X(2) assms(3)[rule_format,of x] by auto qed note ** = d(2)[OF this] have *:"inj_on (\(x, k). (g x, g ` k)) p" using inj(1) unfolding inj_on_def by fastsimp - have "(\(x, k)\(\(x, k). (g x, g ` k)) ` p. 
content k *\<^sub>R f x) - i = r *\<^sub>R (\(x, k)\p. content k *\<^sub>R f (g x)) - i" (is "?l = _") unfolding group_simps add_left_cancel + have "(\(x, k)\(\(x, k). (g x, g ` k)) ` p. content k *\<^sub>R f x) - i = r *\<^sub>R (\(x, k)\p. content k *\<^sub>R f (g x)) - i" (is "?l = _") unfolding algebra_simps add_left_cancel unfolding setsum_reindex[OF *] apply(subst scaleR_right.setsum) defer apply(rule setsum_cong2) unfolding o_def split_paired_all split_conv apply(drule p(4)) apply safe unfolding assms(7)[rule_format] using p by auto also have "... = r *\<^sub>R ((\(x, k)\p. content k *\<^sub>R f (g x)) - (1 / r) *\<^sub>R i)" (is "_ = ?r") unfolding scaleR.diff_right scaleR.scaleR_left[THEN sym] @@ -3332,7 +3331,7 @@ lemma norm_triangle_le_sub: "norm x + norm y \ e \ norm (x - y) \ e" apply(subst(asm)(2) norm_minus_cancel[THEN sym]) - apply(drule norm_triangle_le) by(auto simp add:group_simps) + apply(drule norm_triangle_le) by(auto simp add:algebra_simps) lemma fundamental_theorem_of_calculus_interior: assumes"a \ b" "continuous_on {a..b} f" "\x\{a<.. ?thesis" show ?thesis proof(cases,rule *,assumption) assume "\ a < b" hence "a = b" using assms(1) by auto - hence *:"{vec a .. vec b} = {vec b}" "f b - f a = 0" apply(auto simp add: Cart_simps) by smt + hence *:"{vec a .. vec b} = {vec b}" "f b - f a = 0" by(auto simp add: Cart_eq vector_le_def order_antisym) show ?thesis unfolding *(2) apply(rule has_integral_null) unfolding content_eq_0_1 using * `a=b` by auto qed } assume ab:"a < b" let ?P = "\e. \d. gauge d \ (\p. p tagged_division_of {vec1 a..vec1 b} \ d fine p \ @@ -3376,7 +3375,7 @@ proof(rule add_mono) case goal1 have "\c - a\ \ \l\" using as' by auto thus ?case apply-apply(rule order_trans[OF _ l(2)]) unfolding norm_scaleR apply(rule mult_right_mono) by auto next case goal2 show ?case apply(rule less_imp_le) apply(cases "a = c") defer - apply(rule k(2)[unfolded vector_dist_norm]) using as' e ab by(auto simp add:field_simps) + apply(rule k(2)[unfolded dist_norm]) using as' e ab by(auto simp add:field_simps) qed finally show "norm (content {vec1 a..vec1 c} *\<^sub>R f' a - (f c - f a)) \ e * (b - a) / 4" unfolding content_1'[OF as(1)] by auto qed qed then guess da .. note da=conjunctD2[OF this,rule_format] @@ -3400,7 +3399,7 @@ proof(rule add_mono) case goal1 have "\c - b\ \ \l\" using as' by auto thus ?case apply-apply(rule order_trans[OF _ l(2)]) unfolding norm_scaleR apply(rule mult_right_mono) by auto next case goal2 show ?case apply(rule less_imp_le) apply(cases "b = c") defer apply(subst norm_minus_commute) - apply(rule k(2)[unfolded vector_dist_norm]) using as' e ab by(auto simp add:field_simps) + apply(rule k(2)[unfolded dist_norm]) using as' e ab by(auto simp add:field_simps) qed finally show "norm (content {vec1 c..vec1 b} *\<^sub>R f' b - (f b - f c)) \ e * (b - a) / 4" unfolding content_1'[OF as(1)] by auto qed qed then guess db .. note db=conjunctD2[OF this,rule_format] @@ -3422,7 +3421,7 @@ hence "\i. u$i \ v$i" and uv:"{u,v}\{u..v}" using p(2)[OF as(1)] by auto note this(1) this(1)[unfolded forall_1] note result = as(2)[unfolded k interval_bounds[OF this(1)] content_1[OF this(2)]] - assume as':"x \ vec1 a" "x \ vec1 b" hence "x$1 \ {a<.. 
vec1 a" "x \ vec1 b" hence "x$1 \ {a<..R f' (x$1) - (f (v$1) - f (u$1))) = norm ((f (u$1) - f (x$1) - (u$1 - x$1) *\<^sub>R f' (x$1)) - (f (v$1) - f (x$1) - (v$1 - x$1) *\<^sub>R f' (x$1)))" apply(rule arg_cong[of _ _ norm]) unfolding scaleR_left.diff by auto @@ -3641,17 +3640,17 @@ proof safe show "0 < ?d" using d(1) assms(3) unfolding Cart_simps by auto fix t::"_^1" assume as:"c \ t" "t$1 < c$1 + ?d" have *:"integral{a..c} f = integral{a..b} f - integral{c..b} f" - "integral{a..t} f = integral{a..b} f - integral{t..b} f" unfolding group_simps + "integral{a..t} f = integral{a..b} f - integral{t..b} f" unfolding algebra_simps apply(rule_tac[!] integral_combine) using assms as unfolding Cart_simps by auto have "(- c)$1 - d < (- t)$1 \ - t \ - c" using as by auto note d(2)[rule_format,OF this] thus "norm (integral {a..c} f - integral {a..t} f) < e" unfolding * - unfolding integral_reflect apply-apply(subst norm_minus_commute) by(auto simp add:group_simps) qed qed + unfolding integral_reflect apply-apply(subst norm_minus_commute) by(auto simp add:algebra_simps) qed qed declare dest_vec1_eq[simp del] not_less[simp] not_le[simp] lemma indefinite_integral_continuous: fixes f::"real^1 \ 'a::banach" assumes "f integrable_on {a..b}" shows "continuous_on {a..b} (\x. integral {a..x} f)" -proof(unfold continuous_on_def, safe) fix x e assume as:"x\{a..b}" "0<(e::real)" +proof(unfold continuous_on_iff, safe) fix x e assume as:"x\{a..b}" "0<(e::real)" let ?thesis = "\d>0. \x'\{a..b}. dist x' x < d \ dist (integral {a..x'} f) (integral {a..x} f) < e" { presume *:"a ?thesis" show ?thesis apply(cases,rule *,assumption) @@ -3664,19 +3663,19 @@ proof- assume "x=a" have "a \ a" by auto from indefinite_integral_continuous_right[OF assms(1) this `a0`] guess d . note d=this show ?thesis apply(rule,rule,rule d,safe) apply(subst dist_commute) - unfolding `x=a` vector_dist_norm apply(rule d(2)[rule_format]) unfolding norm_real by auto + unfolding `x=a` dist_norm apply(rule d(2)[rule_format]) unfolding norm_real by auto next assume "x=b" have "b \ b" by auto from indefinite_integral_continuous_left[OF assms(1) `a0`] guess d . note d=this show ?thesis apply(rule,rule,rule d,safe) apply(subst dist_commute) - unfolding `x=b` vector_dist_norm apply(rule d(2)[rule_format]) unfolding norm_real by auto - next assume "a xb" and xr:"a\x" "x xb" and xr:"a\x" "x0`] guess d1 . note d1=this from indefinite_integral_continuous_right[OF assms(1) xr `e>0`] guess d2 . note d2=this show ?thesis apply(rule_tac x="min d1 d2" in exI) proof safe show "0 < min d1 d2" using d1 d2 by auto fix y assume "y\{a..b}" "dist y x < min d1 d2" thus "dist (integral {a..y} f) (integral {a..x} f) < e" apply-apply(subst dist_commute) - apply(cases "y < x") unfolding vector_dist_norm apply(rule d1(2)[rule_format]) defer + apply(cases "y < x") unfolding dist_norm apply(rule d1(2)[rule_format]) defer apply(rule d2(2)[rule_format]) unfolding Cart_simps not_less norm_real by(auto simp add:field_simps) qed qed qed @@ -3715,7 +3714,7 @@ apply safe apply(rule conv) using assms(4,7) by auto have *:"\t xa. (1 - t) *\<^sub>R c + t *\<^sub>R x = (1 - xa) *\<^sub>R c + xa *\<^sub>R x \ t = xa" proof- case goal1 hence "(t - xa) *\<^sub>R x = (t - xa) *\<^sub>R c" - unfolding scaleR_simps by(auto simp add:group_simps) + unfolding scaleR_simps by(auto simp add:algebra_simps) thus ?case using `x\c` by auto qed have as2:"finite {t. ((1 - t) *\<^sub>R c + t *\<^sub>R x) \ k}" using assms(2) apply(rule finite_surj[where f="\z. SOME t. 
(1-t) *\<^sub>R c + t *\<^sub>R x = z"]) @@ -3726,7 +3725,7 @@ unfolding o_def using assms(5) defer apply-apply(rule) proof- fix t assume as:"t\{0..1} - {t. (1 - t) *\<^sub>R c + t *\<^sub>R x \ k}" have *:"c - t *\<^sub>R c + t *\<^sub>R x \ s - k" apply safe apply(rule conv[unfolded scaleR_simps]) - using `x\s` `c\s` as by(auto simp add:scaleR_simps) + using `x\s` `c\s` as by(auto simp add: algebra_simps) have "(f \ (\t. (1 - t) *\<^sub>R c + t *\<^sub>R x) has_derivative (\x. 0) \ (\z. (0 - z *\<^sub>R c) + z *\<^sub>R x)) (at t within {0..1})" apply(rule diff_chain_within) apply(rule has_derivative_add) unfolding scaleR_simps apply(rule has_derivative_sub) apply(rule has_derivative_const) @@ -3832,7 +3831,7 @@ thus "((\x. if x \ s then f x else 0) has_integral i) {c..d}" unfolding s apply-apply(rule has_integral_restrict_closed_subinterval) apply(rule `?l`[unfolded s]) apply safe apply(drule B(2)[rule_format]) unfolding subset_eq apply(erule_tac x=x in ballE) - by(auto simp add:vector_dist_norm) + by(auto simp add:dist_norm) qed(insert B `e>0`, auto) next assume as:"\e>0. ?r e" from this[rule_format,OF zero_less_one] guess C .. note C=conjunctD2[OF this,rule_format] @@ -3840,7 +3839,7 @@ have c_d:"{a..b} \ {c..d}" apply safe apply(drule B(2)) unfolding mem_interval proof case goal1 thus ?case using component_le_norm[of x i] unfolding c_def d_def by(auto simp add:field_simps) qed - have "ball 0 C \ {c..d}" apply safe unfolding mem_interval mem_ball vector_dist_norm + have "ball 0 C \ {c..d}" apply safe unfolding mem_interval mem_ball dist_norm proof case goal1 thus ?case using component_le_norm[of x i] unfolding c_def d_def by auto qed from C(2)[OF this] have "\y. (f has_integral y) {a..b}" unfolding has_integral_restrict_closed_subintervals_eq[OF c_d,THEN sym] unfolding s by auto @@ -3852,7 +3851,7 @@ have c_d:"{a..b} \ {c..d}" apply safe apply(drule B(2)) unfolding mem_interval proof case goal1 thus ?case using component_le_norm[of x i] unfolding c_def d_def by(auto simp add:field_simps) qed - have "ball 0 C \ {c..d}" apply safe unfolding mem_interval mem_ball vector_dist_norm + have "ball 0 C \ {c..d}" apply safe unfolding mem_interval mem_ball dist_norm proof case goal1 thus ?case using component_le_norm[of x i] unfolding c_def d_def by auto qed note C(2)[OF this] then guess z .. note z = conjunctD2[OF this, unfolded s] note this[unfolded has_integral_restrict_closed_subintervals_eq[OF c_d]] @@ -3949,7 +3948,7 @@ lemma has_integral_spike_set_eq: fixes f::"real^'n \ 'a::banach" assumes "negligible((s - t) \ (t - s))" shows "((f has_integral y) s \ (f has_integral y) t)" - unfolding has_integral_restrict_univ[THEN sym,of f] apply(rule has_integral_spike_eq[OF assms]) by auto + unfolding has_integral_restrict_univ[THEN sym,of f] apply(rule has_integral_spike_eq[OF assms]) by (safe, auto split: split_if_asm) lemma has_integral_spike_set[dest]: fixes f::"real^'n \ 'a::banach" assumes "negligible((s - t) \ (t - s))" "(f has_integral y) s" @@ -4038,7 +4037,7 @@ from as[OF zero_less_one] guess B .. note B=conjunctD2[OF this,rule_format] let ?a = "(\ i. min (a$i) (-B))::real^'n" and ?b = "(\ i. 
max (b$i) B)::real^'n" show "?f integrable_on {a..b}" apply(rule integrable_subinterval[of _ ?a ?b]) - proof- have "ball 0 B \ {?a..?b}" apply safe unfolding mem_ball mem_interval vector_dist_norm + proof- have "ball 0 B \ {?a..?b}" apply safe unfolding mem_ball mem_interval dist_norm proof case goal1 thus ?case using component_le_norm[of x i] by(auto simp add:field_simps) qed from B(2)[OF this] guess z .. note conjunct1[OF this] thus "?f integrable_on {?a..?b}" unfolding integrable_on_def by auto @@ -4072,10 +4071,10 @@ from as(2)[OF this] guess B .. note B = conjunctD2[OF this,rule_format] from real_arch_simple[of B] guess N .. note N = this { fix n assume n:"n \ N" have "ball 0 B \ {\ i. - real n..\ i. real n}" apply safe - unfolding mem_ball mem_interval vector_dist_norm + unfolding mem_ball mem_interval dist_norm proof case goal1 thus ?case using component_le_norm[of x i] using n N by(auto simp add:field_simps) qed } - thus ?case apply-apply(rule_tac x=N in exI) apply safe unfolding vector_dist_norm apply(rule B(2)) by auto + thus ?case apply-apply(rule_tac x=N in exI) apply safe unfolding dist_norm apply(rule B(2)) by auto qed from this[unfolded convergent_eq_cauchy[THEN sym]] guess i .. note i = this[unfolded Lim_sequentially, rule_format] @@ -4090,11 +4089,11 @@ from real_arch_simple[of ?B] guess n .. note n=this show "norm (integral {a..b} (\x. if x \ s then f x else 0) - i) < e" apply(rule norm_triangle_half_l) apply(rule B(2)) defer apply(subst norm_minus_commute) - apply(rule N[unfolded vector_dist_norm, of n]) + apply(rule N[unfolded dist_norm, of n]) proof safe show "N \ n" using n by auto fix x::"real^'n" assume x:"x \ ball 0 B" hence "x\ ball 0 ?B" by auto thus "x\{a..b}" using ab by blast - show "x\{\ i. - real n..\ i. real n}" using x unfolding mem_interval mem_ball vector_dist_norm apply- + show "x\{\ i. - real n..\ i. real n}" using x unfolding mem_interval mem_ball dist_norm apply- proof case goal1 thus ?case using component_le_norm[of x i] using n by(auto simp add:field_simps) qed qed qed qed qed @@ -4160,7 +4159,7 @@ note obt(2)[unfolded has_integral_alt'[of h]] note conjunctD2[OF this, rule_format] note h = this(1) and this(2)[OF *] from this(2) guess B2 .. note B2 = conjunctD2[OF this,rule_format] def c \ "\ i. min (a$i) (- (max B1 B2))" and d \ "\ i. max (b$i) (max B1 B2)" - have *:"ball 0 B1 \ {c..d}" "ball 0 B2 \ {c..d}" apply safe unfolding mem_ball mem_interval vector_dist_norm + have *:"ball 0 B1 \ {c..d}" "ball 0 B2 \ {c..d}" apply safe unfolding mem_ball mem_interval dist_norm proof(rule_tac[!] allI) case goal1 thus ?case using component_le_norm[of x i] unfolding c_def d_def by auto next case goal2 thus ?case using component_le_norm[of x i] unfolding c_def d_def by auto qed @@ -4390,7 +4389,7 @@ have *:"\ir ip i cr cp. norm((cp + cr) - i) < e \ norm(cr - ir) < k \ ip + ir = i \ norm(cp - ip) \ e + k" proof- case goal1 thus ?case using norm_triangle_le[of "cp + cr - i" "- (cr - ir)"] - unfolding goal1(3)[THEN sym] norm_minus_cancel by(auto simp add:group_simps) qed + unfolding goal1(3)[THEN sym] norm_minus_cancel by(auto simp add:algebra_simps) qed have "?x = norm ((\(x, k)\p. content k *\<^sub>R f x) - (\(x, k)\p. integral k f))" unfolding split_def setsum_subtractf .. 
@@ -4501,7 +4500,7 @@ norm(c - d) < e / 4 \ norm(a - d) < e" proof safe case goal1 thus ?case using norm_triangle_lt[of "a - b" "b - c" "3* e/4"] norm_triangle_lt[of "a - b + (b - c)" "c - d" e] unfolding norm_minus_cancel - by(auto simp add:group_simps) qed + by(auto simp add:algebra_simps) qed show "norm ((\(x, k)\p. content k *\<^sub>R g x) - i) < e" apply(rule *[rule_format,where b="\(x, k)\p. content k *\<^sub>R f (m x) x" and c="\(x, k)\p. integral k (f (m x))"]) proof safe case goal1 @@ -4623,7 +4622,7 @@ from g(2)[unfolded Lim_sequentially,of a b,rule_format,OF this] guess M .. note M=this have **:"norm (integral {a..b} (\x. if x \ s then f N x else 0) - i) < e/2" apply(rule norm_triangle_half_l) using B(2)[rule_format,OF ab] N[rule_format,of N] - unfolding vector_dist_norm apply-defer apply(subst norm_minus_commute) by auto + unfolding dist_norm apply-defer apply(subst norm_minus_commute) by auto have *:"\f1 f2 g. abs(f1 - i$1) < e / 2 \ abs(f2 - g) < e / 2 \ f1 \ f2 \ f2 \ i$1 \ abs(g - i$1) < e" by arith show "norm (integral {a..b} (\x. if x \ s then g x else 0) - i) < e" @@ -5152,7 +5151,7 @@ assumes "f absolutely_integrable_on s" "g absolutely_integrable_on s" shows "(\x. f(x) - g(x)) absolutely_integrable_on s" using absolutely_integrable_add[OF assms(1) absolutely_integrable_neg[OF assms(2)]] - unfolding group_simps . + unfolding algebra_simps . lemma absolutely_integrable_linear: fixes f::"real^'m \ real^'n" and h::"real^'n \ real^'p" assumes "f absolutely_integrable_on s" "bounded_linear h" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Multivariate_Analysis/Multivariate_Analysis.thy --- a/src/HOL/Multivariate_Analysis/Multivariate_Analysis.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Multivariate_Analysis/Multivariate_Analysis.thy Tue May 04 20:30:22 2010 +0200 @@ -1,5 +1,5 @@ theory Multivariate_Analysis -imports Determinants Integration Real_Integration +imports Determinants Integration Real_Integration Fashoda begin end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Multivariate_Analysis/Operator_Norm.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Multivariate_Analysis/Operator_Norm.thy Tue May 04 20:30:22 2010 +0200 @@ -0,0 +1,148 @@ +(* Title: Library/Operator_Norm.thy + Author: Amine Chaieb, University of Cambridge +*) + +header {* Operator Norm *} + +theory Operator_Norm +imports Euclidean_Space +begin + +definition "onorm f = Sup {norm (f x)| x. norm x = 1}" + +lemma norm_bound_generalize: + fixes f:: "real ^'n \ real^'m" + assumes lf: "linear f" + shows "(\x. norm x = 1 \ norm (f x) \ b) \ (\x. 
norm (f x) \ b * norm x)" (is "?lhs \ ?rhs") +proof- + {assume H: ?rhs + {fix x :: "real^'n" assume x: "norm x = 1" + from H[rule_format, of x] x have "norm (f x) \ b" by simp} + then have ?lhs by blast } + + moreover + {assume H: ?lhs + from H[rule_format, of "basis arbitrary"] + have bp: "b \ 0" using norm_ge_zero[of "f (basis arbitrary)"] + by (auto simp add: norm_basis elim: order_trans [OF norm_ge_zero]) + {fix x :: "real ^'n" + {assume "x = 0" + then have "norm (f x) \ b * norm x" by (simp add: linear_0[OF lf] bp)} + moreover + {assume x0: "x \ 0" + hence n0: "norm x \ 0" by (metis norm_eq_zero) + let ?c = "1/ norm x" + have "norm (?c *\<^sub>R x) = 1" using x0 by (simp add: n0) + with H have "norm (f (?c *\<^sub>R x)) \ b" by blast + hence "?c * norm (f x) \ b" + by (simp add: linear_cmul[OF lf]) + hence "norm (f x) \ b * norm x" + using n0 norm_ge_zero[of x] by (auto simp add: field_simps)} + ultimately have "norm (f x) \ b * norm x" by blast} + then have ?rhs by blast} + ultimately show ?thesis by blast +qed + +lemma onorm: + fixes f:: "real ^'n \ real ^'m" + assumes lf: "linear f" + shows "norm (f x) <= onorm f * norm x" + and "\x. norm (f x) <= b * norm x \ onorm f <= b" +proof- + { + let ?S = "{norm (f x) |x. norm x = 1}" + have Se: "?S \ {}" using norm_basis by auto + from linear_bounded[OF lf] have b: "\ b. ?S *<= b" + unfolding norm_bound_generalize[OF lf, symmetric] by (auto simp add: setle_def) + {from Sup[OF Se b, unfolded onorm_def[symmetric]] + show "norm (f x) <= onorm f * norm x" + apply - + apply (rule spec[where x = x]) + unfolding norm_bound_generalize[OF lf, symmetric] + by (auto simp add: isLub_def isUb_def leastP_def setge_def setle_def)} + { + show "\x. norm (f x) <= b * norm x \ onorm f <= b" + using Sup[OF Se b, unfolded onorm_def[symmetric]] + unfolding norm_bound_generalize[OF lf, symmetric] + by (auto simp add: isLub_def isUb_def leastP_def setge_def setle_def)} + } +qed + +lemma onorm_pos_le: assumes lf: "linear (f::real ^'n \ real ^'m)" shows "0 <= onorm f" + using order_trans[OF norm_ge_zero onorm(1)[OF lf, of "basis arbitrary"], unfolded norm_basis] by simp + +lemma onorm_eq_0: assumes lf: "linear (f::real ^'n \ real ^'m)" + shows "onorm f = 0 \ (\x. f x = 0)" + using onorm[OF lf] + apply (auto simp add: onorm_pos_le) + apply atomize + apply (erule allE[where x="0::real"]) + using onorm_pos_le[OF lf] + apply arith + done + +lemma onorm_const: "onorm(\x::real^'n. (y::real ^'m)) = norm y" +proof- + let ?f = "\x::real^'n. (y::real ^ 'm)" + have th: "{norm (?f x)| x. norm x = 1} = {norm y}" + by(auto intro: vector_choose_size set_ext) + show ?thesis + unfolding onorm_def th + apply (rule Sup_unique) by (simp_all add: setle_def) +qed + +lemma onorm_pos_lt: assumes lf: "linear (f::real ^ 'n \ real ^'m)" + shows "0 < onorm f \ ~(\x. f x = 0)" + unfolding onorm_eq_0[OF lf, symmetric] + using onorm_pos_le[OF lf] by arith + +lemma onorm_compose: + assumes lf: "linear (f::real ^'n \ real ^'m)" + and lg: "linear (g::real^'k \ real^'n)" + shows "onorm (f o g) <= onorm f * onorm g" + apply (rule onorm(2)[OF linear_compose[OF lg lf], rule_format]) + unfolding o_def + apply (subst mult_assoc) + apply (rule order_trans) + apply (rule onorm(1)[OF lf]) + apply (rule mult_mono1) + apply (rule onorm(1)[OF lg]) + apply (rule onorm_pos_le[OF lf]) + done + +lemma onorm_neg_lemma: assumes lf: "linear (f::real ^'n \ real^'m)" + shows "onorm (\x. 
- f x) \ onorm f" + using onorm[OF linear_compose_neg[OF lf]] onorm[OF lf] + unfolding norm_minus_cancel by metis + +lemma onorm_neg: assumes lf: "linear (f::real ^'n \ real^'m)" + shows "onorm (\x. - f x) = onorm f" + using onorm_neg_lemma[OF lf] onorm_neg_lemma[OF linear_compose_neg[OF lf]] + by simp + +lemma onorm_triangle: + assumes lf: "linear (f::real ^'n \ real ^'m)" and lg: "linear g" + shows "onorm (\x. f x + g x) <= onorm f + onorm g" + apply(rule onorm(2)[OF linear_compose_add[OF lf lg], rule_format]) + apply (rule order_trans) + apply (rule norm_triangle_ineq) + apply (simp add: distrib) + apply (rule add_mono) + apply (rule onorm(1)[OF lf]) + apply (rule onorm(1)[OF lg]) + done + +lemma onorm_triangle_le: "linear (f::real ^'n \ real ^'m) \ linear g \ onorm(f) + onorm(g) <= e + \ onorm(\x. f x + g x) <= e" + apply (rule order_trans) + apply (rule onorm_triangle) + apply assumption+ + done + +lemma onorm_triangle_lt: "linear (f::real ^'n \ real ^'m) \ linear g \ onorm(f) + onorm(g) < e + ==> onorm(\x. f x + g x) < e" + apply (rule order_le_less_trans) + apply (rule onorm_triangle) + by assumption+ + +end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Multivariate_Analysis/Path_Connected.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Multivariate_Analysis/Path_Connected.thy Tue May 04 20:30:22 2010 +0200 @@ -0,0 +1,570 @@ +(* Title: Multivariate_Analysis/Path_Connected.thy + Author: Robert Himmelmann, TU Muenchen +*) + +header {* Continuous paths and path-connected sets *} + +theory Path_Connected +imports Convex_Euclidean_Space +begin + +subsection {* Paths. *} + +definition + path :: "(real \ 'a::topological_space) \ bool" + where "path g \ continuous_on {0 .. 1} g" + +definition + pathstart :: "(real \ 'a::topological_space) \ 'a" + where "pathstart g = g 0" + +definition + pathfinish :: "(real \ 'a::topological_space) \ 'a" + where "pathfinish g = g 1" + +definition + path_image :: "(real \ 'a::topological_space) \ 'a set" + where "path_image g = g ` {0 .. 1}" + +definition + reversepath :: "(real \ 'a::topological_space) \ (real \ 'a)" + where "reversepath g = (\x. g(1 - x))" + +definition + joinpaths :: "(real \ 'a::topological_space) \ (real \ 'a) \ (real \ 'a)" + (infixr "+++" 75) + where "g1 +++ g2 = (\x. if x \ 1/2 then g1 (2 * x) else g2 (2 * x - 1))" + +definition + simple_path :: "(real \ 'a::topological_space) \ bool" + where "simple_path g \ + (\x\{0..1}. \y\{0..1}. g x = g y \ x = y \ x = 0 \ y = 1 \ x = 1 \ y = 0)" + +definition + injective_path :: "(real \ 'a::topological_space) \ bool" + where "injective_path g \ (\x\{0..1}. \y\{0..1}. g x = g y \ x = y)" + +subsection {* Some lemmas about these concepts. 
*} + +lemma injective_imp_simple_path: + "injective_path g \ simple_path g" + unfolding injective_path_def simple_path_def by auto + +lemma path_image_nonempty: "path_image g \ {}" + unfolding path_image_def image_is_empty interval_eq_empty by auto + +lemma pathstart_in_path_image[intro]: "(pathstart g) \ path_image g" + unfolding pathstart_def path_image_def by auto + +lemma pathfinish_in_path_image[intro]: "(pathfinish g) \ path_image g" + unfolding pathfinish_def path_image_def by auto + +lemma connected_path_image[intro]: "path g \ connected(path_image g)" + unfolding path_def path_image_def + apply (erule connected_continuous_image) + by(rule convex_connected, rule convex_real_interval) + +lemma compact_path_image[intro]: "path g \ compact(path_image g)" + unfolding path_def path_image_def + by (erule compact_continuous_image, rule compact_real_interval) + +lemma reversepath_reversepath[simp]: "reversepath(reversepath g) = g" + unfolding reversepath_def by auto + +lemma pathstart_reversepath[simp]: "pathstart(reversepath g) = pathfinish g" + unfolding pathstart_def reversepath_def pathfinish_def by auto + +lemma pathfinish_reversepath[simp]: "pathfinish(reversepath g) = pathstart g" + unfolding pathstart_def reversepath_def pathfinish_def by auto + +lemma pathstart_join[simp]: "pathstart(g1 +++ g2) = pathstart g1" + unfolding pathstart_def joinpaths_def pathfinish_def by auto + +lemma pathfinish_join[simp]:"pathfinish(g1 +++ g2) = pathfinish g2" + unfolding pathstart_def joinpaths_def pathfinish_def by auto + +lemma path_image_reversepath[simp]: "path_image(reversepath g) = path_image g" proof- + have *:"\g. path_image(reversepath g) \ path_image g" + unfolding path_image_def subset_eq reversepath_def Ball_def image_iff apply(rule,rule,erule bexE) + apply(rule_tac x="1 - xa" in bexI) by auto + show ?thesis using *[of g] *[of "reversepath g"] unfolding reversepath_reversepath by auto qed + +lemma path_reversepath[simp]: "path(reversepath g) \ path g" proof- + have *:"\g. path g \ path(reversepath g)" unfolding path_def reversepath_def + apply(rule continuous_on_compose[unfolded o_def, of _ "\x. 1 - x"]) + apply(rule continuous_on_sub, rule continuous_on_const, rule continuous_on_id) + apply(rule continuous_on_subset[of "{0..1}"], assumption) by auto + show ?thesis using *[of "reversepath g"] *[of g] unfolding reversepath_reversepath by (rule iffI) qed + +lemmas reversepath_simps = path_reversepath path_image_reversepath pathstart_reversepath pathfinish_reversepath + +lemma path_join[simp]: assumes "pathfinish g1 = pathstart g2" shows "path (g1 +++ g2) \ path g1 \ path g2" + unfolding path_def pathfinish_def pathstart_def apply rule defer apply(erule conjE) proof- + assume as:"continuous_on {0..1} (g1 +++ g2)" + have *:"g1 = (\x. g1 (2 *\<^sub>R x)) \ (\x. (1/2) *\<^sub>R x)" + "g2 = (\x. g2 (2 *\<^sub>R x - 1)) \ (\x. (1/2) *\<^sub>R (x + 1))" + unfolding o_def by (auto simp add: add_divide_distrib) + have "op *\<^sub>R (1 / 2) ` {0::real..1} \ {0..1}" "(\x. (1 / 2) *\<^sub>R (x + 1)) ` {(0::real)..1} \ {0..1}" + by auto + thus "continuous_on {0..1} g1 \ continuous_on {0..1} g2" apply -apply rule + apply(subst *) defer apply(subst *) apply (rule_tac[!] continuous_on_compose) + apply (rule continuous_on_cmul, rule continuous_on_add, rule continuous_on_id, rule continuous_on_const) defer + apply (rule continuous_on_cmul, rule continuous_on_id) apply(rule_tac[!] continuous_on_eq[of _ "g1 +++ g2"]) defer prefer 3 + apply(rule_tac[1-2] continuous_on_subset[of "{0 .. 
1}"]) apply(rule as, assumption, rule as, assumption) + apply(rule) defer apply rule proof- + fix x assume "x \ op *\<^sub>R (1 / 2) ` {0::real..1}" + hence "x \ 1 / 2" unfolding image_iff by auto + thus "(g1 +++ g2) x = g1 (2 *\<^sub>R x)" unfolding joinpaths_def by auto next + fix x assume "x \ (\x. (1 / 2) *\<^sub>R (x + 1)) ` {0::real..1}" + hence "x \ 1 / 2" unfolding image_iff by auto + thus "(g1 +++ g2) x = g2 (2 *\<^sub>R x - 1)" proof(cases "x = 1 / 2") + case True hence "x = (1/2) *\<^sub>R 1" unfolding Cart_eq by auto + thus ?thesis unfolding joinpaths_def using assms[unfolded pathstart_def pathfinish_def] by (auto simp add: mult_ac) + qed (auto simp add:le_less joinpaths_def) qed +next assume as:"continuous_on {0..1} g1" "continuous_on {0..1} g2" + have *:"{0 .. 1::real} = {0.. (1/2)*\<^sub>R 1} \ {(1/2) *\<^sub>R 1 .. 1}" by auto + have **:"op *\<^sub>R 2 ` {0..(1 / 2) *\<^sub>R 1} = {0..1::real}" apply(rule set_ext, rule) unfolding image_iff + defer apply(rule_tac x="(1/2)*\<^sub>R x" in bexI) by auto + have ***:"(\x. 2 *\<^sub>R x - 1) ` {(1 / 2) *\<^sub>R 1..1} = {0..1::real}" + apply (auto simp add: image_def) + apply (rule_tac x="(x + 1) / 2" in bexI) + apply (auto simp add: add_divide_distrib) + done + show "continuous_on {0..1} (g1 +++ g2)" unfolding * apply(rule continuous_on_union) apply (rule closed_real_atLeastAtMost)+ proof- + show "continuous_on {0..(1 / 2) *\<^sub>R 1} (g1 +++ g2)" apply(rule continuous_on_eq[of _ "\x. g1 (2 *\<^sub>R x)"]) defer + unfolding o_def[THEN sym] apply(rule continuous_on_compose) apply(rule continuous_on_cmul, rule continuous_on_id) + unfolding ** apply(rule as(1)) unfolding joinpaths_def by auto next + show "continuous_on {(1/2)*\<^sub>R1..1} (g1 +++ g2)" apply(rule continuous_on_eq[of _ "g2 \ (\x. 
2 *\<^sub>R x - 1)"]) defer + apply(rule continuous_on_compose) apply(rule continuous_on_sub, rule continuous_on_cmul, rule continuous_on_id, rule continuous_on_const) + unfolding *** o_def joinpaths_def apply(rule as(2)) using assms[unfolded pathstart_def pathfinish_def] + by (auto simp add: mult_ac) qed qed + +lemma path_image_join_subset: "path_image(g1 +++ g2) \ (path_image g1 \ path_image g2)" proof + fix x assume "x \ path_image (g1 +++ g2)" + then obtain y where y:"y\{0..1}" "x = (if y \ 1 / 2 then g1 (2 *\<^sub>R y) else g2 (2 *\<^sub>R y - 1))" + unfolding path_image_def image_iff joinpaths_def by auto + thus "x \ path_image g1 \ path_image g2" apply(cases "y \ 1/2") + apply(rule_tac UnI1) defer apply(rule_tac UnI2) unfolding y(2) path_image_def using y(1) + by(auto intro!: imageI) qed + +lemma subset_path_image_join: + assumes "path_image g1 \ s" "path_image g2 \ s" shows "path_image(g1 +++ g2) \ s" + using path_image_join_subset[of g1 g2] and assms by auto + +lemma path_image_join: + assumes "path g1" "path g2" "pathfinish g1 = pathstart g2" + shows "path_image(g1 +++ g2) = (path_image g1) \ (path_image g2)" +apply(rule, rule path_image_join_subset, rule) unfolding Un_iff proof(erule disjE) + fix x assume "x \ path_image g1" + then obtain y where y:"y\{0..1}" "x = g1 y" unfolding path_image_def image_iff by auto + thus "x \ path_image (g1 +++ g2)" unfolding joinpaths_def path_image_def image_iff + apply(rule_tac x="(1/2) *\<^sub>R y" in bexI) by auto next + fix x assume "x \ path_image g2" + then obtain y where y:"y\{0..1}" "x = g2 y" unfolding path_image_def image_iff by auto + then show "x \ path_image (g1 +++ g2)" unfolding joinpaths_def path_image_def image_iff + apply(rule_tac x="(1/2) *\<^sub>R (y + 1)" in bexI) using assms(3)[unfolded pathfinish_def pathstart_def] + by (auto simp add: add_divide_distrib) qed + +lemma not_in_path_image_join: + assumes "x \ path_image g1" "x \ path_image g2" shows "x \ path_image(g1 +++ g2)" + using assms and path_image_join_subset[of g1 g2] by auto + +lemma simple_path_reversepath: assumes "simple_path g" shows "simple_path (reversepath g)" + using assms unfolding simple_path_def reversepath_def apply- apply(rule ballI)+ + apply(erule_tac x="1-x" in ballE, erule_tac x="1-y" in ballE) + by auto + +lemma simple_path_join_loop: + assumes "injective_path g1" "injective_path g2" "pathfinish g2 = pathstart g1" + "(path_image g1 \ path_image g2) \ {pathstart g1,pathstart g2}" + shows "simple_path(g1 +++ g2)" +unfolding simple_path_def proof((rule ballI)+, rule impI) let ?g = "g1 +++ g2" + note inj = assms(1,2)[unfolded injective_path_def, rule_format] + fix x y::"real" assume xy:"x \ {0..1}" "y \ {0..1}" "?g x = ?g y" + show "x = y \ x = 0 \ y = 1 \ x = 1 \ y = 0" proof(case_tac "x \ 1/2",case_tac[!] 
"y \ 1/2", unfold not_le) + assume as:"x \ 1 / 2" "y \ 1 / 2" + hence "g1 (2 *\<^sub>R x) = g1 (2 *\<^sub>R y)" using xy(3) unfolding joinpaths_def by auto + moreover have "2 *\<^sub>R x \ {0..1}" "2 *\<^sub>R y \ {0..1}" using xy(1,2) as + by auto + ultimately show ?thesis using inj(1)[of "2*\<^sub>R x" "2*\<^sub>R y"] by auto + next assume as:"x > 1 / 2" "y > 1 / 2" + hence "g2 (2 *\<^sub>R x - 1) = g2 (2 *\<^sub>R y - 1)" using xy(3) unfolding joinpaths_def by auto + moreover have "2 *\<^sub>R x - 1 \ {0..1}" "2 *\<^sub>R y - 1 \ {0..1}" using xy(1,2) as by auto + ultimately show ?thesis using inj(2)[of "2*\<^sub>R x - 1" "2*\<^sub>R y - 1"] by auto + next assume as:"x \ 1 / 2" "y > 1 / 2" + hence "?g x \ path_image g1" "?g y \ path_image g2" unfolding path_image_def joinpaths_def + using xy(1,2) by auto + moreover have "?g y \ pathstart g2" using as(2) unfolding pathstart_def joinpaths_def + using inj(2)[of "2 *\<^sub>R y - 1" 0] and xy(2) + by (auto simp add: field_simps) + ultimately have *:"?g x = pathstart g1" using assms(4) unfolding xy(3) by auto + hence "x = 0" unfolding pathstart_def joinpaths_def using as(1) and xy(1) + using inj(1)[of "2 *\<^sub>R x" 0] by auto + moreover have "y = 1" using * unfolding xy(3) assms(3)[THEN sym] + unfolding joinpaths_def pathfinish_def using as(2) and xy(2) + using inj(2)[of "2 *\<^sub>R y - 1" 1] by auto + ultimately show ?thesis by auto + next assume as:"x > 1 / 2" "y \ 1 / 2" + hence "?g x \ path_image g2" "?g y \ path_image g1" unfolding path_image_def joinpaths_def + using xy(1,2) by auto + moreover have "?g x \ pathstart g2" using as(1) unfolding pathstart_def joinpaths_def + using inj(2)[of "2 *\<^sub>R x - 1" 0] and xy(1) + by (auto simp add: field_simps) + ultimately have *:"?g y = pathstart g1" using assms(4) unfolding xy(3) by auto + hence "y = 0" unfolding pathstart_def joinpaths_def using as(2) and xy(2) + using inj(1)[of "2 *\<^sub>R y" 0] by auto + moreover have "x = 1" using * unfolding xy(3)[THEN sym] assms(3)[THEN sym] + unfolding joinpaths_def pathfinish_def using as(1) and xy(1) + using inj(2)[of "2 *\<^sub>R x - 1" 1] by auto + ultimately show ?thesis by auto qed qed + +lemma injective_path_join: + assumes "injective_path g1" "injective_path g2" "pathfinish g1 = pathstart g2" + "(path_image g1 \ path_image g2) \ {pathstart g2}" + shows "injective_path(g1 +++ g2)" + unfolding injective_path_def proof(rule,rule,rule) let ?g = "g1 +++ g2" + note inj = assms(1,2)[unfolded injective_path_def, rule_format] + fix x y assume xy:"x \ {0..1}" "y \ {0..1}" "(g1 +++ g2) x = (g1 +++ g2) y" + show "x = y" proof(cases "x \ 1/2", case_tac[!] 
"y \ 1/2", unfold not_le) + assume "x \ 1 / 2" "y \ 1 / 2" thus ?thesis using inj(1)[of "2*\<^sub>R x" "2*\<^sub>R y"] and xy + unfolding joinpaths_def by auto + next assume "x > 1 / 2" "y > 1 / 2" thus ?thesis using inj(2)[of "2*\<^sub>R x - 1" "2*\<^sub>R y - 1"] and xy + unfolding joinpaths_def by auto + next assume as:"x \ 1 / 2" "y > 1 / 2" + hence "?g x \ path_image g1" "?g y \ path_image g2" unfolding path_image_def joinpaths_def + using xy(1,2) by auto + hence "?g x = pathfinish g1" "?g y = pathstart g2" using assms(4) unfolding assms(3) xy(3) by auto + thus ?thesis using as and inj(1)[of "2 *\<^sub>R x" 1] inj(2)[of "2 *\<^sub>R y - 1" 0] and xy(1,2) + unfolding pathstart_def pathfinish_def joinpaths_def + by auto + next assume as:"x > 1 / 2" "y \ 1 / 2" + hence "?g x \ path_image g2" "?g y \ path_image g1" unfolding path_image_def joinpaths_def + using xy(1,2) by auto + hence "?g x = pathstart g2" "?g y = pathfinish g1" using assms(4) unfolding assms(3) xy(3) by auto + thus ?thesis using as and inj(2)[of "2 *\<^sub>R x - 1" 0] inj(1)[of "2 *\<^sub>R y" 1] and xy(1,2) + unfolding pathstart_def pathfinish_def joinpaths_def + by auto qed qed + +lemmas join_paths_simps = path_join path_image_join pathstart_join pathfinish_join + +subsection {* Reparametrizing a closed curve to start at some chosen point. *} + +definition "shiftpath a (f::real \ 'a::topological_space) = + (\x. if (a + x) \ 1 then f(a + x) else f(a + x - 1))" + +lemma pathstart_shiftpath: "a \ 1 \ pathstart(shiftpath a g) = g a" + unfolding pathstart_def shiftpath_def by auto + +lemma pathfinish_shiftpath: assumes "0 \ a" "pathfinish g = pathstart g" + shows "pathfinish(shiftpath a g) = g a" + using assms unfolding pathstart_def pathfinish_def shiftpath_def + by auto + +lemma endpoints_shiftpath: + assumes "pathfinish g = pathstart g" "a \ {0 .. 1}" + shows "pathfinish(shiftpath a g) = g a" "pathstart(shiftpath a g) = g a" + using assms by(auto intro!:pathfinish_shiftpath pathstart_shiftpath) + +lemma closed_shiftpath: + assumes "pathfinish g = pathstart g" "a \ {0..1}" + shows "pathfinish(shiftpath a g) = pathstart(shiftpath a g)" + using endpoints_shiftpath[OF assms] by auto + +lemma path_shiftpath: + assumes "path g" "pathfinish g = pathstart g" "a \ {0..1}" + shows "path(shiftpath a g)" proof- + have *:"{0 .. 1} = {0 .. 1-a} \ {1-a .. 1}" using assms(3) by auto + have **:"\x. x + a = 1 \ g (x + a - 1) = g (x + a)" + using assms(2)[unfolded pathfinish_def pathstart_def] by auto + show ?thesis unfolding path_def shiftpath_def * apply(rule continuous_on_union) + apply(rule closed_real_atLeastAtMost)+ apply(rule continuous_on_eq[of _ "g \ (\x. a + x)"]) prefer 3 + apply(rule continuous_on_eq[of _ "g \ (\x. a - 1 + x)"]) defer prefer 3 + apply(rule continuous_on_intros)+ prefer 2 apply(rule continuous_on_intros)+ + apply(rule_tac[1-2] continuous_on_subset[OF assms(1)[unfolded path_def]]) + using assms(3) and ** by(auto, auto simp add: field_simps) qed + +lemma shiftpath_shiftpath: assumes "pathfinish g = pathstart g" "a \ {0..1}" "x \ {0..1}" + shows "shiftpath (1 - a) (shiftpath a g) x = g x" + using assms unfolding pathfinish_def pathstart_def shiftpath_def by auto + +lemma path_image_shiftpath: + assumes "a \ {0..1}" "pathfinish g = pathstart g" + shows "path_image(shiftpath a g) = path_image g" proof- + { fix x assume as:"g 1 = g 0" "x \ {0..1::real}" " \y\{0..1} \ {x. \ a + x \ 1}. g x \ g (a + y - 1)" + hence "\y\{0..1} \ {x. a + x \ 1}. 
g x = g (a + y)" proof(cases "a \ x") + case False thus ?thesis apply(rule_tac x="1 + x - a" in bexI) + using as(1,2) and as(3)[THEN bspec[where x="1 + x - a"]] and assms(1) + by(auto simp add: field_simps atomize_not) next + case True thus ?thesis using as(1-2) and assms(1) apply(rule_tac x="x - a" in bexI) + by(auto simp add: field_simps) qed } + thus ?thesis using assms unfolding shiftpath_def path_image_def pathfinish_def pathstart_def + by(auto simp add: image_iff) qed + +subsection {* Special case of straight-line paths. *} + +definition + linepath :: "'a::real_normed_vector \ 'a \ real \ 'a" where + "linepath a b = (\x. (1 - x) *\<^sub>R a + x *\<^sub>R b)" + +lemma pathstart_linepath[simp]: "pathstart(linepath a b) = a" + unfolding pathstart_def linepath_def by auto + +lemma pathfinish_linepath[simp]: "pathfinish(linepath a b) = b" + unfolding pathfinish_def linepath_def by auto + +lemma continuous_linepath_at[intro]: "continuous (at x) (linepath a b)" + unfolding linepath_def by (intro continuous_intros) + +lemma continuous_on_linepath[intro]: "continuous_on s (linepath a b)" + using continuous_linepath_at by(auto intro!: continuous_at_imp_continuous_on) + +lemma path_linepath[intro]: "path(linepath a b)" + unfolding path_def by(rule continuous_on_linepath) + +lemma path_image_linepath[simp]: "path_image(linepath a b) = (closed_segment a b)" + unfolding path_image_def segment linepath_def apply (rule set_ext, rule) defer + unfolding mem_Collect_eq image_iff apply(erule exE) apply(rule_tac x="u *\<^sub>R 1" in bexI) + by auto + +lemma reversepath_linepath[simp]: "reversepath(linepath a b) = linepath b a" + unfolding reversepath_def linepath_def by(rule ext, auto) + +lemma injective_path_linepath: + assumes "a \ b" shows "injective_path(linepath a b)" +proof - + { fix x y :: "real" + assume "x *\<^sub>R b + y *\<^sub>R a = x *\<^sub>R a + y *\<^sub>R b" + hence "(x - y) *\<^sub>R a = (x - y) *\<^sub>R b" by (simp add: algebra_simps) + with assms have "x = y" by simp } + thus ?thesis unfolding injective_path_def linepath_def by(auto simp add: algebra_simps) qed + +lemma simple_path_linepath[intro]: "a \ b \ simple_path(linepath a b)" by(auto intro!: injective_imp_simple_path injective_path_linepath) + +subsection {* Bounding a point away from a path. *} + +lemma not_on_path_ball: + fixes g :: "real \ 'a::heine_borel" + assumes "path g" "z \ path_image g" + shows "\e>0. ball z e \ (path_image g) = {}" proof- + obtain a where "a\path_image g" "\y\path_image g. dist z a \ dist z y" + using distance_attains_inf[OF _ path_image_nonempty, of g z] + using compact_path_image[THEN compact_imp_closed, OF assms(1)] by auto + thus ?thesis apply(rule_tac x="dist z a" in exI) using assms(2) by(auto intro!: dist_pos_lt) qed + +lemma not_on_path_cball: + fixes g :: "real \ 'a::heine_borel" + assumes "path g" "z \ path_image g" + shows "\e>0. cball z e \ (path_image g) = {}" proof- + obtain e where "ball z e \ path_image g = {}" "e>0" using not_on_path_ball[OF assms] by auto + moreover have "cball z (e/2) \ ball z e" using `e>0` by auto + ultimately show ?thesis apply(rule_tac x="e/2" in exI) by auto qed + +subsection {* Path component, considered as a "joinability" relation (from Tom Hales). *} + +definition "path_component s x y \ (\g. 
path g \ path_image g \ s \ pathstart g = x \ pathfinish g = y)" + +lemmas path_defs = path_def pathstart_def pathfinish_def path_image_def path_component_def + +lemma path_component_mem: assumes "path_component s x y" shows "x \ s" "y \ s" + using assms unfolding path_defs by auto + +lemma path_component_refl: assumes "x \ s" shows "path_component s x x" + unfolding path_defs apply(rule_tac x="\u. x" in exI) using assms + by(auto intro!:continuous_on_intros) + +lemma path_component_refl_eq: "path_component s x x \ x \ s" + by(auto intro!: path_component_mem path_component_refl) + +lemma path_component_sym: "path_component s x y \ path_component s y x" + using assms unfolding path_component_def apply(erule exE) apply(rule_tac x="reversepath g" in exI) + by auto + +lemma path_component_trans: assumes "path_component s x y" "path_component s y z" shows "path_component s x z" + using assms unfolding path_component_def apply- apply(erule exE)+ apply(rule_tac x="g +++ ga" in exI) by(auto simp add: path_image_join) + +lemma path_component_of_subset: "s \ t \ path_component s x y \ path_component t x y" + unfolding path_component_def by auto + +subsection {* Can also consider it as a set, as the name suggests. *} + +lemma path_component_set: "path_component s x = { y. (\g. path g \ path_image g \ s \ pathstart g = x \ pathfinish g = y )}" + apply(rule set_ext) unfolding mem_Collect_eq unfolding mem_def path_component_def by auto + +lemma mem_path_component_set:"x \ path_component s y \ path_component s y x" unfolding mem_def by auto + +lemma path_component_subset: "(path_component s x) \ s" + apply(rule, rule path_component_mem(2)) by(auto simp add:mem_def) + +lemma path_component_eq_empty: "path_component s x = {} \ x \ s" + apply rule apply(drule equals0D[of _ x]) defer apply(rule equals0I) unfolding mem_path_component_set + apply(drule path_component_mem(1)) using path_component_refl by auto + +subsection {* Path connectedness of a space. *} + +definition "path_connected s \ (\x\s. \y\s. \g. path g \ (path_image g) \ s \ pathstart g = x \ pathfinish g = y)" + +lemma path_connected_component: "path_connected s \ (\x\s. \y\s. path_component s x y)" + unfolding path_connected_def path_component_def by auto + +lemma path_connected_component_set: "path_connected s \ (\x\s. path_component s x = s)" + unfolding path_connected_component apply(rule, rule, rule, rule path_component_subset) + unfolding subset_eq mem_path_component_set Ball_def mem_def by auto + +subsection {* Some useful lemmas about path-connectedness. *} + +lemma convex_imp_path_connected: + fixes s :: "'a::real_normed_vector set" + assumes "convex s" shows "path_connected s" + unfolding path_connected_def apply(rule,rule,rule_tac x="linepath x y" in exI) + unfolding path_image_linepath using assms[unfolded convex_contains_segment] by auto + +lemma path_connected_imp_connected: assumes "path_connected s" shows "connected s" + unfolding connected_def not_ex apply(rule,rule,rule ccontr) unfolding not_not apply(erule conjE)+ proof- + fix e1 e2 assume as:"open e1" "open e2" "s \ e1 \ e2" "e1 \ e2 \ s = {}" "e1 \ s \ {}" "e2 \ s \ {}" + then obtain x1 x2 where obt:"x1\e1\s" "x2\e2\s" by auto + then obtain g where g:"path g" "path_image g \ s" "pathstart g = x1" "pathfinish g = x2" + using assms[unfolded path_connected_def,rule_format,of x1 x2] by auto + have *:"connected {0..1::real}" by(auto intro!: convex_connected convex_real_interval) + have "{0..1} \ {x \ {0..1}. g x \ e1} \ {x \ {0..1}. 
g x \ e2}" using as(3) g(2)[unfolded path_defs] by blast + moreover have "{x \ {0..1}. g x \ e1} \ {x \ {0..1}. g x \ e2} = {}" using as(4) g(2)[unfolded path_defs] unfolding subset_eq by auto + moreover have "{x \ {0..1}. g x \ e1} \ {} \ {x \ {0..1}. g x \ e2} \ {}" using g(3,4)[unfolded path_defs] using obt + by (simp add: ex_in_conv [symmetric], metis zero_le_one order_refl) + ultimately show False using *[unfolded connected_local not_ex,rule_format, of "{x\{0..1}. g x \ e1}" "{x\{0..1}. g x \ e2}"] + using continuous_open_in_preimage[OF g(1)[unfolded path_def] as(1)] + using continuous_open_in_preimage[OF g(1)[unfolded path_def] as(2)] by auto qed + +lemma open_path_component: + fixes s :: "'a::real_normed_vector set" (*TODO: generalize to metric_space*) + assumes "open s" shows "open(path_component s x)" + unfolding open_contains_ball proof + fix y assume as:"y \ path_component s x" + hence "y\s" apply- apply(rule path_component_mem(2)) unfolding mem_def by auto + then obtain e where e:"e>0" "ball y e \ s" using assms[unfolded open_contains_ball] by auto + show "\e>0. ball y e \ path_component s x" apply(rule_tac x=e in exI) apply(rule,rule `e>0`,rule) unfolding mem_ball mem_path_component_set proof- + fix z assume "dist y z < e" thus "path_component s x z" apply(rule_tac path_component_trans[of _ _ y]) defer + apply(rule path_component_of_subset[OF e(2)]) apply(rule convex_imp_path_connected[OF convex_ball, unfolded path_connected_component, rule_format]) using `e>0` + using as[unfolded mem_def] by auto qed qed + +lemma open_non_path_component: + fixes s :: "'a::real_normed_vector set" (*TODO: generalize to metric_space*) + assumes "open s" shows "open(s - path_component s x)" + unfolding open_contains_ball proof + fix y assume as:"y\s - path_component s x" + then obtain e where e:"e>0" "ball y e \ s" using assms[unfolded open_contains_ball] by auto + show "\e>0. ball y e \ s - path_component s x" apply(rule_tac x=e in exI) apply(rule,rule `e>0`,rule,rule) defer proof(rule ccontr) + fix z assume "z\ball y e" "\ z \ path_component s x" + hence "y \ path_component s x" unfolding not_not mem_path_component_set using `e>0` + apply- apply(rule path_component_trans,assumption) apply(rule path_component_of_subset[OF e(2)]) + apply(rule convex_imp_path_connected[OF convex_ball, unfolded path_connected_component, rule_format]) by auto + thus False using as by auto qed(insert e(2), auto) qed + +lemma connected_open_path_connected: + fixes s :: "'a::real_normed_vector set" (*TODO: generalize to metric_space*) + assumes "open s" "connected s" shows "path_connected s" + unfolding path_connected_component_set proof(rule,rule,rule path_component_subset, rule) + fix x y assume "x \ s" "y \ s" show "y \ path_component s x" proof(rule ccontr) + assume "y \ path_component s x" moreover + have "path_component s x \ s \ {}" using `x\s` path_component_eq_empty path_component_subset[of s x] by auto + ultimately show False using `y\s` open_non_path_component[OF assms(1)] open_path_component[OF assms(1)] + using assms(2)[unfolded connected_def not_ex, rule_format, of"path_component s x" "s - path_component s x"] by auto +qed qed + +lemma path_connected_continuous_image: + assumes "continuous_on s f" "path_connected s" shows "path_connected (f ` s)" + unfolding path_connected_def proof(rule,rule) + fix x' y' assume "x' \ f ` s" "y' \ f ` s" + then obtain x y where xy:"x\s" "y\s" "x' = f x" "y' = f y" by auto + guess g using assms(2)[unfolded path_connected_def,rule_format,OF xy(1,2)] .. + thus "\g. 
path g \ path_image g \ f ` s \ pathstart g = x' \ pathfinish g = y'" + unfolding xy apply(rule_tac x="f \ g" in exI) unfolding path_defs + using assms(1) by(auto intro!: continuous_on_compose continuous_on_subset[of _ _ "g ` {0..1}"]) qed + +lemma homeomorphic_path_connectedness: + "s homeomorphic t \ (path_connected s \ path_connected t)" + unfolding homeomorphic_def homeomorphism_def apply(erule exE|erule conjE)+ apply rule + apply(drule_tac f=f in path_connected_continuous_image) prefer 3 + apply(drule_tac f=g in path_connected_continuous_image) by auto + +lemma path_connected_empty: "path_connected {}" + unfolding path_connected_def by auto + +lemma path_connected_singleton: "path_connected {a}" + unfolding path_connected_def pathstart_def pathfinish_def path_image_def + apply (clarify, rule_tac x="\x. a" in exI, simp add: image_constant_conv) + apply (simp add: path_def continuous_on_const) + done + +lemma path_connected_Un: assumes "path_connected s" "path_connected t" "s \ t \ {}" + shows "path_connected (s \ t)" unfolding path_connected_component proof(rule,rule) + fix x y assume as:"x \ s \ t" "y \ s \ t" + from assms(3) obtain z where "z \ s \ t" by auto + thus "path_component (s \ t) x y" using as using assms(1-2)[unfolded path_connected_component] apply- + apply(erule_tac[!] UnE)+ apply(rule_tac[2-3] path_component_trans[of _ _ z]) + by(auto simp add:path_component_of_subset [OF Un_upper1] path_component_of_subset[OF Un_upper2]) qed + +subsection {* sphere is path-connected. *} + +lemma path_connected_punctured_universe: + assumes "2 \ CARD('n::finite)" shows "path_connected((UNIV::(real^'n) set) - {a})" proof- + obtain \ where \:"bij_betw \ {1..CARD('n)} (UNIV::'n set)" using ex_bij_betw_nat_finite_1[OF finite_UNIV] by auto + let ?U = "UNIV::(real^'n) set" let ?u = "?U - {0}" + let ?basis = "\k. basis (\ k)" + let ?A = "\k. {x::real^'n. \i\{1..k}. inner (basis (\ i)) x \ 0}" + have "\k\{2..CARD('n)}. path_connected (?A k)" proof + have *:"\k. ?A (Suc k) = {x. inner (?basis (Suc k)) x < 0} \ {x. inner (?basis (Suc k)) x > 0} \ ?A k" apply(rule set_ext,rule) defer + apply(erule UnE)+ unfolding mem_Collect_eq apply(rule_tac[1-2] x="Suc k" in bexI) + by(auto elim!: ballE simp add: not_less le_Suc_eq) + fix k assume "k \ {2..CARD('n)}" thus "path_connected (?A k)" proof(induct k) + case (Suc k) show ?case proof(cases "k = 1") + case False from Suc have d:"k \ {1..CARD('n)}" "Suc k \ {1..CARD('n)}" by auto + hence "\ k \ \ (Suc k)" using \[unfolded bij_betw_def inj_on_def, THEN conjunct1, THEN bspec[where x=k]] by auto + hence **:"?basis k + ?basis (Suc k) \ {x. 0 < inner (?basis (Suc k)) x} \ (?A k)" + "?basis k - ?basis (Suc k) \ {x. 0 > inner (?basis (Suc k)) x} \ ({x. 0 < inner (?basis (Suc k)) x} \ (?A k))" using d + by(auto simp add: inner_basis intro!:bexI[where x=k]) + show ?thesis unfolding * Un_assoc apply(rule path_connected_Un) defer apply(rule path_connected_Un) + prefer 5 apply(rule_tac[1-2] convex_imp_path_connected, rule convex_halfspace_lt, rule convex_halfspace_gt) + apply(rule Suc(1)) using d ** False by auto + next case True hence d:"1\{1..CARD('n)}" "2\{1..CARD('n)}" using Suc(2) by auto + have ***:"Suc 1 = 2" by auto + have **:"\s t P Q. s \ t \ {x. P x \ Q x} = (s \ {x. P x}) \ (t \ {x. Q x})" by auto + have nequals0I:"\x A. 
x\A \ A \ {}" by auto + have "\ 2 \ \ (Suc 0)" using \[unfolded bij_betw_def inj_on_def, THEN conjunct1, THEN bspec[where x=2]] using assms by auto + thus ?thesis unfolding * True unfolding ** neq_iff bex_disj_distrib apply - + apply(rule path_connected_Un, rule_tac[1-2] path_connected_Un) defer 3 apply(rule_tac[1-4] convex_imp_path_connected) + apply(rule_tac[5] x=" ?basis 1 + ?basis 2" in nequals0I) + apply(rule_tac[6] x="-?basis 1 + ?basis 2" in nequals0I) + apply(rule_tac[7] x="-?basis 1 - ?basis 2" in nequals0I) + using d unfolding *** by(auto intro!: convex_halfspace_gt convex_halfspace_lt, auto simp add: inner_basis) + qed qed auto qed note lem = this + + have ***:"\x::real^'n. (\i\{1..CARD('n)}. inner (basis (\ i)) x \ 0) \ (\i. inner (basis i) x \ 0)" + apply rule apply(erule bexE) apply(rule_tac x="\ i" in exI) defer apply(erule exE) proof- + fix x::"real^'n" and i assume as:"inner (basis i) x \ 0" + have "i\\ ` {1..CARD('n)}" using \[unfolded bij_betw_def, THEN conjunct2] by auto + then obtain j where "j\{1..CARD('n)}" "\ j = i" by auto + thus "\i\{1..CARD('n)}. inner (basis (\ i)) x \ 0" apply(rule_tac x=j in bexI) using as by auto qed auto + have *:"?U - {a} = (\x. x + a) ` {x. x \ 0}" apply(rule set_ext) unfolding image_iff + apply rule apply(rule_tac x="x - a" in bexI) by auto + have **:"\x::real^'n. x\0 \ (\i. inner (basis i) x \ 0)" unfolding Cart_eq by(auto simp add: inner_basis) + show ?thesis unfolding * apply(rule path_connected_continuous_image) apply(rule continuous_on_intros)+ + unfolding ** apply(rule lem[THEN bspec[where x="CARD('n)"], unfolded ***]) using assms by auto qed + +lemma path_connected_sphere: assumes "2 \ CARD('n::finite)" shows "path_connected {x::real^'n. norm(x - a) = r}" proof(cases "r\0") + case True thus ?thesis proof(cases "r=0") + case False hence "{x::real^'n. norm(x - a) = r} = {}" using True by auto + thus ?thesis using path_connected_empty by auto + qed(auto intro!:path_connected_singleton) next + case False hence *:"{x::real^'n. norm(x - a) = r} = (\x. a + r *\<^sub>R x) ` {x. norm x = 1}" unfolding not_le apply -apply(rule set_ext,rule) + unfolding image_iff apply(rule_tac x="(1/r) *\<^sub>R (x - a)" in bexI) unfolding mem_Collect_eq norm_scaleR by (auto simp add: scaleR_right_diff_distrib) + have **:"{x::real^'n. norm x = 1} = (\x. (1/norm x) *\<^sub>R x) ` (UNIV - {0})" apply(rule set_ext,rule) + unfolding image_iff apply(rule_tac x=x in bexI) unfolding mem_Collect_eq by(auto split:split_if_asm) + have "continuous_on (UNIV - {0}) (\x::real^'n. 1 / norm x)" unfolding o_def continuous_on_eq_continuous_within + apply(rule, rule continuous_at_within_inv[unfolded o_def inverse_eq_divide]) apply(rule continuous_at_within) + apply(rule continuous_at_norm[unfolded o_def]) by auto + thus ?thesis unfolding * ** using path_connected_punctured_universe[OF assms] + by(auto intro!: path_connected_continuous_image continuous_on_intros) qed + +lemma connected_sphere: "2 \ CARD('n) \ connected {x::real^'n. norm(x - a) = r}" + using path_connected_sphere path_connected_imp_connected by auto + +end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Multivariate_Analysis/Topology_Euclidean_Space.thy --- a/src/HOL/Multivariate_Analysis/Topology_Euclidean_Space.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Multivariate_Analysis/Topology_Euclidean_Space.thy Tue May 04 20:30:22 2010 +0200 @@ -6,7 +6,7 @@ header {* Elementary topology in Euclidean space. 
*} theory Topology_Euclidean_Space -imports SEQ Euclidean_Space Product_Vector Glbs +imports SEQ Euclidean_Space Glbs begin subsection{* General notion of a topology *} @@ -48,16 +48,17 @@ "\S T. openin U S \ openin U T \ openin U (S\T)" "\K. (\S \ K. openin U S) \ openin U (\K)" using openin[of U] unfolding istopology_def Collect_def mem_def - by (metis mem_def subset_eq)+ + unfolding subset_eq Ball_def mem_def by auto lemma openin_subset[intro]: "openin U S \ S \ topspace U" unfolding topspace_def by blast lemma openin_empty[simp]: "openin U {}" by (simp add: openin_clauses) lemma openin_Int[intro]: "openin U S \ openin U T \ openin U (S \ T)" - by (simp add: openin_clauses) - -lemma openin_Union[intro]: "(\S \K. openin U S) \ openin U (\ K)" by (simp add: openin_clauses) + using openin_clauses by simp + +lemma openin_Union[intro]: "(\S \K. openin U S) \ openin U (\ K)" + using openin_clauses by simp lemma openin_Un[intro]: "openin U S \ openin U T \ openin U (S \ T)" using openin_Union[of "{S,T}" U] by auto @@ -65,16 +66,14 @@ lemma openin_topspace[intro, simp]: "openin U (topspace U)" by (simp add: openin_Union topspace_def) lemma openin_subopen: "openin U S \ (\x \ S. \T. openin U T \ x \ T \ T \ S)" (is "?lhs \ ?rhs") -proof- - {assume ?lhs then have ?rhs by auto } - moreover - {assume H: ?rhs - then obtain t where t: "\x\S. openin U (t x) \ x \ t x \ t x \ S" - unfolding Ball_def ex_simps(6)[symmetric] choice_iff by blast - from t have th0: "\x\ t`S. openin U x" by auto - have "\ t`S = S" using t by auto - with openin_Union[OF th0] have "openin U S" by simp } - ultimately show ?thesis by blast +proof + assume ?lhs then show ?rhs by auto +next + assume H: ?rhs + let ?t = "\{T. openin U T \ T \ S}" + have "openin U ?t" by (simp add: openin_Union) + also have "?t = S" using H by auto + finally show "openin U S" . qed subsection{* Closed sets *} @@ -249,8 +248,6 @@ lemma ball_min_Int: "ball a (min r s) = ball a r \ ball a s" by (simp add: expand_set_eq) -subsection{* Topological properties of open balls *} - lemma diff_less_iff: "(a::real) - b > 0 \ a > b" "(a::real) - b < 0 \ a < b" "a - b < c \ a < c +b" "a - b > c \ a > c +b" by arith+ @@ -946,7 +943,7 @@ by (metis frontier_def closure_closed Diff_subset) lemma frontier_empty[simp]: "frontier {} = {}" - by (simp add: frontier_def closure_empty) + by (simp add: frontier_def) lemma frontier_subset_eq: "frontier S \ S \ closed S" proof- @@ -954,7 +951,7 @@ hence "closure S \ S" using interior_subset unfolding frontier_def by auto hence "closed S" using closure_subset_eq by auto } - thus ?thesis using frontier_subset_closed[of S] by auto + thus ?thesis using frontier_subset_closed[of S] .. qed lemma frontier_complement: "frontier(- S) = frontier S" @@ -964,11 +961,13 @@ using frontier_complement frontier_subset_eq[of "- S"] unfolding open_closed by auto -subsection{* Common nets and The "within" modifier for nets. *} +subsection {* Nets and the ``eventually true'' quantifier *} + +text {* Common nets and The "within" modifier for nets. *} definition at_infinity :: "'a::real_normed_vector net" where - "at_infinity = Abs_net (range (\r. {x. r \ norm x}))" + "at_infinity = Abs_net (\P. \r. \x. r \ norm x \ P x)" definition indirection :: "'a::real_normed_vector \ 'a \ 'a net" (infixr "indirection" 70) where @@ -976,23 +975,23 @@ text{* Prove That They are all nets. *} -lemma Rep_net_at_infinity: - "Rep_net at_infinity = range (\r. {x. r \ norm x})" +lemma eventually_at_infinity: + "eventually P at_infinity \ (\b. \x. 
norm x >= b \ P x)" unfolding at_infinity_def -apply (rule Abs_net_inverse') -apply (rule image_nonempty, simp) -apply (clarsimp, rename_tac r s) -apply (rule_tac x="max r s" in exI, auto) -done - -lemma within_UNIV: "net within UNIV = net" - by (simp add: Rep_net_inject [symmetric] Rep_net_within) - -subsection{* Identify Trivial limits, where we can't approach arbitrarily closely. *} +proof (rule eventually_Abs_net, rule is_filter.intro) + fix P Q :: "'a \ bool" + assume "\r. \x. r \ norm x \ P x" and "\s. \x. s \ norm x \ Q x" + then obtain r s where + "\x. r \ norm x \ P x" and "\x. s \ norm x \ Q x" by auto + then have "\x. max r s \ norm x \ P x \ Q x" by simp + then show "\r. \x. r \ norm x \ P x \ Q x" .. +qed auto + +text {* Identify Trivial limits, where we can't approach arbitrarily closely. *} definition trivial_limit :: "'a net \ bool" where - "trivial_limit net \ {} \ Rep_net net" + "trivial_limit net \ eventually (\x. False) net" lemma trivial_limit_within: shows "trivial_limit (at a within S) \ \ a islimpt S" @@ -1000,21 +999,21 @@ assume "trivial_limit (at a within S)" thus "\ a islimpt S" unfolding trivial_limit_def - unfolding Rep_net_within Rep_net_at + unfolding eventually_within eventually_at_topological unfolding islimpt_def apply (clarsimp simp add: expand_set_eq) apply (rename_tac T, rule_tac x=T in exI) - apply (clarsimp, drule_tac x=y in spec, simp) + apply (clarsimp, drule_tac x=y in bspec, simp_all) done next assume "\ a islimpt S" thus "trivial_limit (at a within S)" unfolding trivial_limit_def - unfolding Rep_net_within Rep_net_at + unfolding eventually_within eventually_at_topological unfolding islimpt_def - apply (clarsimp simp add: image_image) - apply (rule_tac x=T in image_eqI) - apply (auto simp add: expand_set_eq) + apply clarsimp + apply (rule_tac x=T in exI) + apply auto done qed @@ -1030,25 +1029,21 @@ lemma trivial_limit_at_infinity: "\ trivial_limit (at_infinity :: ('a::{real_normed_vector,zero_neq_one}) net)" (* FIXME: find a more appropriate type class *) - unfolding trivial_limit_def Rep_net_at_infinity - apply (clarsimp simp add: expand_set_eq) - apply (drule_tac x="scaleR r (sgn 1)" in spec) + unfolding trivial_limit_def eventually_at_infinity + apply clarsimp + apply (rule_tac x="scaleR b (sgn 1)" in exI) apply (simp add: norm_sgn) done lemma trivial_limit_sequentially[intro]: "\ trivial_limit sequentially" - by (auto simp add: trivial_limit_def Rep_net_sequentially) - -subsection{* Some property holds "sufficiently close" to the limit point. *} + by (auto simp add: trivial_limit_def eventually_sequentially) + +text {* Some property holds "sufficiently close" to the limit point. *} lemma eventually_at: (* FIXME: this replaces Limits.eventually_at *) "eventually P (at a) \ (\d>0. \x. 0 < dist x a \ dist x a < d \ P x)" unfolding eventually_at dist_nz by auto -lemma eventually_at_infinity: - "eventually P at_infinity \ (\b. \x. norm x >= b \ P x)" -unfolding eventually_def Rep_net_at_infinity by auto - lemma eventually_within: "eventually P (at a within S) \ (\d>0. \x\S. 0 < dist x a \ dist x a < d \ P x)" unfolding eventually_within eventually_at dist_nz by auto @@ -1059,18 +1054,20 @@ by auto (metis Rats_dense_in_nn_real order_le_less_trans order_refl) lemma eventually_happens: "eventually P net ==> trivial_limit net \ (\x. P x)" - unfolding eventually_def trivial_limit_def - using Rep_net_nonempty [of net] by auto + unfolding trivial_limit_def + by (auto elim: eventually_rev_mp) lemma always_eventually: "(\x. 
P x) ==> eventually P net" - unfolding eventually_def trivial_limit_def - using Rep_net_nonempty [of net] by auto +proof - + assume "\x. P x" hence "P = (\x. True)" by (simp add: ext) + thus "eventually P net" by simp +qed lemma trivial_limit_eventually: "trivial_limit net \ eventually P net" - unfolding trivial_limit_def eventually_def by auto + unfolding trivial_limit_def by (auto elim: eventually_rev_mp) lemma eventually_False: "eventually (\x. False) net \ trivial_limit net" - unfolding trivial_limit_def eventually_def by auto + unfolding trivial_limit_def .. lemma trivial_limit_eq: "trivial_limit net \ (\P. eventually P net)" apply (safe elim!: trivial_limit_eventually) @@ -1097,7 +1094,7 @@ lemma not_eventually: "(\x. \ P x ) \ ~(trivial_limit net) ==> ~(eventually (\x. P x) net)" by (simp add: eventually_False) -subsection{* Limits, defined as vacuously true when the limit is trivial. *} +subsection {* Limits *} text{* Notation Lim to avoid collition with lim defined in analysis *} definition @@ -1267,6 +1264,23 @@ shows "(f ---> l) net \ (g ---> m) net \ ((\x. f(x) - g(x)) ---> l - m) net" by (rule tendsto_diff) +lemma Lim_mul: + fixes f :: "'a \ 'b::real_normed_vector" + assumes "(c ---> d) net" "(f ---> l) net" + shows "((\x. c(x) *\<^sub>R f x) ---> (d *\<^sub>R l)) net" + using assms by (rule scaleR.tendsto) + +lemma Lim_inv: + fixes f :: "'a \ real" + assumes "(f ---> l) (net::'a net)" "l \ 0" + shows "((inverse o f) ---> inverse l) net" + unfolding o_def using assms by (rule tendsto_inverse) + +lemma Lim_vmul: + fixes c :: "'a \ real" and v :: "'b::real_normed_vector" + shows "(c ---> d) net ==> ((\x. c(x) *\<^sub>R v) ---> d *\<^sub>R v) net" + by (intro tendsto_intros) + lemma Lim_null: fixes f :: "'a \ 'b::real_normed_vector" shows "(f ---> l) net \ ((\x. f(x) - l) ---> 0) net" by (simp add: Lim dist_norm) @@ -1442,6 +1456,8 @@ unfolding tendsto_def Limits.eventually_within eventually_at_topological by auto +lemmas Lim_intros = Lim_add Lim_const Lim_sub Lim_cmul Lim_vmul Lim_within_id + lemma Lim_at_id: "(id ---> a) (at a)" apply (subst within_UNIV[symmetric]) by (simp add: Lim_within_id) @@ -1590,7 +1606,7 @@ (* FIXME: Only one congruence rule for tendsto can be used at a time! *) -lemma Lim_cong_within[cong add]: +lemma Lim_cong_within(*[cong add]*): fixes a :: "'a::metric_space" fixes l :: "'b::metric_space" (* TODO: generalize *) shows "(\x. x \ a \ f x = g x) ==> ((\x. f x) ---> l) (at a within S) \ ((g ---> l) (at a within S))" @@ -1641,11 +1657,16 @@ text{* Some other lemmas about sequences. *} +lemma sequentially_offset: + assumes "eventually (\i. P i) sequentially" + shows "eventually (\i. P (i + k)) sequentially" + using assms unfolding eventually_sequentially by (metis trans_le_add1) + lemma seq_offset: - fixes l :: "'a::metric_space" (* TODO: generalize *) - shows "(f ---> l) sequentially ==> ((\i. f( i + k)) ---> l) sequentially" - apply (auto simp add: Lim_sequentially) - by (metis trans_le_add1 ) + assumes "(f ---> l) sequentially" + shows "((\i. f (i + k)) ---> l) sequentially" + using assms unfolding tendsto_def + by clarify (rule sequentially_offset, simp) lemma seq_offset_neg: "(f ---> l) sequentially ==> ((\i. f(i - k)) ---> l) sequentially" @@ -1669,12 +1690,12 @@ { fix e::real assume "e>0" hence "\N::nat. \n::nat\N. 
inverse (real n) < e" using real_arch_inv[of e] apply auto apply(rule_tac x=n in exI) - by (metis not_le le_imp_inverse_le not_less real_of_nat_gt_zero_cancel_iff real_of_nat_less_iff xt1(7)) + by (metis le_imp_inverse_le not_less real_of_nat_gt_zero_cancel_iff real_of_nat_less_iff xt1(7)) } thus ?thesis unfolding Lim_sequentially dist_norm by simp qed -text{* More properties of closed balls. *} +subsection {* More properties of closed balls. *} lemma closed_cball: "closed (cball x e)" unfolding cball_def closed_def @@ -1704,7 +1725,7 @@ apply (simp add: interior_def, safe) apply (force simp add: open_contains_cball) apply (rule_tac x="ball x e" in exI) - apply (simp add: open_ball centre_in_ball subset_trans [OF ball_subset_cball]) + apply (simp add: subset_trans [OF ball_subset_cball]) done lemma islimpt_ball: @@ -1879,14 +1900,14 @@ lemma frontier_ball: fixes a :: "'a::real_normed_vector" shows "0 < e ==> frontier(ball a e) = {x. dist a x = e}" - apply (simp add: frontier_def closure_ball interior_open open_ball order_less_imp_le) + apply (simp add: frontier_def closure_ball interior_open order_less_imp_le) apply (simp add: expand_set_eq) by arith lemma frontier_cball: fixes a :: "'a::{real_normed_vector, perfect_space}" shows "frontier(cball a e) = {x. dist a x = e}" - apply (simp add: frontier_def interior_cball closed_cball closure_closed order_less_imp_le) + apply (simp add: frontier_def interior_cball closed_cball order_less_imp_le) apply (simp add: expand_set_eq) by arith @@ -2006,9 +2027,10 @@ lemma bounded_ball[simp,intro]: "bounded(ball x e)" by (metis ball_subset_cball bounded_cball bounded_subset) -lemma finite_imp_bounded[intro]: assumes "finite S" shows "bounded S" +lemma finite_imp_bounded[intro]: + fixes S :: "'a::metric_space set" assumes "finite S" shows "bounded S" proof- - { fix a F assume as:"bounded F" + { fix a and F :: "'a set" assume as:"bounded F" then obtain x e where "\y\F. dist x y \ e" unfolding bounded_def by auto hence "\y\(insert a F). dist x y \ max e (dist x a)" by auto hence "bounded (insert a F)" unfolding bounded_def by (intro exI) @@ -2156,7 +2178,9 @@ apply (blast intro!: order_antisym dest!: isGlb_le_isLb) done -subsection{* Compactness (the definition is the one based on convegent subsequences). *} +subsection {* Equivalent versions of compactness *} + +subsubsection{* Sequential compactness *} definition compact :: "'a::metric_space set \ bool" where (* TODO: generalize *) @@ -2218,7 +2242,7 @@ apply (rule allI, rule impI, rule ext) apply (erule conjE) apply (induct_tac x) -apply (simp add: nat_rec_0) +apply simp apply (erule_tac x="n" in allE) apply (simp) done @@ -2390,7 +2414,7 @@ using l r by fast qed -subsection{* Completeness. *} +subsubsection{* Completeness *} lemma cauchy_def: "Cauchy s \ (\e>0. \N. \m n. m \ N \ n \ N --> dist(s m)(s n) < e)" @@ -2537,7 +2561,7 @@ unfolding image_def by auto -subsection{* Total boundedness. *} +subsubsection{* Total boundedness *} fun helper_1::"('a::metric_space set) \ real \ nat \ 'a" where "helper_1 s e n = (SOME y::'a. y \ s \ (\m (dist (helper_1 s e m) y < e)))" @@ -2570,7 +2594,9 @@ using x[THEN spec[where x="r (N+1)"], THEN spec[where x="r (N)"]] by auto qed -subsection{* Heine-Borel theorem (following Burkill \& Burkill vol. 2) *} +subsubsection{* Heine-Borel theorem *} + +text {* Following Burkill \& Burkill vol. 2. *} lemma heine_borel_lemma: fixes s::"'a::metric_space set" assumes "compact s" "s \ (\ t)" "\b \ t. open b" @@ -2634,7 +2660,7 @@ ultimately show "\f'\f. 
finite f' \ s \ \f'" using bb k(2) by (rule_tac x="bb ` k" in exI) auto qed -subsection{* Bolzano-Weierstrass property. *} +subsubsection {* Bolzano-Weierstrass property *} lemma heine_borel_imp_bolzano_weierstrass: assumes "\f. (\t \ f. open t) \ s \ (\ f) --> (\f'. f' \ f \ finite f' \ s \ (\ f'))" @@ -2650,7 +2676,8 @@ { fix x y assume "x\t" "y\t" "f x = f y" hence "x \ f x" "y \ f x \ y = x" using f[THEN bspec[where x=x]] and `t\s` by auto hence "x = y" using `f x = f y` and f[THEN bspec[where x=y]] and `y\t` and `t\s` by auto } - hence "infinite (f ` t)" using assms(2) using finite_imageD[unfolded inj_on_def, of f t] by auto + hence "inj_on f t" unfolding inj_on_def by simp + hence "infinite (f ` t)" using assms(2) using finite_imageD by auto moreover { fix x assume "x\t" "f x \ g" from g(3) assms(3) `x\t` obtain h where "h\g" and "x\h" by auto @@ -2661,7 +2688,7 @@ ultimately show False using g(2) using finite_subset by auto qed -subsection{* Complete the chain of compactness variants. *} +subsubsection {* Complete the chain of compactness variants *} primrec helper_2::"(real \ 'a::metric_space) \ nat \ 'a" where "helper_2 beyond 0 = beyond 0" | @@ -3097,7 +3124,9 @@ ultimately show ?thesis by auto qed -subsection{* Define continuity over a net to take in restrictions of the set. *} +subsection {* Continuity *} + +text {* Define continuity over a net to take in restrictions of the set. *} definition continuous :: "'a::t2_space net \ ('a \ 'b::topological_space) \ bool" where @@ -3145,7 +3174,7 @@ using `?lhs`[unfolded continuous_within Lim_within] by auto { fix y assume "y\f ` (ball x d \ s)" hence "y \ ball (f x) e" using d(2) unfolding dist_nz[THEN sym] - apply (auto simp add: dist_commute mem_ball) apply(erule_tac x=xa in ballE) apply auto using `e>0` by auto + apply (auto simp add: dist_commute) apply(erule_tac x=xa in ballE) apply auto using `e>0` by auto } hence "\d>0. f ` (ball x d \ s) \ ball (f x) e" using `d>0` unfolding subset_eq ball_def by (auto simp add: dist_commute) } thus ?rhs by auto @@ -3165,85 +3194,87 @@ apply auto apply(erule_tac x=e in allE) apply auto apply(rule_tac x=d in exI) apply auto apply(erule_tac x="f xa" in allE) by (auto simp add: dist_commute dist_nz) qed -text{* For setwise continuity, just start from the epsilon-delta definitions. *} +text{* Define setwise continuity in terms of limits within the set. *} definition - continuous_on :: "'a::metric_space set \ ('a \ 'b::metric_space) \ bool" where - "continuous_on s f \ (\x \ s. \e>0. \d::real>0. \x' \ s. dist x' x < d --> dist (f x') (f x) < e)" - + continuous_on :: + "'a set \ ('a::topological_space \ 'b::topological_space) \ bool" +where + "continuous_on s f \ (\x\s. (f ---> f x) (at x within s))" + +lemma continuous_on_topological: + "continuous_on s f \ + (\x\s. \B. open B \ f x \ B \ + (\A. open A \ x \ A \ (\y\s. y \ A \ f y \ B)))" +unfolding continuous_on_def tendsto_def +unfolding Limits.eventually_within eventually_at_topological +by (intro ball_cong [OF refl] all_cong imp_cong ex_cong conj_cong refl) auto + +lemma continuous_on_iff: + "continuous_on s f \ + (\x\s. \e>0. \d>0. \x'\s. 
dist x' x < d \ dist (f x') (f x) < e)" +unfolding continuous_on_def Lim_within +apply (intro ball_cong [OF refl] all_cong ex_cong) +apply (rename_tac y, case_tac "y = x", simp) +apply (simp add: dist_nz) +done definition uniformly_continuous_on :: - "'a::metric_space set \ ('a \ 'b::metric_space) \ bool" where + "'a set \ ('a::metric_space \ 'b::metric_space) \ bool" +where "uniformly_continuous_on s f \ - (\e>0. \d>0. \x\s. \ x'\s. dist x' x < d - --> dist (f x') (f x) < e)" - - -text{* Lifting and dropping *} - -lemma continuous_on_o_dest_vec1: fixes f::"real \ 'a::real_normed_vector" - assumes "continuous_on {a..b::real} f" shows "continuous_on {vec1 a..vec1 b} (f o dest_vec1)" - using assms unfolding continuous_on_def apply safe - apply(erule_tac x="x$1" in ballE,erule_tac x=e in allE) apply safe - apply(rule_tac x=d in exI) apply safe unfolding o_def dist_real_def dist_real - apply(erule_tac x="dest_vec1 x'" in ballE) by(auto simp add:vector_le_def) - -lemma continuous_on_o_vec1: fixes f::"real^1 \ 'a::real_normed_vector" - assumes "continuous_on {a..b} f" shows "continuous_on {dest_vec1 a..dest_vec1 b} (f o vec1)" - using assms unfolding continuous_on_def apply safe - apply(erule_tac x="vec x" in ballE,erule_tac x=e in allE) apply safe - apply(rule_tac x=d in exI) apply safe unfolding o_def dist_real_def dist_real - apply(erule_tac x="vec1 x'" in ballE) by(auto simp add:vector_le_def) + (\e>0. \d>0. \x\s. \x'\s. dist x' x < d \ dist (f x') (f x) < e)" text{* Some simple consequential lemmas. *} lemma uniformly_continuous_imp_continuous: " uniformly_continuous_on s f ==> continuous_on s f" - unfolding uniformly_continuous_on_def continuous_on_def by blast + unfolding uniformly_continuous_on_def continuous_on_iff by blast lemma continuous_at_imp_continuous_within: "continuous (at x) f ==> continuous (at x within s) f" unfolding continuous_within continuous_at using Lim_at_within by auto -lemma continuous_at_imp_continuous_on: assumes "(\x \ s. continuous (at x) f)" +lemma Lim_trivial_limit: "trivial_limit net \ (f ---> l) net" +unfolding tendsto_def by (simp add: trivial_limit_eq) + +lemma continuous_at_imp_continuous_on: + assumes "\x\s. continuous (at x) f" shows "continuous_on s f" -proof(simp add: continuous_at continuous_on_def, rule, rule, rule) - fix x and e::real assume "x\s" "e>0" - hence "eventually (\xa. dist (f xa) (f x) < e) (at x)" using assms unfolding continuous_at tendsto_iff by auto - then obtain d where d:"d>0" "\xa. 0 < dist xa x \ dist xa x < d \ dist (f xa) (f x) < e" unfolding eventually_at by auto - { fix x' assume "\ 0 < dist x' x" - hence "x=x'" - using dist_nz[of x' x] by auto - hence "dist (f x') (f x) < e" using `e>0` by auto - } - thus "\d>0. \x'\s. dist x' x < d \ dist (f x') (f x) < e" using d by auto +unfolding continuous_on_def +proof + fix x assume "x \ s" + with assms have *: "(f ---> f (netlimit (at x))) (at x)" + unfolding continuous_def by simp + have "(f ---> f x) (at x)" + proof (cases "trivial_limit (at x)") + case True thus ?thesis + by (rule Lim_trivial_limit) + next + case False + hence "netlimit (at x) = x" + using netlimit_within [of x UNIV] + by (simp add: within_UNIV) + with * show ?thesis by simp + qed + thus "(f ---> f x) (at x within s)" + by (rule Lim_at_within) qed lemma continuous_on_eq_continuous_within: - "continuous_on s f \ (\x \ s. continuous (at x within s) f)" (is "?lhs = ?rhs") -proof - assume ?rhs - { fix x assume "x\s" - fix e::real assume "e>0" - assume "\d>0. \xa\s. 
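continuous_on is redefined in this hunk for arbitrary topological spaces in terms of limits within the set, and the former epsilon-delta formulation survives as the derived, metric-space lemma continuous_on_iff. In conventional notation (a paraphrase of the collapsed statements) the two read
\[
\mathrm{continuous\_on}\;s\;f \;\equiv\; \forall x \in s.\ (f \longrightarrow f\,x)\ (\mathrm{at}\ x\ \mathrm{within}\ s),
\]
\[
\mathrm{continuous\_on}\;s\;f \;\longleftrightarrow\; \forall x \in s.\ \forall \varepsilon > 0.\ \exists \delta > 0.\ \forall x' \in s.\ d(x',x) < \delta \longrightarrow d(f\,x', f\,x) < \varepsilon .
\]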
0 < dist xa x \ dist xa x < d \ dist (f xa) (f x) < e" - then obtain d where "d>0" and d:"\xa\s. 0 < dist xa x \ dist xa x < d \ dist (f xa) (f x) < e" by auto - { fix x' assume as:"x'\s" "dist x' x < d" - hence "dist (f x') (f x) < e" using `e>0` d `x'\s` dist_eq_0_iff[of x' x] zero_le_dist[of x' x] as(2) by (metis dist_eq_0_iff dist_nz) } - hence "\d>0. \x'\s. dist x' x < d \ dist (f x') (f x) < e" using `d>0` by auto - } - thus ?lhs using `?rhs` unfolding continuous_on_def continuous_within Lim_within by auto -next - assume ?lhs - thus ?rhs unfolding continuous_on_def continuous_within Lim_within by blast -qed - -lemma continuous_on: - "continuous_on s f \ (\x \ s. (f ---> f(x)) (at x within s))" - by (auto simp add: continuous_on_eq_continuous_within continuous_within) + "continuous_on s f \ (\x \ s. continuous (at x within s) f)" +unfolding continuous_on_def continuous_def +apply (rule ball_cong [OF refl]) +apply (case_tac "trivial_limit (at x within s)") +apply (simp add: Lim_trivial_limit) +apply (simp add: netlimit_within) +done + +lemmas continuous_on = continuous_on_def -- "legacy theorem name" lemma continuous_on_eq_continuous_at: - "open s ==> (continuous_on s f \ (\x \ s. continuous (at x) f))" + shows "open s ==> (continuous_on s f \ (\x \ s. continuous (at x) f))" by (auto simp add: continuous_on continuous_at Lim_within_open) lemma continuous_within_subset: @@ -3252,19 +3283,19 @@ unfolding continuous_within by(metis Lim_within_subset) lemma continuous_on_subset: - "continuous_on s f \ t \ s ==> continuous_on t f" + shows "continuous_on s f \ t \ s ==> continuous_on t f" unfolding continuous_on by (metis subset_eq Lim_within_subset) lemma continuous_on_interior: - "continuous_on s f \ x \ interior s ==> continuous (at x) f" + shows "continuous_on s f \ x \ interior s ==> continuous (at x) f" unfolding interior_def apply simp by (meson continuous_on_eq_continuous_at continuous_on_subset) lemma continuous_on_eq: - "(\x \ s. f x = g x) \ continuous_on s f - ==> continuous_on s g" - by (simp add: continuous_on_def) + "(\x \ s. f x = g x) \ continuous_on s f \ continuous_on s g" + unfolding continuous_on_def tendsto_def Limits.eventually_within + by simp text{* Characterization of various kinds of continuity in terms of sequences. *} @@ -3317,7 +3348,9 @@ using continuous_within_sequentially[of a UNIV f] unfolding within_UNIV by auto lemma continuous_on_sequentially: - "continuous_on s f \ (\x. \a \ s. (\n. x(n) \ s) \ (x ---> a) sequentially + fixes f :: "'a::metric_space \ 'b::metric_space" + shows "continuous_on s f \ + (\x. \a \ s. (\n. x(n) \ s) \ (x ---> a) sequentially --> ((f o x) ---> f(a)) sequentially)" (is "?lhs = ?rhs") proof assume ?rhs thus ?lhs using continuous_within_sequentially[of _ s f] unfolding continuous_on_eq_continuous_within by auto @@ -3325,24 +3358,23 @@ assume ?lhs thus ?rhs unfolding continuous_on_eq_continuous_within using continuous_within_sequentially[of _ s f] by auto qed -lemma uniformly_continuous_on_sequentially: - fixes f :: "'a::real_normed_vector \ 'b::real_normed_vector" - shows "uniformly_continuous_on s f \ (\x y. (\n. x n \ s) \ (\n. y n \ s) \ - ((\n. x n - y n) ---> 0) sequentially - \ ((\n. f(x n) - f(y n)) ---> 0) sequentially)" (is "?lhs = ?rhs") +lemma uniformly_continuous_on_sequentially': + "uniformly_continuous_on s f \ (\x y. (\n. x n \ s) \ (\n. y n \ s) \ + ((\n. dist (x n) (y n)) ---> 0) sequentially + \ ((\n. dist (f(x n)) (f(y n))) ---> 0) sequentially)" (is "?lhs = ?rhs") proof assume ?lhs - { fix x y assume x:"\n. 
x n \ s" and y:"\n. y n \ s" and xy:"((\n. x n - y n) ---> 0) sequentially" + { fix x y assume x:"\n. x n \ s" and y:"\n. y n \ s" and xy:"((\n. dist (x n) (y n)) ---> 0) sequentially" { fix e::real assume "e>0" then obtain d where "d>0" and d:"\x\s. \x'\s. dist x' x < d \ dist (f x') (f x) < e" using `?lhs`[unfolded uniformly_continuous_on_def, THEN spec[where x=e]] by auto - obtain N where N:"\n\N. norm (x n - y n - 0) < d" using xy[unfolded Lim_sequentially dist_norm] and `d>0` by auto + obtain N where N:"\n\N. dist (x n) (y n) < d" using xy[unfolded Lim_sequentially dist_norm] and `d>0` by auto { fix n assume "n\N" - hence "norm (f (x n) - f (y n) - 0) < e" + hence "dist (f (x n)) (f (y n)) < e" using N[THEN spec[where x=n]] using d[THEN bspec[where x="x n"], THEN bspec[where x="y n"]] using x and y - unfolding dist_commute and dist_norm by simp } - hence "\N. \n\N. norm (f (x n) - f (y n) - 0) < e" by auto } - hence "((\n. f(x n) - f(y n)) ---> 0) sequentially" unfolding Lim_sequentially and dist_norm by auto } + unfolding dist_commute by simp } + hence "\N. \n\N. dist (f (x n)) (f (y n)) < e" by auto } + hence "((\n. dist (f(x n)) (f(y n))) ---> 0) sequentially" unfolding Lim_sequentially and dist_real_def by auto } thus ?rhs by auto next assume ?rhs @@ -3355,25 +3387,32 @@ def y \ "\n::nat. snd (fa (inverse (real n + 1)))" have xyn:"\n. x n \ s \ y n \ s" and xy0:"\n. dist (x n) (y n) < inverse (real n + 1)" and fxy:"\n. \ dist (f (x n)) (f (y n)) < e" unfolding x_def and y_def using fa by auto - have 1:"\(x::'a) y. dist (x - y) 0 = dist x y" unfolding dist_norm by auto - have 2:"\(x::'b) y. dist (x - y) 0 = dist x y" unfolding dist_norm by auto { fix e::real assume "e>0" then obtain N::nat where "N \ 0" and N:"0 < inverse (real N) \ inverse (real N) < e" unfolding real_arch_inv[of e] by auto { fix n::nat assume "n\N" hence "inverse (real n + 1) < inverse (real N)" using real_of_nat_ge_zero and `N\0` by auto also have "\ < e" using N by auto finally have "inverse (real n + 1) < e" by auto - hence "dist (x n - y n) 0 < e" unfolding 1 using xy0[THEN spec[where x=n]] by auto } - hence "\N. \n\N. dist (x n - y n) 0 < e" by auto } - hence "\e>0. \N. \n\N. dist (f (x n) - f (y n)) 0 < e" using `?rhs`[THEN spec[where x=x], THEN spec[where x=y]] and xyn unfolding Lim_sequentially by auto - hence False unfolding 2 using fxy and `e>0` by auto } + hence "dist (x n) (y n) < e" using xy0[THEN spec[where x=n]] by auto } + hence "\N. \n\N. dist (x n) (y n) < e" by auto } + hence "\e>0. \N. \n\N. dist (f (x n)) (f (y n)) < e" using `?rhs`[THEN spec[where x=x], THEN spec[where x=y]] and xyn unfolding Lim_sequentially dist_real_def by auto + hence False using fxy and `e>0` by auto } thus ?lhs unfolding uniformly_continuous_on_def by blast qed +lemma uniformly_continuous_on_sequentially: + fixes f :: "'a::real_normed_vector \ 'b::real_normed_vector" + shows "uniformly_continuous_on s f \ (\x y. (\n. x n \ s) \ (\n. y n \ s) \ + ((\n. x n - y n) ---> 0) sequentially + \ ((\n. f(x n) - f(y n)) ---> 0) sequentially)" (is "?lhs = ?rhs") +(* BH: maybe the previous lemma should replace this one? *) +unfolding uniformly_continuous_on_sequentially' +unfolding dist_norm Lim_null_norm [symmetric] .. + text{* The usual transformation theorems. *} lemma continuous_transform_within: - fixes f g :: "'a::metric_space \ 'b::metric_space" + fixes f g :: "'a::metric_space \ 'b::metric_space" (* TODO: generalize *) assumes "0 < d" "x \ s" "\x' \ s. 
dist x' x < d --> f x' = g x'" "continuous (at x within s) f" shows "continuous (at x within s) g" @@ -3389,7 +3428,7 @@ qed lemma continuous_transform_at: - fixes f g :: "'a::metric_space \ 'b::metric_space" + fixes f g :: "'a::metric_space \ 'b::metric_space" (* TODO: generalize *) assumes "0 < d" "\x'. dist x' x < d --> f x' = g x'" "continuous (at x) f" shows "continuous (at x) g" @@ -3436,29 +3475,29 @@ lemma continuous_on_const: "continuous_on s (\x. c)" - unfolding continuous_on_eq_continuous_within using continuous_const by blast + unfolding continuous_on_def by auto lemma continuous_on_cmul: - fixes f :: "'a::metric_space \ 'b::real_normed_vector" - shows "continuous_on s f ==> continuous_on s (\x. c *\<^sub>R (f x))" - unfolding continuous_on_eq_continuous_within using continuous_cmul by blast + fixes f :: "'a::topological_space \ 'b::real_normed_vector" + shows "continuous_on s f \ continuous_on s (\x. c *\<^sub>R (f x))" + unfolding continuous_on_def by (auto intro: tendsto_intros) lemma continuous_on_neg: - fixes f :: "'a::metric_space \ 'b::real_normed_vector" + fixes f :: "'a::topological_space \ 'b::real_normed_vector" shows "continuous_on s f \ continuous_on s (\x. - f x)" - unfolding continuous_on_eq_continuous_within using continuous_neg by blast + unfolding continuous_on_def by (auto intro: tendsto_intros) lemma continuous_on_add: - fixes f g :: "'a::metric_space \ 'b::real_normed_vector" + fixes f g :: "'a::topological_space \ 'b::real_normed_vector" shows "continuous_on s f \ continuous_on s g \ continuous_on s (\x. f x + g x)" - unfolding continuous_on_eq_continuous_within using continuous_add by blast + unfolding continuous_on_def by (auto intro: tendsto_intros) lemma continuous_on_sub: - fixes f g :: "'a::metric_space \ 'b::real_normed_vector" + fixes f g :: "'a::topological_space \ 'b::real_normed_vector" shows "continuous_on s f \ continuous_on s g \ continuous_on s (\x. f x - g x)" - unfolding continuous_on_eq_continuous_within using continuous_sub by blast + unfolding continuous_on_def by (auto intro: tendsto_intros) text{* Same thing for uniform continuity, using sequential formulations. *} @@ -3467,8 +3506,7 @@ unfolding uniformly_continuous_on_def by simp lemma uniformly_continuous_on_cmul: - fixes f :: "'a::real_normed_vector \ 'b::real_normed_vector" - (* FIXME: generalize 'a to metric_space *) + fixes f :: "'a::metric_space \ 'b::real_normed_vector" assumes "uniformly_continuous_on s f" shows "uniformly_continuous_on s (\x. c *\<^sub>R f(x))" proof- @@ -3477,7 +3515,8 @@ using Lim_cmul[of "(\n. f (x n) - f (y n))" 0 sequentially c] unfolding scaleR_zero_right scaleR_right_diff_distrib by auto } - thus ?thesis using assms unfolding uniformly_continuous_on_sequentially by auto + thus ?thesis using assms unfolding uniformly_continuous_on_sequentially' + unfolding dist_norm Lim_null_norm [symmetric] by auto qed lemma dist_minus: @@ -3492,7 +3531,7 @@ unfolding uniformly_continuous_on_def dist_minus . lemma uniformly_continuous_on_add: - fixes f g :: "'a::real_normed_vector \ 'b::real_normed_vector" (* FIXME: generalize 'a *) + fixes f g :: "'a::metric_space \ 'b::real_normed_vector" assumes "uniformly_continuous_on s f" "uniformly_continuous_on s g" shows "uniformly_continuous_on s (\x. f x + g x)" proof- @@ -3501,11 +3540,12 @@ hence "((\xa. f (x xa) - f (y xa) + (g (x xa) - g (y xa))) ---> 0 + 0) sequentially" using Lim_add[of "\ n. f (x n) - f (y n)" 0 sequentially "\ n. g (x n) - g (y n)" 0] by auto hence "((\n. 
f (x n) + g (x n) - (f (y n) + g (y n))) ---> 0) sequentially" unfolding Lim_sequentially and add_diff_add [symmetric] by auto } - thus ?thesis using assms unfolding uniformly_continuous_on_sequentially by auto + thus ?thesis using assms unfolding uniformly_continuous_on_sequentially' + unfolding dist_norm Lim_null_norm [symmetric] by auto qed lemma uniformly_continuous_on_sub: - fixes f :: "'a::real_normed_vector \ 'b::real_normed_vector" (* FIXME: generalize 'a *) + fixes f :: "'a::metric_space \ 'b::real_normed_vector" shows "uniformly_continuous_on s f \ uniformly_continuous_on s g ==> uniformly_continuous_on s (\x. f x - g x)" unfolding ab_diff_minus @@ -3524,7 +3564,7 @@ lemma continuous_on_id: "continuous_on s (\x. x)" - unfolding continuous_on Lim_within by auto + unfolding continuous_on_def by (auto intro: tendsto_ident_at_within) lemma uniformly_continuous_on_id: "uniformly_continuous_on s (\x. x)" @@ -3532,25 +3572,21 @@ text{* Continuity of all kinds is preserved under composition. *} +lemma continuous_within_topological: + "continuous (at x within s) f \ + (\B. open B \ f x \ B \ + (\A. open A \ x \ A \ (\y\s. y \ A \ f y \ B)))" +unfolding continuous_within +unfolding tendsto_def Limits.eventually_within eventually_at_topological +by (intro ball_cong [OF refl] all_cong imp_cong ex_cong conj_cong refl) auto + lemma continuous_within_compose: - fixes f :: "'a::metric_space \ 'b::metric_space" (* FIXME: generalize *) - fixes g :: "'b::metric_space \ 'c::metric_space" - assumes "continuous (at x within s) f" "continuous (at (f x) within f ` s) g" + assumes "continuous (at x within s) f" + assumes "continuous (at (f x) within f ` s) g" shows "continuous (at x within s) (g o f)" -proof- - { fix e::real assume "e>0" - with assms(2)[unfolded continuous_within Lim_within] obtain d where "d>0" and d:"\xa\f ` s. 0 < dist xa (f x) \ dist xa (f x) < d \ dist (g xa) (g (f x)) < e" by auto - from assms(1)[unfolded continuous_within Lim_within] obtain d' where "d'>0" and d':"\xa\s. 0 < dist xa x \ dist xa x < d' \ dist (f xa) (f x) < d" using `d>0` by auto - { fix y assume as:"y\s" "0 < dist y x" "dist y x < d'" - hence "dist (f y) (f x) < d" using d'[THEN bspec[where x=y]] by (auto simp add:dist_commute) - hence "dist (g (f y)) (g (f x)) < e" using as(1) d[THEN bspec[where x="f y"]] unfolding dist_nz[THEN sym] using `e>0` by auto } - hence "\d>0. \xa\s. 0 < dist xa x \ dist xa x < d \ dist (g (f xa)) (g (f x)) < e" using `d'>0` by auto } - thus ?thesis unfolding continuous_within Lim_within by auto -qed +using assms unfolding continuous_within_topological by simp metis lemma continuous_at_compose: - fixes f :: "'a::metric_space \ 'b::metric_space" (* FIXME: generalize *) - fixes g :: "'b::metric_space \ 'c::metric_space" assumes "continuous (at x) f" "continuous (at (f x)) g" shows "continuous (at x) (g o f)" proof- @@ -3559,8 +3595,8 @@ qed lemma continuous_on_compose: - "continuous_on s f \ continuous_on (f ` s) g \ continuous_on s (g o f)" - unfolding continuous_on_eq_continuous_within using continuous_within_compose[of _ s f g] by auto + "continuous_on s f \ continuous_on (f ` s) g \ continuous_on s (g o f)" + unfolding continuous_on_topological by simp metis lemma uniformly_continuous_on_compose: assumes "uniformly_continuous_on s f" "uniformly_continuous_on (f ` s) g" @@ -3576,74 +3612,56 @@ text{* Continuity in terms of open preimages. *} lemma continuous_at_open: - fixes f :: "'a::metric_space \ 'b::metric_space" (* FIXME: generalize *) - shows "continuous (at x) f \ (\t. 
open t \ f x \ t --> (\s. open s \ x \ s \ (\x' \ s. (f x') \ t)))" (is "?lhs = ?rhs") -proof - assume ?lhs - { fix t assume as: "open t" "f x \ t" - then obtain e where "e>0" and e:"ball (f x) e \ t" unfolding open_contains_ball by auto - - obtain d where "d>0" and d:"\y. 0 < dist y x \ dist y x < d \ dist (f y) (f x) < e" using `e>0` using `?lhs`[unfolded continuous_at Lim_at open_dist] by auto - - have "open (ball x d)" using open_ball by auto - moreover have "x \ ball x d" unfolding centre_in_ball using `d>0` by simp - moreover - { fix x' assume "x'\ball x d" hence "f x' \ t" - using e[unfolded subset_eq Ball_def mem_ball, THEN spec[where x="f x'"]] d[THEN spec[where x=x']] - unfolding mem_ball apply (auto simp add: dist_commute) - unfolding dist_nz[THEN sym] using as(2) by auto } - hence "\x'\ball x d. f x' \ t" by auto - ultimately have "\s. open s \ x \ s \ (\x'\s. f x' \ t)" - apply(rule_tac x="ball x d" in exI) by simp } - thus ?rhs by auto -next - assume ?rhs - { fix e::real assume "e>0" - then obtain s where s: "open s" "x \ s" "\x'\s. f x' \ ball (f x) e" using `?rhs`[unfolded continuous_at Lim_at, THEN spec[where x="ball (f x) e"]] - unfolding centre_in_ball[of "f x" e, THEN sym] by auto - then obtain d where "d>0" and d:"ball x d \ s" unfolding open_contains_ball by auto - { fix y assume "0 < dist y x \ dist y x < d" - hence "dist (f y) (f x) < e" using d[unfolded subset_eq Ball_def mem_ball, THEN spec[where x=y]] - using s(3)[THEN bspec[where x=y], unfolded mem_ball] by (auto simp add: dist_commute) } - hence "\d>0. \xa. 0 < dist xa x \ dist xa x < d \ dist (f xa) (f x) < e" using `d>0` by auto } - thus ?lhs unfolding continuous_at Lim_at by auto -qed + shows "continuous (at x) f \ (\t. open t \ f x \ t --> (\s. open s \ x \ s \ (\x' \ s. (f x') \ t)))" +unfolding continuous_within_topological [of x UNIV f, unfolded within_UNIV] +unfolding imp_conjL by (intro all_cong imp_cong ex_cong conj_cong refl) auto lemma continuous_on_open: - "continuous_on s f \ + shows "continuous_on s f \ (\t. openin (subtopology euclidean (f ` s)) t --> openin (subtopology euclidean s) {x \ s. f x \ t})" (is "?lhs = ?rhs") -proof - assume ?lhs - { fix t assume as:"openin (subtopology euclidean (f ` s)) t" - have "{x \ s. f x \ t} \ s" using as[unfolded openin_euclidean_subtopology_iff] by auto - moreover - { fix x assume as':"x\{x \ s. f x \ t}" - then obtain e where e: "e>0" "\x'\f ` s. dist x' (f x) < e \ x' \ t" using as[unfolded openin_euclidean_subtopology_iff, THEN conjunct2, THEN bspec[where x="f x"]] by auto - from this(1) obtain d where d: "d>0" "\xa\s. 0 < dist xa x \ dist xa x < d \ dist (f xa) (f x) < e" using `?lhs`[unfolded continuous_on Lim_within, THEN bspec[where x=x]] using as' by auto - have "\e>0. \x'\s. dist x' x < e \ x' \ {x \ s. f x \ t}" using d e unfolding dist_nz[THEN sym] by (rule_tac x=d in exI, auto) } - ultimately have "openin (subtopology euclidean s) {x \ s. f x \ t}" unfolding openin_euclidean_subtopology_iff by auto } - thus ?rhs unfolding continuous_on Lim_within using openin by auto +proof (safe) + fix t :: "'b set" + assume 1: "continuous_on s f" + assume 2: "openin (subtopology euclidean (f ` s)) t" + from 2 obtain B where B: "open B" and t: "t = f ` s \ B" + unfolding openin_open by auto + def U == "\{A. open A \ (\x\s. x \ A \ f x \ B)}" + have "open U" unfolding U_def by (simp add: open_Union) + moreover have "\x\s. 
x \ U \ f x \ t" + proof (intro ballI iffI) + fix x assume "x \ s" and "x \ U" thus "f x \ t" + unfolding U_def t by auto + next + fix x assume "x \ s" and "f x \ t" + hence "x \ s" and "f x \ B" + unfolding t by auto + with 1 B obtain A where "open A" "x \ A" "\y\s. y \ A \ f y \ B" + unfolding t continuous_on_topological by metis + then show "x \ U" + unfolding U_def by auto + qed + ultimately have "open U \ {x \ s. f x \ t} = s \ U" by auto + then show "openin (subtopology euclidean s) {x \ s. f x \ t}" + unfolding openin_open by fast next - assume ?rhs - { fix e::real and x assume "x\s" "e>0" - { fix xa x' assume "dist (f xa) (f x) < e" "xa \ s" "x' \ s" "dist (f xa) (f x') < e - dist (f xa) (f x)" - hence "dist (f x') (f x) < e" using dist_triangle[of "f x'" "f x" "f xa"] - by (auto simp add: dist_commute) } - hence "ball (f x) e \ f ` s \ f ` s \ (\xa\ball (f x) e \ f ` s. \ea>0. \x'\f ` s. dist x' xa < ea \ x' \ ball (f x) e \ f ` s)" apply auto - apply(rule_tac x="e - dist (f xa) (f x)" in exI) using `e>0` by (auto simp add: dist_commute) - hence "\xa\{xa \ s. f xa \ ball (f x) e \ f ` s}. \ea>0. \x'\s. dist x' xa < ea \ x' \ {xa \ s. f xa \ ball (f x) e \ f ` s}" - using `?rhs`[unfolded openin_euclidean_subtopology_iff, THEN spec[where x="ball (f x) e \ f ` s"]] by auto - hence "\d>0. \xa\s. 0 < dist xa x \ dist xa x < d \ dist (f xa) (f x) < e" apply(erule_tac x=x in ballE) apply auto using `e>0` `x\s` by (auto simp add: dist_commute) } - thus ?lhs unfolding continuous_on Lim_within by auto -qed - -(* ------------------------------------------------------------------------- *) -(* Similarly in terms of closed sets. *) -(* ------------------------------------------------------------------------- *) + assume "?rhs" show "continuous_on s f" + unfolding continuous_on_topological + proof (clarify) + fix x and B assume "x \ s" and "open B" and "f x \ B" + have "openin (subtopology euclidean (f ` s)) (f ` s \ B)" + unfolding openin_open using `open B` by auto + then have "openin (subtopology euclidean s) {x \ s. f x \ f ` s \ B}" + using `?rhs` by fast + then show "\A. open A \ x \ A \ (\y\s. y \ A \ f y \ B)" + unfolding openin_open using `x \ s` and `f x \ B` by auto + qed +qed + +text {* Similarly in terms of closed sets. *} lemma continuous_on_closed: - "continuous_on s f \ (\t. closedin (subtopology euclidean (f ` s)) t --> closedin (subtopology euclidean s) {x \ s. f x \ t})" (is "?lhs = ?rhs") + shows "continuous_on s f \ (\t. closedin (subtopology euclidean (f ` s)) t --> closedin (subtopology euclidean s) {x \ s. f x \ t})" (is "?lhs = ?rhs") proof assume ?lhs { fix t @@ -3706,26 +3724,22 @@ qed lemma continuous_open_preimage_univ: - fixes f :: "'a::metric_space \ 'b::metric_space" (* FIXME: generalize *) shows "\x. continuous (at x) f \ open s \ open {x. f x \ s}" using continuous_open_preimage[of UNIV f s] open_UNIV continuous_at_imp_continuous_on by auto lemma continuous_closed_preimage_univ: - fixes f :: "'a::metric_space \ 'b::metric_space" (* FIXME: generalize *) shows "(\x. continuous (at x) f) \ closed s ==> closed {x. f x \ s}" using continuous_closed_preimage[of UNIV f s] closed_UNIV continuous_at_imp_continuous_on by auto lemma continuous_open_vimage: - fixes f :: "'a::metric_space \ 'b::metric_space" (* FIXME: generalize *) shows "\x. continuous (at x) f \ open s \ open (f -` s)" unfolding vimage_def by (rule continuous_open_preimage_univ) lemma continuous_closed_vimage: - fixes f :: "'a::metric_space \ 'b::metric_space" (* FIXME: generalize *) shows "\x. 
continuous (at x) f \ closed s \ closed (f -` s)" unfolding vimage_def by (rule continuous_closed_preimage_univ) -lemma interior_image_subset: fixes f::"_::metric_space \ _::metric_space" +lemma interior_image_subset: assumes "\x. continuous (at x) f" "inj f" shows "interior (f ` s) \ f ` (interior s)" apply rule unfolding interior_def mem_Collect_eq image_iff apply safe @@ -3739,14 +3753,17 @@ text{* Equality of continuous functions on closure and related results. *} lemma continuous_closed_in_preimage_constant: - "continuous_on s f ==> closedin (subtopology euclidean s) {x \ s. f x = a}" + fixes f :: "_ \ 'b::metric_space" (* class constraint due to closed_sing *) + shows "continuous_on s f ==> closedin (subtopology euclidean s) {x \ s. f x = a}" using continuous_closed_in_preimage[of s f "{a}"] closed_sing by auto lemma continuous_closed_preimage_constant: - "continuous_on s f \ closed s ==> closed {x \ s. f x = a}" + fixes f :: "_ \ 'b::metric_space" (* class constraint due to closed_sing *) + shows "continuous_on s f \ closed s ==> closed {x \ s. f x = a}" using continuous_closed_preimage[of s f "{a}"] closed_sing by auto lemma continuous_constant_on_closure: + fixes f :: "_ \ 'b::metric_space" (* class constraint due to closed_sing *) assumes "continuous_on (closure s) f" "\x \ s. f x = a" shows "\x \ (closure s). f x = a" @@ -3798,11 +3815,13 @@ using assms using continuous_within_avoid[of x UNIV f a, unfolded within_UNIV] by auto lemma continuous_on_avoid: + fixes f :: "'a::metric_space \ 'b::metric_space" (* TODO: generalize *) assumes "continuous_on s f" "x \ s" "f x \ a" shows "\e>0. \y \ s. dist x y < e \ f y \ a" using assms(1)[unfolded continuous_on_eq_continuous_within, THEN bspec[where x=x], OF assms(2)] continuous_within_avoid[of x s f a] assms(2,3) by auto lemma continuous_on_open_avoid: + fixes f :: "'a::metric_space \ 'b::metric_space" (* TODO: generalize *) assumes "continuous_on s f" "open s" "x \ s" "f x \ a" shows "\e>0. \y. dist x y < e \ f y \ a" using assms(1)[unfolded continuous_on_eq_continuous_at[OF assms(2)], THEN bspec[where x=x], OF assms(3)] continuous_at_avoid[of x f a] assms(3,4) by auto @@ -3810,22 +3829,25 @@ text{* Proving a function is constant by proving open-ness of level set. *} lemma continuous_levelset_open_in_cases: - "connected s \ continuous_on s f \ + fixes f :: "_ \ 'b::metric_space" (* class constraint due to closed_sing *) + shows "connected s \ continuous_on s f \ openin (subtopology euclidean s) {x \ s. f x = a} ==> (\x \ s. f x \ a) \ (\x \ s. f x = a)" unfolding connected_clopen using continuous_closed_in_preimage_constant by auto lemma continuous_levelset_open_in: - "connected s \ continuous_on s f \ + fixes f :: "_ \ 'b::metric_space" (* class constraint due to closed_sing *) + shows "connected s \ continuous_on s f \ openin (subtopology euclidean s) {x \ s. f x = a} \ (\x \ s. f x = a) ==> (\x \ s. f x = a)" using continuous_levelset_open_in_cases[of s f ] by meson lemma continuous_levelset_open: + fixes f :: "_ \ 'b::metric_space" (* class constraint due to closed_sing *) assumes "connected s" "continuous_on s f" "open {x \ s. f x = a}" "\x \ s. f x = a" shows "\x \ s. f x = a" -using continuous_levelset_open_in[OF assms(1,2), of a, unfolded openin_open] using assms (3,4) by auto +using continuous_levelset_open_in[OF assms(1,2), of a, unfolded openin_open] using assms (3,4) by fast text{* Some arithmetical combinations (more to prove). 
*} @@ -3896,7 +3918,104 @@ thus "x \ interior (op + a ` s)" unfolding mem_interior using `e>0` by auto qed -subsection {* Preservation of compactness and connectedness under continuous function. *} +text {* We can now extend limit compositions to consider the scalar multiplier. *} + +lemma continuous_vmul: + fixes c :: "'a::metric_space \ real" and v :: "'b::real_normed_vector" + shows "continuous net c ==> continuous net (\x. c(x) *\<^sub>R v)" + unfolding continuous_def using Lim_vmul[of c] by auto + +lemma continuous_mul: + fixes c :: "'a::metric_space \ real" + fixes f :: "'a::metric_space \ 'b::real_normed_vector" + shows "continuous net c \ continuous net f + ==> continuous net (\x. c(x) *\<^sub>R f x) " + unfolding continuous_def by (intro tendsto_intros) + +lemmas continuous_intros = continuous_add continuous_vmul continuous_cmul continuous_const continuous_sub continuous_at_id continuous_within_id continuous_mul + +lemma continuous_on_vmul: + fixes c :: "'a::metric_space \ real" and v :: "'b::real_normed_vector" + shows "continuous_on s c ==> continuous_on s (\x. c(x) *\<^sub>R v)" + unfolding continuous_on_eq_continuous_within using continuous_vmul[of _ c] by auto + +lemma continuous_on_mul: + fixes c :: "'a::metric_space \ real" + fixes f :: "'a::metric_space \ 'b::real_normed_vector" + shows "continuous_on s c \ continuous_on s f + ==> continuous_on s (\x. c(x) *\<^sub>R f x)" + unfolding continuous_on_eq_continuous_within using continuous_mul[of _ c] by auto + +lemmas continuous_on_intros = continuous_on_add continuous_on_const continuous_on_id continuous_on_compose continuous_on_cmul continuous_on_neg continuous_on_sub + uniformly_continuous_on_add uniformly_continuous_on_const uniformly_continuous_on_id uniformly_continuous_on_compose uniformly_continuous_on_cmul uniformly_continuous_on_neg uniformly_continuous_on_sub + continuous_on_mul continuous_on_vmul + +text{* And so we have continuity of inverse. *} + +lemma continuous_inv: + fixes f :: "'a::metric_space \ real" + shows "continuous net f \ f(netlimit net) \ 0 + ==> continuous net (inverse o f)" + unfolding continuous_def using Lim_inv by auto + +lemma continuous_at_within_inv: + fixes f :: "'a::metric_space \ 'b::real_normed_field" + assumes "continuous (at a within s) f" "f a \ 0" + shows "continuous (at a within s) (inverse o f)" + using assms unfolding continuous_within o_def + by (intro tendsto_intros) + +lemma continuous_at_inv: + fixes f :: "'a::metric_space \ 'b::real_normed_field" + shows "continuous (at a) f \ f a \ 0 + ==> continuous (at a) (inverse o f) " + using within_UNIV[THEN sym, of "at a"] using continuous_at_within_inv[of a UNIV] by auto + +text {* Topological properties of linear functions. *} + +lemma linear_lim_0: + assumes "bounded_linear f" shows "(f ---> 0) (at (0))" +proof- + interpret f: bounded_linear f by fact + have "(f ---> f 0) (at 0)" + using tendsto_ident_at by (rule f.tendsto) + thus ?thesis unfolding f.zero . 
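The block of linearity lemmas (linear_lim_0, linear_continuous_at and the bilinear variants) is moved forward in the theory here, with its old copy further down deleted in a later hunk; the proofs now go through the tendsto rules of bounded_linear. The estimate behind them is the standard one, using the norm bound packaged in bounded_linear (assumed here in its usual form, with some constant K):
\[
\lVert f\,x - f\,a \rVert \;=\; \lVert f(x - a) \rVert \;\le\; K\,\lVert x - a \rVert \;\longrightarrow\; 0
\quad \text{as } x \to a,
\]
which gives continuity of f at every point, hence on every set.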
+qed + +lemma linear_continuous_at: + assumes "bounded_linear f" shows "continuous (at a) f" + unfolding continuous_at using assms + apply (rule bounded_linear.tendsto) + apply (rule tendsto_ident_at) + done + +lemma linear_continuous_within: + shows "bounded_linear f ==> continuous (at x within s) f" + using continuous_at_imp_continuous_within[of x f s] using linear_continuous_at[of f] by auto + +lemma linear_continuous_on: + shows "bounded_linear f ==> continuous_on s f" + using continuous_at_imp_continuous_on[of s f] using linear_continuous_at[of f] by auto + +text{* Also bilinear functions, in composition form. *} + +lemma bilinear_continuous_at_compose: + shows "continuous (at x) f \ continuous (at x) g \ bounded_bilinear h + ==> continuous (at x) (\x. h (f x) (g x))" + unfolding continuous_at using Lim_bilinear[of f "f x" "(at x)" g "g x" h] by auto + +lemma bilinear_continuous_within_compose: + shows "continuous (at x within s) f \ continuous (at x within s) g \ bounded_bilinear h + ==> continuous (at x within s) (\x. h (f x) (g x))" + unfolding continuous_within using Lim_bilinear[of f "f x"] by auto + +lemma bilinear_continuous_on_compose: + shows "continuous_on s f \ continuous_on s g \ bounded_bilinear h + ==> continuous_on s (\x. h (f x) (g x))" + unfolding continuous_on_def + by (fast elim: bounded_bilinear.tendsto) + +text {* Preservation of compactness and connectedness under continuous function. *} lemma compact_continuous_image: assumes "continuous_on s f" "compact s" @@ -3906,7 +4025,7 @@ then obtain y where y:"\n. y n \ s \ x n = f (y n)" unfolding image_iff Bex_def using choice[of "\n xa. xa \ s \ x n = f xa"] by auto then obtain l r where "l\s" and r:"subseq r" and lr:"((y \ r) ---> l) sequentially" using assms(2)[unfolded compact_def, THEN spec[where x=y]] by auto { fix e::real assume "e>0" - then obtain d where "d>0" and d:"\x'\s. dist x' l < d \ dist (f x') (f l) < e" using assms(1)[unfolded continuous_on_def, THEN bspec[where x=l], OF `l\s`] by auto + then obtain d where "d>0" and d:"\x'\s. dist x' l < d \ dist (f x') (f l) < e" using assms(1)[unfolded continuous_on_iff, THEN bspec[where x=l], OF `l\s`] by auto then obtain N::nat where N:"\n\N. dist ((y \ r) n) l < d" using lr[unfolded Lim_sequentially, THEN spec[where x=d]] by auto { fix n::nat assume "n\N" hence "dist ((x \ r) n) (f l) < e" using N[THEN spec[where x=n]] d[THEN bspec[where x="y (r n)"]] y[THEN spec[where x="r n"]] by auto } hence "\N. \n\N. dist ((x \ r) n) (f l) < e" by auto } @@ -3935,7 +4054,7 @@ shows "uniformly_continuous_on s f" proof- { fix x assume x:"x\s" - hence "\xa. \y. 0 < xa \ (y > 0 \ (\x'\s. dist x' x < y \ dist (f x') (f x) < xa))" using assms(1)[unfolded continuous_on_def, THEN bspec[where x=x]] by auto + hence "\xa. \y. 0 < xa \ (y > 0 \ (\x'\s. dist x' x < y \ dist (f x') (f x) < xa))" using assms(1)[unfolded continuous_on_iff, THEN bspec[where x=x]] by auto hence "\fa. \xa>0. \x'\s. fa xa > 0 \ (dist x' x < fa xa \ dist (f x') (f x) < xa)" using choice[of "\e d. e>0 \ d>0 \(\x'\s. (dist x' x < d \ dist (f x') (f x) < e))"] by auto } then have "\x\s. \y. \xa. 0 < xa \ (\x'\s. y xa > 0 \ (dist x' x < y xa \ dist (f x') (f x) < xa))" by auto then obtain d where d:"\e>0. \x\s. \x'\s. d x e > 0 \ (dist x' x < d x e \ dist (f x') (f x) < e)" @@ -3988,7 +4107,7 @@ thus ?thesis unfolding continuous_on_closed by auto qed -subsection{* A uniformly convergent limit of continuous functions is continuous. *} +text {* A uniformly convergent limit of continuous functions is continuous. 
*} lemma norm_triangle_lt: fixes x y :: "'a::real_normed_vector" @@ -4007,7 +4126,7 @@ using eventually_and[of "(\n. \x\s. norm (f n x - g x) < e / 3)" "(\n. continuous_on s (f n))" net] assms(1,2) eventually_happens by blast have "e / 3 > 0" using `e>0` by auto then obtain d where "d>0" and d:"\x'\s. dist x' x < d \ dist (f n x') (f n x) < e / 3" - using n(2)[unfolded continuous_on_def, THEN bspec[where x=x], OF `x\s`, THEN spec[where x="e/3"]] by blast + using n(2)[unfolded continuous_on_iff, THEN bspec[where x=x], OF `x\s`, THEN spec[where x="e/3"]] by blast { fix y assume "y\s" "dist y x < d" hence "dist (f n y) (f n x) < e / 3" using d[THEN bspec[where x=y]] by auto hence "norm (f n y - g x) < 2 * e / 3" using norm_triangle_lt[of "f n y - f n x" "f n x - g x" "2*e/3"] @@ -4015,55 +4134,8 @@ hence "dist (g y) (g x) < e" unfolding dist_norm using n(1)[THEN bspec[where x=y], OF `y\s`] unfolding norm_minus_cancel[of "f n y - g y", THEN sym] using norm_triangle_lt[of "f n y - g x" "g y - f n y" e] by (auto simp add: uminus_add_conv_diff) } hence "\d>0. \x'\s. dist x' x < d \ dist (g x') (g x) < e" using `d>0` by auto } - thus ?thesis unfolding continuous_on_def by auto -qed - -subsection{* Topological properties of linear functions. *} - -lemma linear_lim_0: - assumes "bounded_linear f" shows "(f ---> 0) (at (0))" -proof- - interpret f: bounded_linear f by fact - have "(f ---> f 0) (at 0)" - using tendsto_ident_at by (rule f.tendsto) - thus ?thesis unfolding f.zero . -qed - -lemma linear_continuous_at: - assumes "bounded_linear f" shows "continuous (at a) f" - unfolding continuous_at using assms - apply (rule bounded_linear.tendsto) - apply (rule tendsto_ident_at) - done - -lemma linear_continuous_within: - shows "bounded_linear f ==> continuous (at x within s) f" - using continuous_at_imp_continuous_within[of x f s] using linear_continuous_at[of f] by auto - -lemma linear_continuous_on: - shows "bounded_linear f ==> continuous_on s f" - using continuous_at_imp_continuous_on[of s f] using linear_continuous_at[of f] by auto - -lemma continuous_on_vec1:"continuous_on A (vec1::real\real^1)" - by(rule linear_continuous_on[OF bounded_linear_vec1]) - -text{* Also bilinear functions, in composition form. *} - -lemma bilinear_continuous_at_compose: - shows "continuous (at x) f \ continuous (at x) g \ bounded_bilinear h - ==> continuous (at x) (\x. h (f x) (g x))" - unfolding continuous_at using Lim_bilinear[of f "f x" "(at x)" g "g x" h] by auto - -lemma bilinear_continuous_within_compose: - shows "continuous (at x within s) f \ continuous (at x within s) g \ bounded_bilinear h - ==> continuous (at x within s) (\x. h (f x) (g x))" - unfolding continuous_within using Lim_bilinear[of f "f x"] by auto - -lemma bilinear_continuous_on_compose: - shows "continuous_on s f \ continuous_on s g \ bounded_bilinear h - ==> continuous_on s (\x. h (f x) (g x))" - unfolding continuous_on_eq_continuous_within apply auto apply(erule_tac x=x in ballE) apply auto apply(erule_tac x=x in ballE) apply auto - using bilinear_continuous_within_compose[of _ s f g h] by auto + thus ?thesis unfolding continuous_on_iff by auto +qed subsection{* Topological stuff lifted from and dropped to R *} @@ -4098,7 +4170,7 @@ lemma continuous_on_real_range: fixes f :: "'a::real_normed_vector \ real" shows "continuous_on s f \ (\x \ s. \e>0. \d>0. (\x' \ s. 
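The uniform-limit lemma just above is the familiar epsilon-over-three argument, now unfolded through continuous_on_iff instead of the old continuous_on definition. The triangle-inequality step it relies on is
\[
d(g\,y,\ g\,x) \;\le\; d(g\,y,\ f_n\,y) + d(f_n\,y,\ f_n\,x) + d(f_n\,x,\ g\,x)
\;<\; \tfrac{\varepsilon}{3} + \tfrac{\varepsilon}{3} + \tfrac{\varepsilon}{3} \;=\; \varepsilon,
\]
for n chosen so that f_n is uniformly within eps/3 of g on s, and y within the delta supplied by continuity of f_n at x.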
norm(x' - x) < d --> abs(f x' - f x) < e))" - unfolding continuous_on_def dist_norm by simp + unfolding continuous_on_iff dist_norm by simp lemma continuous_at_norm: "continuous (at x) norm" unfolding continuous_at by (intro tendsto_intros) @@ -4110,7 +4182,7 @@ unfolding continuous_at by (intro tendsto_intros) lemma continuous_on_component: "continuous_on s (\x. x $ i)" -unfolding continuous_on by (intro ballI tendsto_intros) +unfolding continuous_on_def by (intro ballI tendsto_intros) lemma continuous_at_infnorm: "continuous (at x) infnorm" unfolding continuous_at Lim_at o_def unfolding dist_norm @@ -4218,91 +4290,7 @@ thus ?thesis by fastsimp qed -subsection{* We can now extend limit compositions to consider the scalar multiplier. *} - -lemma Lim_mul: - fixes f :: "'a \ 'b::real_normed_vector" - assumes "(c ---> d) net" "(f ---> l) net" - shows "((\x. c(x) *\<^sub>R f x) ---> (d *\<^sub>R l)) net" - using assms by (rule scaleR.tendsto) - -lemma Lim_vmul: - fixes c :: "'a \ real" and v :: "'b::real_normed_vector" - shows "(c ---> d) net ==> ((\x. c(x) *\<^sub>R v) ---> d *\<^sub>R v) net" - by (intro tendsto_intros) - -lemmas Lim_intros = Lim_add Lim_const Lim_sub Lim_cmul Lim_vmul Lim_within_id - -lemma continuous_vmul: - fixes c :: "'a::metric_space \ real" and v :: "'b::real_normed_vector" - shows "continuous net c ==> continuous net (\x. c(x) *\<^sub>R v)" - unfolding continuous_def using Lim_vmul[of c] by auto - -lemma continuous_mul: - fixes c :: "'a::metric_space \ real" - fixes f :: "'a::metric_space \ 'b::real_normed_vector" - shows "continuous net c \ continuous net f - ==> continuous net (\x. c(x) *\<^sub>R f x) " - unfolding continuous_def by (intro tendsto_intros) - -lemmas continuous_intros = continuous_add continuous_vmul continuous_cmul continuous_const continuous_sub continuous_at_id continuous_within_id continuous_mul - -lemma continuous_on_vmul: - fixes c :: "'a::metric_space \ real" and v :: "'b::real_normed_vector" - shows "continuous_on s c ==> continuous_on s (\x. c(x) *\<^sub>R v)" - unfolding continuous_on_eq_continuous_within using continuous_vmul[of _ c] by auto - -lemma continuous_on_mul: - fixes c :: "'a::metric_space \ real" - fixes f :: "'a::metric_space \ 'b::real_normed_vector" - shows "continuous_on s c \ continuous_on s f - ==> continuous_on s (\x. c(x) *\<^sub>R f x)" - unfolding continuous_on_eq_continuous_within using continuous_mul[of _ c] by auto - -lemmas continuous_on_intros = continuous_on_add continuous_on_const continuous_on_id continuous_on_compose continuous_on_cmul continuous_on_neg continuous_on_sub - uniformly_continuous_on_add uniformly_continuous_on_const uniformly_continuous_on_id uniformly_continuous_on_compose uniformly_continuous_on_cmul uniformly_continuous_on_neg uniformly_continuous_on_sub - continuous_on_mul continuous_on_vmul - -text{* And so we have continuity of inverse. 
*} - -lemma Lim_inv: - fixes f :: "'a \ real" - assumes "(f ---> l) (net::'a net)" "l \ 0" - shows "((inverse o f) ---> inverse l) net" - unfolding o_def using assms by (rule tendsto_inverse) - -lemma continuous_inv: - fixes f :: "'a::metric_space \ real" - shows "continuous net f \ f(netlimit net) \ 0 - ==> continuous net (inverse o f)" - unfolding continuous_def using Lim_inv by auto - -lemma continuous_at_within_inv: - fixes f :: "'a::metric_space \ 'b::real_normed_field" - assumes "continuous (at a within s) f" "f a \ 0" - shows "continuous (at a within s) (inverse o f)" - using assms unfolding continuous_within o_def - by (intro tendsto_intros) - -lemma continuous_at_inv: - fixes f :: "'a::metric_space \ 'b::real_normed_field" - shows "continuous (at a) f \ f a \ 0 - ==> continuous (at a) (inverse o f) " - using within_UNIV[THEN sym, of "at a"] using continuous_at_within_inv[of a UNIV] by auto - -subsection{* Preservation properties for pasted sets. *} - -lemma bounded_pastecart: - fixes s :: "('a::real_normed_vector ^ _) set" (* FIXME: generalize to metric_space *) - assumes "bounded s" "bounded t" - shows "bounded { pastecart x y | x y . (x \ s \ y \ t)}" -proof- - obtain a b where ab:"\x\s. norm x \ a" "\x\t. norm x \ b" using assms[unfolded bounded_iff] by auto - { fix x y assume "x\s" "y\t" - hence "norm x \ a" "norm y \ b" using ab by auto - hence "norm (pastecart x y) \ a + b" using norm_pastecart[of x y] by auto } - thus ?thesis unfolding bounded_iff by auto -qed +subsection {* Pasted sets *} lemma bounded_Times: assumes "bounded s" "bounded t" shows "bounded (s \ t)" @@ -4314,33 +4302,6 @@ thus ?thesis unfolding bounded_any_center [where a="(x, y)"] by auto qed -lemma closed_pastecart: - fixes s :: "(real ^ 'a) set" (* FIXME: generalize *) - assumes "closed s" "closed t" - shows "closed {pastecart x y | x y . x \ s \ y \ t}" -proof- - { fix x l assume as:"\n::nat. x n \ {pastecart x y |x y. x \ s \ y \ t}" "(x ---> l) sequentially" - { fix n::nat have "fstcart (x n) \ s" "sndcart (x n) \ t" using as(1)[THEN spec[where x=n]] by auto } note * = this - moreover - { fix e::real assume "e>0" - then obtain N::nat where N:"\n\N. dist (x n) l < e" using as(2)[unfolded Lim_sequentially, THEN spec[where x=e]] by auto - { fix n::nat assume "n\N" - hence "dist (fstcart (x n)) (fstcart l) < e" "dist (sndcart (x n)) (sndcart l) < e" - using N[THEN spec[where x=n]] dist_fstcart[of "x n" l] dist_sndcart[of "x n" l] by auto } - hence "\N. \n\N. dist (fstcart (x n)) (fstcart l) < e" "\N. \n\N. dist (sndcart (x n)) (sndcart l) < e" by auto } - ultimately have "fstcart l \ s" "sndcart l \ t" - using assms(1)[unfolded closed_sequential_limits, THEN spec[where x="\n. fstcart (x n)"], THEN spec[where x="fstcart l"]] - using assms(2)[unfolded closed_sequential_limits, THEN spec[where x="\n. sndcart (x n)"], THEN spec[where x="sndcart l"]] - unfolding Lim_sequentially by auto - hence "l \ {pastecart x y |x y. x \ s \ y \ t}" apply- unfolding mem_Collect_eq apply(rule_tac x="fstcart l" in exI,rule_tac x="sndcart l" in exI) by auto } - thus ?thesis unfolding closed_sequential_limits by auto -qed - -lemma compact_pastecart: - fixes s t :: "(real ^ _) set" - shows "compact s \ compact t ==> compact {pastecart x y | x y . x \ s \ y \ t}" - unfolding compact_eq_bounded_closed using bounded_pastecart[of s t] closed_pastecart[of s t] by auto - lemma mem_Times_iff: "x \ A \ B \ fst x \ A \ snd x \ B" by (induct x) simp @@ -4424,7 +4385,7 @@ have "{x - y | x y . 
x\s \ y\s} \ {}" using `s \ {}` by auto then obtain x where x:"x\{x - y |x y. x \ s \ y \ s}" "\y\{x - y |x y. x \ s \ y \ s}. norm y \ norm x" using compact_differences[OF assms(1) assms(1)] - using distance_attains_sup[where 'a="'a", unfolded dist_norm, of "{x - y | x y . x\s \ y\s}" 0] by(auto simp add: norm_minus_cancel) + using distance_attains_sup[where 'a="'a", unfolded dist_norm, of "{x - y | x y . x\s \ y\s}" 0] by auto from x(1) obtain a b where "a\s" "b\s" "x = a - b" by auto thus ?thesis using x(2)[unfolded `x = a - b`] by blast qed @@ -4442,10 +4403,10 @@ let ?D = "{norm (x - y) |x y. x \ s \ y \ s}" obtain a where a:"\x\s. norm x \ a" using assms[unfolded bounded_iff] by auto { fix x y assume "x \ s" "y \ s" - hence "norm (x - y) \ 2 * a" using norm_triangle_ineq[of x "-y", unfolded norm_minus_cancel] a[THEN bspec[where x=x]] a[THEN bspec[where x=y]] by (auto simp add: ring_simps) } + hence "norm (x - y) \ 2 * a" using norm_triangle_ineq[of x "-y", unfolded norm_minus_cancel] a[THEN bspec[where x=x]] a[THEN bspec[where x=y]] by (auto simp add: field_simps) } note * = this { fix x y assume "x\s" "y\s" hence "s \ {}" by auto - have "norm(x - y) \ diameter s" unfolding diameter_def using `s\{}` *[OF `x\s` `y\s`] `x\s` `y\s` + have "norm(x - y) \ diameter s" unfolding diameter_def using `s\{}` *[OF `x\s` `y\s`] `x\s` `y\s` by simp (blast intro!: Sup_upper *) } moreover { fix d::real assume "d>0" "d < diameter s" @@ -4476,10 +4437,10 @@ proof- have b:"bounded s" using assms(1) by (rule compact_imp_bounded) then obtain x y where xys:"x\s" "y\s" and xy:"\u\s. \v\s. norm (u - v) \ norm (x - y)" using compact_sup_maxdistance[OF assms] by auto - hence "diameter s \ norm (x - y)" - by (force simp add: diameter_def intro!: Sup_least) + hence "diameter s \ norm (x - y)" + unfolding diameter_def by clarsimp (rule Sup_least, fast+) thus ?thesis - by (metis b diameter_bounded_bound order_antisym xys) + by (metis b diameter_bounded_bound order_antisym xys) qed text{* Related results with closure as the conclusion. *} @@ -4653,7 +4614,7 @@ by (auto simp add: dist_commute) qed -(* A cute way of denoting open and closed intervals using overloading. *) +subsection {* Intervals *} lemma interval: fixes a :: "'a::ord^'n" shows "{a <..< b} = {x::'a^'n. \i. a$i < x$i \ x$i < b$i}" and @@ -4665,20 +4626,6 @@ "x \ {a .. b} \ (\i. a$i \ x$i \ x$i \ b$i)" using interval[of a b] by(auto simp add: expand_set_eq vector_less_def vector_le_def) -lemma mem_interval_1: fixes x :: "real^1" shows - "(x \ {a .. b} \ dest_vec1 a \ dest_vec1 x \ dest_vec1 x \ dest_vec1 b)" - "(x \ {a<.. dest_vec1 a < dest_vec1 x \ dest_vec1 x < dest_vec1 b)" -by(simp_all add: Cart_eq vector_less_def vector_le_def forall_1) - -lemma vec1_interval:fixes a::"real" shows - "vec1 ` {a .. b} = {vec1 a .. vec1 b}" - "vec1 ` {a<.. (\i. b$i \ a$i))" (is ?th1) and "({a .. b} = {} \ (\i. 
b$i < a$i))" (is ?th2) @@ -4694,7 +4641,7 @@ have "a$i < b$i" using as[THEN spec[where x=i]] by auto hence "a$i < ((1/2) *\<^sub>R (a+b)) $ i" "((1/2) *\<^sub>R (a+b)) $ i < b$i" unfolding vector_smult_component and vector_add_component - by (auto simp add: less_divide_eq_number_of1) } + by auto } hence "{a <..< b} \ {}" using mem_interval(1)[of "?x" a b] by auto } ultimately show ?th1 by blast @@ -4709,7 +4656,7 @@ have "a$i \ b$i" using as[THEN spec[where x=i]] by auto hence "a$i \ ((1/2) *\<^sub>R (a+b)) $ i" "((1/2) *\<^sub>R (a+b)) $ i \ b$i" unfolding vector_smult_component and vector_add_component - by (auto simp add: less_divide_eq_number_of1) } + by auto } hence "{a .. b} \ {}" using mem_interval(2)[of "?x" a b] by auto } ultimately show ?th2 by blast qed @@ -4772,13 +4719,13 @@ { fix j have "c $ j < ?x $ j \ ?x $ j < d $ j" unfolding Cart_lambda_beta apply(cases "j=i") using as(2)[THEN spec[where x=j]] - by (auto simp add: less_divide_eq_number_of1 as2) } + by (auto simp add: as2) } hence "?x\{c<..{a .. b}" unfolding mem_interval apply auto apply(rule_tac x=i in exI) using as(2)[THEN spec[where x=i]] and as2 - by (auto simp add: less_divide_eq_number_of1) + by auto ultimately have False using as by auto } hence "a$i \ c$i" by(rule ccontr)auto moreover @@ -4787,13 +4734,13 @@ { fix j have "d $ j > ?x $ j \ ?x $ j > c $ j" unfolding Cart_lambda_beta apply(cases "j=i") using as(2)[THEN spec[where x=j]] - by (auto simp add: less_divide_eq_number_of1 as2) } + by (auto simp add: as2) } hence "?x\{c<..{a .. b}" unfolding mem_interval apply auto apply(rule_tac x=i in exI) using as(2)[THEN spec[where x=i]] and as2 - by (auto simp add: less_divide_eq_number_of1) + by auto ultimately have False using as by auto } hence "b$i \ d$i" by(rule ccontr)auto ultimately @@ -4824,7 +4771,7 @@ lemma inter_interval: fixes a :: "'a::linorder^'n" shows "{a .. b} \ {c .. d} = {(\ i. max (a$i) (c$i)) .. (\ i. min (b$i) (d$i))}" unfolding expand_set_eq and Int_iff and mem_interval - by (auto simp add: less_divide_eq_number_of1 intro!: bexI) + by auto (* Moved interval_open_subset_closed a bit upwards *) @@ -4861,10 +4808,7 @@ qed lemma open_interval_real[intro]: fixes a :: "real" shows "open {a<..R (a + b)) $ i \ ((1 / 2) *\<^sub>R (a + b)) $ i < b $ i" using assms[unfolded interval_ne_empty, THEN spec[where x=i]] unfolding vector_smult_component and vector_add_component - by(auto simp add: less_divide_eq_number_of1) } + by auto } thus ?thesis unfolding mem_interval by auto qed @@ -4987,7 +4931,7 @@ x + (inverse (real n + 1)) *\<^sub>R (((1 / 2) *\<^sub>R (a + b)) - x)" by (auto simp add: algebra_simps) hence "f n < b" and "a < f n" using open_closed_interval_convex[OF open_interval_midpoint[OF assms] as *] unfolding f_def by auto - hence False using fn unfolding f_def using xc by(auto simp add: vector_mul_lcancel vector_ssub_ldistrib) } + hence False using fn unfolding f_def using xc by(auto simp add: vector_ssub_ldistrib) } moreover { assume "\ (f ---> x) sequentially" { fix e::real assume "e>0" @@ -5055,56 +4999,6 @@ unfolding closure_open_interval[OF assms, THEN sym] unfolding open_inter_closure_eq_empty[OF open_interval] .. -(* Some special cases for intervals in R^1. *) - -lemma interval_cases_1: fixes x :: "real^1" shows - "x \ {a .. b} ==> x \ {a<.. (x = a) \ (x = b)" - unfolding Cart_eq vector_less_def vector_le_def mem_interval by(auto simp del:dest_vec1_eq) - -lemma in_interval_1: fixes x :: "real^1" shows - "(x \ {a .. b} \ dest_vec1 a \ dest_vec1 x \ dest_vec1 x \ dest_vec1 b) \ - (x \ {a<.. 
dest_vec1 a < dest_vec1 x \ dest_vec1 x < dest_vec1 b)" - unfolding Cart_eq vector_less_def vector_le_def mem_interval by(auto simp del:dest_vec1_eq) - -lemma interval_eq_empty_1: fixes a :: "real^1" shows - "{a .. b} = {} \ dest_vec1 b < dest_vec1 a" - "{a<.. dest_vec1 b \ dest_vec1 a" - unfolding interval_eq_empty and ex_1 by auto - -lemma subset_interval_1: fixes a :: "real^1" shows - "({a .. b} \ {c .. d} \ dest_vec1 b < dest_vec1 a \ - dest_vec1 c \ dest_vec1 a \ dest_vec1 a \ dest_vec1 b \ dest_vec1 b \ dest_vec1 d)" - "({a .. b} \ {c<.. dest_vec1 b < dest_vec1 a \ - dest_vec1 c < dest_vec1 a \ dest_vec1 a \ dest_vec1 b \ dest_vec1 b < dest_vec1 d)" - "({a<.. {c .. d} \ dest_vec1 b \ dest_vec1 a \ - dest_vec1 c \ dest_vec1 a \ dest_vec1 a < dest_vec1 b \ dest_vec1 b \ dest_vec1 d)" - "({a<.. {c<.. dest_vec1 b \ dest_vec1 a \ - dest_vec1 c \ dest_vec1 a \ dest_vec1 a < dest_vec1 b \ dest_vec1 b \ dest_vec1 d)" - unfolding subset_interval[of a b c d] unfolding forall_1 by auto - -lemma eq_interval_1: fixes a :: "real^1" shows - "{a .. b} = {c .. d} \ - dest_vec1 b < dest_vec1 a \ dest_vec1 d < dest_vec1 c \ - dest_vec1 a = dest_vec1 c \ dest_vec1 b = dest_vec1 d" -unfolding set_eq_subset[of "{a .. b}" "{c .. d}"] -unfolding subset_interval_1(1)[of a b c d] -unfolding subset_interval_1(1)[of c d a b] -by auto - -lemma disjoint_interval_1: fixes a :: "real^1" shows - "{a .. b} \ {c .. d} = {} \ dest_vec1 b < dest_vec1 a \ dest_vec1 d < dest_vec1 c \ dest_vec1 b < dest_vec1 c \ dest_vec1 d < dest_vec1 a" - "{a .. b} \ {c<.. dest_vec1 b < dest_vec1 a \ dest_vec1 d \ dest_vec1 c \ dest_vec1 b \ dest_vec1 c \ dest_vec1 d \ dest_vec1 a" - "{a<.. {c .. d} = {} \ dest_vec1 b \ dest_vec1 a \ dest_vec1 d < dest_vec1 c \ dest_vec1 b \ dest_vec1 c \ dest_vec1 d \ dest_vec1 a" - "{a<.. {c<.. dest_vec1 b \ dest_vec1 a \ dest_vec1 d \ dest_vec1 c \ dest_vec1 b \ dest_vec1 c \ dest_vec1 d \ dest_vec1 a" - unfolding disjoint_interval and ex_1 by auto - -lemma open_closed_interval_1: fixes a :: "real^1" shows - "{a<.. dest_vec1 b ==> {a .. b} = {a<.. {a,b}" - unfolding expand_set_eq apply simp unfolding vector_less_def and vector_le_def and forall_1 and dest_vec1_eq[THEN sym] by(auto simp del:dest_vec1_eq) - (* Some stuff for half-infinite intervals too; FIXME: notation? *) lemma closed_interval_left: fixes b::"real^'n" @@ -5131,7 +5025,7 @@ thus ?thesis unfolding closed_limpt unfolding islimpt_approachable by blast qed -subsection{* Intervals in general, including infinite and mixtures of open and closed. *} +text {* Intervals in general, including infinite and mixtures of open and closed. *} definition "is_interval s \ (\a\s. \b\s. \x. (\i. ((a$i \ x$i \ x$i \ b$i) \ (b$i \ x$i \ x$i \ a$i))) \ x \ s)" @@ -5238,14 +5132,6 @@ shows "l$i = b" using ev[unfolded order_eq_iff eventually_and] using Lim_component_ge[OF net, of b i] and Lim_component_le[OF net, of i b] by auto -lemma Lim_drop_le: fixes f :: "'a \ real^1" shows - "(f ---> l) net \ ~(trivial_limit net) \ eventually (\x. dest_vec1 (f x) \ b) net ==> dest_vec1 l \ b" - using Lim_component_le[of f l net 1 b] by auto - -lemma Lim_drop_ge: fixes f :: "'a \ real^1" shows - "(f ---> l) net \ ~(trivial_limit net) \ eventually (\x. b \ dest_vec1 (f x)) net ==> b \ dest_vec1 l" - using Lim_component_ge[of f l net b 1] by auto - text{* Limits relative to a union. *} lemma eventually_within_Un: @@ -5260,11 +5146,17 @@ unfolding tendsto_def by (auto simp add: eventually_within_Un) +lemma Lim_topological: + "(f ---> l) net \ + trivial_limit net \ + (\S. 
open S \ l \ S \ eventually (\x. f x \ S) net)" + unfolding tendsto_def trivial_limit_eq by auto + lemma continuous_on_union: assumes "closed s" "closed t" "continuous_on s f" "continuous_on t f" shows "continuous_on (s \ t) f" - using assms unfolding continuous_on unfolding Lim_within_union - unfolding Lim unfolding trivial_limit_within unfolding closed_limpt by auto + using assms unfolding continuous_on Lim_within_union + unfolding Lim_topological trivial_limit_within closed_limpt by auto lemma continuous_on_cases: assumes "closed s" "closed t" "continuous_on s f" "continuous_on t g" @@ -5300,23 +5192,7 @@ "connected s \ x \ s \ y \ s \ x$k \ a \ a \ y$k \ (\z\s. z$k = a)" using connected_ivt_hyperplane[of s x y "(basis k)::real^'n" a] by (auto simp add: inner_basis) -text{* Also more convenient formulations of monotone convergence. *} - -lemma bounded_increasing_convergent: fixes s::"nat \ real^1" - assumes "bounded {s n| n::nat. True}" "\n. dest_vec1(s n) \ dest_vec1(s(Suc n))" - shows "\l. (s ---> l) sequentially" -proof- - obtain a where a:"\n. \dest_vec1 (s n)\ \ a" using assms(1)[unfolded bounded_iff abs_dest_vec1] by auto - { fix m::nat - have "\ n. n\m \ dest_vec1 (s m) \ dest_vec1 (s n)" - apply(induct_tac n) apply simp using assms(2) apply(erule_tac x="na" in allE) by(auto simp add: not_less_eq_eq) } - hence "\m n. m \ n \ dest_vec1 (s m) \ dest_vec1 (s n)" by auto - then obtain l where "\e>0. \N. \n\N. \dest_vec1 (s n) - l\ < e" using convergent_bounded_monotone[OF a] unfolding monoseq_def by auto - thus ?thesis unfolding Lim_sequentially apply(rule_tac x="vec1 l" in exI) - unfolding dist_norm unfolding abs_dest_vec1 by auto -qed - -subsection{* Basic homeomorphism definitions. *} +subsection {* Homeomorphisms *} definition "homeomorphism s t f g \ (\x\s. (g(f x) = x)) \ (f ` s = t) \ continuous_on s f \ @@ -5372,13 +5248,7 @@ apply(erule_tac x="f x" in ballE) apply(erule_tac x="x" in ballE) apply auto apply(rule_tac x="f x" in bexI) by auto -subsection{* Relatively weak hypotheses if a set is compact. *} - -definition "inv_on f s = (\x. SOME y. y\s \ f y = x)" - -lemma assumes "inj_on f s" "x\s" - shows "inv_on f s (f x) = x" - using assms unfolding inj_on_def inv_on_def by auto +text {* Relatively weak hypotheses if a set is compact. *} lemma homeomorphism_compact: fixes f :: "'a::heine_borel \ 'b::heine_borel" @@ -5401,7 +5271,7 @@ then obtain y where y:"y\t" "g y = x" by auto then obtain x' where x':"x'\s" "f x' = y" using assms(3) by auto hence "x \ s" unfolding g_def using someI2[of "\b. b\s \ f b = y" x' "\x. x\s"] unfolding y(2)[THEN sym] and g_def by auto } - ultimately have "x\s \ x \ g ` t" by auto } + ultimately have "x\s \ x \ g ` t" .. } hence "g ` t = s" by auto ultimately show ?thesis unfolding homeomorphism_def homeomorphic_def @@ -5543,7 +5413,7 @@ let ?S' = "{x::real^'m. x\s \ norm x = norm a}" let ?S'' = "{x::real^'m. norm x = norm a}" - have "?S'' = frontier(cball 0 (norm a))" unfolding frontier_cball and dist_norm by (auto simp add: norm_minus_cancel) + have "?S'' = frontier(cball 0 (norm a))" unfolding frontier_cball and dist_norm by auto hence "compact ?S''" using compact_frontier[OF compact_cball, of 0 "norm a"] by auto moreover have "?S' = s \ ?S''" by auto ultimately have "compact ?S'" using closed_inter_compact[of s ?S''] using s(1) by auto @@ -5590,7 +5460,7 @@ lemma subspace_substandard: "subspace {x::real^_. (\i. 
P i \ x$i = 0)}" - unfolding subspace_def by(auto simp add: vector_add_component vector_smult_component elim!: ballE) + unfolding subspace_def by auto lemma closed_substandard: "closed {x::real^'n. \i. P i --> x$i = 0}" (is "closed ?A") @@ -5607,7 +5477,7 @@ then obtain B where BB:"B \ ?Bs" and B:"B = {x::real^'n. inner (basis i) x = 0}" by auto hence "x $ i = 0" unfolding B using x unfolding inner_basis by auto } hence "x\?A" by auto } - ultimately have "x\?A \ x\ \?Bs" by auto } + ultimately have "x\?A \ x\ \?Bs" .. } hence "?A = \ ?Bs" by auto thus ?thesis by(auto simp add: closed_Inter closed_hyperplane) qed @@ -5645,7 +5515,7 @@ moreover have "basis k \ span (?bas ` (insert k F))" by(rule span_superset, auto) hence "x$k *\<^sub>R basis k \ span (?bas ` (insert k F))" - using span_mul [where 'a=real, unfolded smult_conv_scaleR] by auto + using span_mul by auto ultimately have "y + x$k *\<^sub>R basis k \ span (?bas ` (insert k F))" using span_add by auto @@ -5713,7 +5583,7 @@ thus ?thesis using dim_subset[OF closure_subset, of s] by auto qed -text{* Affine transformations of intervals. *} +subsection {* Affine transformations of intervals *} lemma affinity_inverses: assumes m0: "m \ (0::'a::field)" @@ -5752,7 +5622,7 @@ shows "m *s x + c = y \ x = inverse m *s y + -(inverse m *s c)" proof assume h: "m *s x + c = y" - hence "m *s x = y - c" by (simp add: ring_simps) + hence "m *s x = y - c" by (simp add: field_simps) hence "inverse m *s (m *s x) = inverse m *s (y - c)" by simp then show "x = inverse m *s y + - (inverse m *s c)" using m0 by (simp add: vector_smult_assoc vector_ssub_ldistrib) @@ -5783,23 +5653,23 @@ case False { fix y assume "a \ y" "y \ b" "m > 0" hence "m *\<^sub>R a + c \ m *\<^sub>R y + c" "m *\<^sub>R y + c \ m *\<^sub>R b + c" - unfolding vector_le_def by(auto simp add: vector_smult_component vector_add_component) + unfolding vector_le_def by auto } moreover { fix y assume "a \ y" "y \ b" "m < 0" hence "m *\<^sub>R b + c \ m *\<^sub>R y + c" "m *\<^sub>R y + c \ m *\<^sub>R a + c" - unfolding vector_le_def by(auto simp add: vector_smult_component vector_add_component mult_left_mono_neg elim!:ballE) + unfolding vector_le_def by(auto simp add: mult_left_mono_neg) } moreover { fix y assume "m > 0" "m *\<^sub>R a + c \ y" "y \ m *\<^sub>R b + c" hence "y \ (\x. m *\<^sub>R x + c) ` {a..b}" unfolding image_iff Bex_def mem_interval vector_le_def - apply(auto simp add: vector_smult_component vector_add_component vector_minus_component vector_smult_assoc pth_3[symmetric] + apply(auto simp add: vector_smult_assoc pth_3[symmetric] intro!: exI[where x="(1 / m) *\<^sub>R (y - c)"]) by(auto simp add: pos_le_divide_eq pos_divide_le_eq real_mult_commute diff_le_iff) } moreover { fix y assume "m *\<^sub>R b + c \ y" "y \ m *\<^sub>R a + c" "m < 0" hence "y \ (\x. 
m *\<^sub>R x + c) ` {a..b}" unfolding image_iff Bex_def mem_interval vector_le_def - apply(auto simp add: vector_smult_component vector_add_component vector_minus_component vector_smult_assoc pth_3[symmetric] + apply(auto simp add: vector_smult_assoc pth_3[symmetric] intro!: exI[where x="(1 / m) *\<^sub>R (y - c)"]) by(auto simp add: neg_le_divide_eq neg_divide_le_eq real_mult_commute diff_le_iff) } @@ -5854,11 +5724,11 @@ also have "\ \ (1 - c) * (dist (z m) (z (m + k)) + c ^ (m + k) * d)" using cf_z[of "m + k"] and c by auto also have "\ \ c ^ m * d * (1 - c ^ k) + (1 - c) * c ^ (m + k) * d" - using Suc by (auto simp add: ring_simps) + using Suc by (auto simp add: field_simps) also have "\ = (c ^ m) * (d * (1 - c ^ k) + (1 - c) * c ^ k * d)" - unfolding power_add by (auto simp add: ring_simps) + unfolding power_add by (auto simp add: field_simps) also have "\ \ (c ^ m) * d * (1 - c ^ Suc k)" - using c by (auto simp add: ring_simps) + using c by (auto simp add: field_simps) finally show ?case by auto qed } note cf_z2 = this @@ -6015,7 +5885,7 @@ apply(erule_tac x="Na+Nb+n" in allE) apply simp using dist_triangle_add_half[of a "f (r (Na + Nb + n)) x" "dist a b - dist (f n x) (f n y)" "-b" "- f (r (Na + Nb + n)) y"] - unfolding ** unfolding group_simps(12) by (auto simp add: dist_commute) + unfolding ** by (auto simp add: algebra_simps dist_commute) moreover have "dist (f (r (Na + Nb + n)) x - f (r (Na + Nb + n)) y) (a - b) \ dist a b - dist (f n x) (f n y)" using distf[of n "r (Na+Nb+n)", OF _ `x\s` `y\s`] @@ -6045,7 +5915,7 @@ { fix x y assume "x\s" "y\s" moreover fix e::real assume "e>0" ultimately have "dist y x < e \ dist (g y) (g x) < e" using dist by fastsimp } - hence "continuous_on s g" unfolding continuous_on_def by auto + hence "continuous_on s g" unfolding continuous_on_iff by auto hence "((snd \ h \ r) ---> g a) sequentially" unfolding continuous_on_sequentially apply (rule allE[where x="\n. (fst \ h \ r) n"]) apply (erule ballE[where x=a]) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Multivariate_Analysis/Vec1.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Multivariate_Analysis/Vec1.thy Tue May 04 20:30:22 2010 +0200 @@ -0,0 +1,405 @@ +(* Title: Multivariate_Analysis/Vec1.thy + Author: Amine Chaieb, University of Cambridge + Author: Robert Himmelmann, TU Muenchen +*) + +header {* Vectors of size 1, 2, or 3 *} + +theory Vec1 +imports Topology_Euclidean_Space +begin + +text{* Some common special cases.*} + +lemma forall_1[simp]: "(\i::1. P i) \ P 1" + by (metis num1_eq_iff) + +lemma ex_1[simp]: "(\x::1. P x) \ P 1" + by auto (metis num1_eq_iff) + +lemma exhaust_2: + fixes x :: 2 shows "x = 1 \ x = 2" +proof (induct x) + case (of_int z) + then have "0 <= z" and "z < 2" by simp_all + then have "z = 0 | z = 1" by arith + then show ?case by auto +qed + +lemma forall_2: "(\i::2. P i) \ P 1 \ P 2" + by (metis exhaust_2) + +lemma exhaust_3: + fixes x :: 3 shows "x = 1 \ x = 2 \ x = 3" +proof (induct x) + case (of_int z) + then have "0 <= z" and "z < 3" by simp_all + then have "z = 0 \ z = 1 \ z = 2" by arith + then show ?case by auto +qed + +lemma forall_3: "(\i::3. 
P i) \ P 1 \ P 2 \ P 3" + by (metis exhaust_3) + +lemma UNIV_1 [simp]: "UNIV = {1::1}" + by (auto simp add: num1_eq_iff) + +lemma UNIV_2: "UNIV = {1::2, 2::2}" + using exhaust_2 by auto + +lemma UNIV_3: "UNIV = {1::3, 2::3, 3::3}" + using exhaust_3 by auto + +lemma setsum_1: "setsum f (UNIV::1 set) = f 1" + unfolding UNIV_1 by simp + +lemma setsum_2: "setsum f (UNIV::2 set) = f 1 + f 2" + unfolding UNIV_2 by simp + +lemma setsum_3: "setsum f (UNIV::3 set) = f 1 + f 2 + f 3" + unfolding UNIV_3 by (simp add: add_ac) + +instantiation num1 :: cart_one begin +instance proof + show "CARD(1) = Suc 0" by auto +qed end + +(* "lift" from 'a to 'a^1 and "drop" from 'a^1 to 'a -- FIXME: potential use of transfer *) + +abbreviation vec1:: "'a \ 'a ^ 1" where "vec1 x \ vec x" + +abbreviation dest_vec1:: "'a ^1 \ 'a" + where "dest_vec1 x \ (x$1)" + +lemma vec1_component[simp]: "(vec1 x)$1 = x" + by simp + +lemma vec1_dest_vec1: "vec1(dest_vec1 x) = x" "dest_vec1(vec1 y) = y" + by (simp_all add: Cart_eq) + +declare vec1_dest_vec1(1) [simp] + +lemma forall_vec1: "(\x. P x) \ (\x. P (vec1 x))" + by (metis vec1_dest_vec1(1)) + +lemma exists_vec1: "(\x. P x) \ (\x. P(vec1 x))" + by (metis vec1_dest_vec1(1)) + +lemma vec1_eq[simp]: "vec1 x = vec1 y \ x = y" + by (metis vec1_dest_vec1(2)) + +lemma dest_vec1_eq[simp]: "dest_vec1 x = dest_vec1 y \ x = y" + by (metis vec1_dest_vec1(1)) + +subsection{* The collapse of the general concepts to dimension one. *} + +lemma vector_one: "(x::'a ^1) = (\ i. (x$1))" + by (simp add: Cart_eq) + +lemma forall_one: "(\(x::'a ^1). P x) \ (\x. P(\ i. x))" + apply auto + apply (erule_tac x= "x$1" in allE) + apply (simp only: vector_one[symmetric]) + done + +lemma norm_vector_1: "norm (x :: _^1) = norm (x$1)" + by (simp add: norm_vector_def) + +lemma norm_real: "norm(x::real ^ 1) = abs(x$1)" + by (simp add: norm_vector_1) + +lemma dist_real: "dist(x::real ^ 1) y = abs((x$1) - (y$1))" + by (auto simp add: norm_real dist_norm) + +subsection{* Explicit vector construction from lists. *} + +primrec from_nat :: "nat \ 'a::{monoid_add,one}" +where "from_nat 0 = 0" | "from_nat (Suc n) = 1 + from_nat n" + +lemma from_nat [simp]: "from_nat = of_nat" +by (rule ext, induct_tac x, simp_all) + +primrec + list_fun :: "nat \ _ list \ _ \ _" +where + "list_fun n [] = (\x. 0)" +| "list_fun n (x # xs) = fun_upd (list_fun (Suc n) xs) (from_nat n) x" + +definition "vector l = (\ i. list_fun 1 l i)" +(*definition "vector l = (\ i. if i <= length l then l ! (i - 1) else 0)"*) + +lemma vector_1: "(vector[x]) $1 = x" + unfolding vector_def by simp + +lemma vector_2: + "(vector[x,y]) $1 = x" + "(vector[x,y] :: 'a^2)$2 = (y::'a::zero)" + unfolding vector_def by simp_all + +lemma vector_3: + "(vector [x,y,z] ::('a::zero)^3)$1 = x" + "(vector [x,y,z] ::('a::zero)^3)$2 = y" + "(vector [x,y,z] ::('a::zero)^3)$3 = z" + unfolding vector_def by simp_all + +lemma forall_vector_1: "(\v::'a::zero^1. P v) \ (\x. P(vector[x]))" + apply auto + apply (erule_tac x="v$1" in allE) + apply (subgoal_tac "vector [v$1] = v") + apply simp + apply (vector vector_def) + apply simp + done + +lemma forall_vector_2: "(\v::'a::zero^2. P v) \ (\x y. P(vector[x, y]))" + apply auto + apply (erule_tac x="v$1" in allE) + apply (erule_tac x="v$2" in allE) + apply (subgoal_tac "vector [v$1, v$2] = v") + apply simp + apply (vector vector_def) + apply (simp add: forall_2) + done + +lemma forall_vector_3: "(\v::'a::zero^3. P v) \ (\x y z. 
P(vector[x, y, z]))" + apply auto + apply (erule_tac x="v$1" in allE) + apply (erule_tac x="v$2" in allE) + apply (erule_tac x="v$3" in allE) + apply (subgoal_tac "vector [v$1, v$2, v$3] = v") + apply simp + apply (vector vector_def) + apply (simp add: forall_3) + done + +lemma range_vec1[simp]:"range vec1 = UNIV" apply(rule set_ext,rule) unfolding image_iff defer + apply(rule_tac x="dest_vec1 x" in bexI) by auto + +lemma dest_vec1_lambda: "dest_vec1(\ i. x i) = x 1" + by (simp) + +lemma dest_vec1_vec: "dest_vec1(vec x) = x" + by (simp) + +lemma dest_vec1_sum: assumes fS: "finite S" + shows "dest_vec1(setsum f S) = setsum (dest_vec1 o f) S" + apply (induct rule: finite_induct[OF fS]) + apply simp + apply auto + done + +lemma norm_vec1 [simp]: "norm(vec1 x) = abs(x)" + by (simp add: vec_def norm_real) + +lemma dist_vec1: "dist(vec1 x) (vec1 y) = abs(x - y)" + by (simp only: dist_real vec1_component) +lemma abs_dest_vec1: "norm x = \dest_vec1 x\" + by (metis vec1_dest_vec1(1) norm_vec1) + +lemmas vec1_dest_vec1_simps = forall_vec1 vec_add[THEN sym] dist_vec1 vec_sub[THEN sym] vec1_dest_vec1 norm_vec1 vector_smult_component + vec1_eq vec_cmul[THEN sym] smult_conv_scaleR[THEN sym] o_def dist_real_def norm_vec1 real_norm_def + +lemma bounded_linear_vec1:"bounded_linear (vec1::real\real^1)" + unfolding bounded_linear_def additive_def bounded_linear_axioms_def + unfolding smult_conv_scaleR[THEN sym] unfolding vec1_dest_vec1_simps + apply(rule conjI) defer apply(rule conjI) defer apply(rule_tac x=1 in exI) by auto + +lemma linear_vmul_dest_vec1: + fixes f:: "real^_ \ real^1" + shows "linear f \ linear (\x. dest_vec1(f x) *s v)" + unfolding smult_conv_scaleR + by (rule linear_vmul_component) + +lemma linear_from_scalars: + assumes lf: "linear (f::real^1 \ real^_)" + shows "f = (\x. dest_vec1 x *s column 1 (matrix f))" + unfolding smult_conv_scaleR + apply (rule ext) + apply (subst matrix_works[OF lf, symmetric]) + apply (auto simp add: Cart_eq matrix_vector_mult_def column_def mult_commute) + done + +lemma linear_to_scalars: assumes lf: "linear (f::real ^'n \ real^1)" + shows "f = (\x. vec1(row 1 (matrix f) \ x))" + apply (rule ext) + apply (subst matrix_works[OF lf, symmetric]) + apply (simp add: Cart_eq matrix_vector_mult_def row_def inner_vector_def mult_commute) + done + +lemma dest_vec1_eq_0: "dest_vec1 x = 0 \ x = 0" + by (simp add: dest_vec1_eq[symmetric]) + +lemma setsum_scalars: assumes fS: "finite S" + shows "setsum f S = vec1 (setsum (dest_vec1 o f) S)" + unfolding vec_setsum[OF fS] by simp + +lemma dest_vec1_wlog_le: "(\(x::'a::linorder ^ 1) y. P x y \ P y x) \ (\x y. 
dest_vec1 x <= dest_vec1 y ==> P x y) \ P x y" + apply (cases "dest_vec1 x \ dest_vec1 y") + apply simp + apply (subgoal_tac "dest_vec1 y \ dest_vec1 x") + apply (auto) + done + +text{* Lifting and dropping *} + +lemma continuous_on_o_dest_vec1: fixes f::"real \ 'a::real_normed_vector" + assumes "continuous_on {a..b::real} f" shows "continuous_on {vec1 a..vec1 b} (f o dest_vec1)" + using assms unfolding continuous_on_iff apply safe + apply(erule_tac x="x$1" in ballE,erule_tac x=e in allE) apply safe + apply(rule_tac x=d in exI) apply safe unfolding o_def dist_real_def dist_real + apply(erule_tac x="dest_vec1 x'" in ballE) by(auto simp add:vector_le_def) + +lemma continuous_on_o_vec1: fixes f::"real^1 \ 'a::real_normed_vector" + assumes "continuous_on {a..b} f" shows "continuous_on {dest_vec1 a..dest_vec1 b} (f o vec1)" + using assms unfolding continuous_on_iff apply safe + apply(erule_tac x="vec x" in ballE,erule_tac x=e in allE) apply safe + apply(rule_tac x=d in exI) apply safe unfolding o_def dist_real_def dist_real + apply(erule_tac x="vec1 x'" in ballE) by(auto simp add:vector_le_def) + +lemma continuous_on_vec1:"continuous_on A (vec1::real\real^1)" + by(rule linear_continuous_on[OF bounded_linear_vec1]) + +lemma mem_interval_1: fixes x :: "real^1" shows + "(x \ {a .. b} \ dest_vec1 a \ dest_vec1 x \ dest_vec1 x \ dest_vec1 b)" + "(x \ {a<.. dest_vec1 a < dest_vec1 x \ dest_vec1 x < dest_vec1 b)" +by(simp_all add: Cart_eq vector_less_def vector_le_def) + +lemma vec1_interval:fixes a::"real" shows + "vec1 ` {a .. b} = {vec1 a .. vec1 b}" + "vec1 ` {a<.. {a .. b} ==> x \ {a<.. (x = a) \ (x = b)" + unfolding Cart_eq vector_less_def vector_le_def mem_interval by(auto simp del:dest_vec1_eq) + +lemma in_interval_1: fixes x :: "real^1" shows + "(x \ {a .. b} \ dest_vec1 a \ dest_vec1 x \ dest_vec1 x \ dest_vec1 b) \ + (x \ {a<.. dest_vec1 a < dest_vec1 x \ dest_vec1 x < dest_vec1 b)" + unfolding Cart_eq vector_less_def vector_le_def mem_interval by(auto simp del:dest_vec1_eq) + +lemma interval_eq_empty_1: fixes a :: "real^1" shows + "{a .. b} = {} \ dest_vec1 b < dest_vec1 a" + "{a<.. dest_vec1 b \ dest_vec1 a" + unfolding interval_eq_empty and ex_1 by auto + +lemma subset_interval_1: fixes a :: "real^1" shows + "({a .. b} \ {c .. d} \ dest_vec1 b < dest_vec1 a \ + dest_vec1 c \ dest_vec1 a \ dest_vec1 a \ dest_vec1 b \ dest_vec1 b \ dest_vec1 d)" + "({a .. b} \ {c<.. dest_vec1 b < dest_vec1 a \ + dest_vec1 c < dest_vec1 a \ dest_vec1 a \ dest_vec1 b \ dest_vec1 b < dest_vec1 d)" + "({a<.. {c .. d} \ dest_vec1 b \ dest_vec1 a \ + dest_vec1 c \ dest_vec1 a \ dest_vec1 a < dest_vec1 b \ dest_vec1 b \ dest_vec1 d)" + "({a<.. {c<.. dest_vec1 b \ dest_vec1 a \ + dest_vec1 c \ dest_vec1 a \ dest_vec1 a < dest_vec1 b \ dest_vec1 b \ dest_vec1 d)" + unfolding subset_interval[of a b c d] unfolding forall_1 by auto + +lemma eq_interval_1: fixes a :: "real^1" shows + "{a .. b} = {c .. d} \ + dest_vec1 b < dest_vec1 a \ dest_vec1 d < dest_vec1 c \ + dest_vec1 a = dest_vec1 c \ dest_vec1 b = dest_vec1 d" +unfolding set_eq_subset[of "{a .. b}" "{c .. d}"] +unfolding subset_interval_1(1)[of a b c d] +unfolding subset_interval_1(1)[of c d a b] +by auto + +lemma disjoint_interval_1: fixes a :: "real^1" shows + "{a .. b} \ {c .. d} = {} \ dest_vec1 b < dest_vec1 a \ dest_vec1 d < dest_vec1 c \ dest_vec1 b < dest_vec1 c \ dest_vec1 d < dest_vec1 a" + "{a .. b} \ {c<.. dest_vec1 b < dest_vec1 a \ dest_vec1 d \ dest_vec1 c \ dest_vec1 b \ dest_vec1 c \ dest_vec1 d \ dest_vec1 a" + "{a<.. {c .. 
d} = {} \ dest_vec1 b \ dest_vec1 a \ dest_vec1 d < dest_vec1 c \ dest_vec1 b \ dest_vec1 c \ dest_vec1 d \ dest_vec1 a" + "{a<.. {c<.. dest_vec1 b \ dest_vec1 a \ dest_vec1 d \ dest_vec1 c \ dest_vec1 b \ dest_vec1 c \ dest_vec1 d \ dest_vec1 a" + unfolding disjoint_interval and ex_1 by auto + +lemma open_closed_interval_1: fixes a :: "real^1" shows + "{a<.. dest_vec1 b ==> {a .. b} = {a<.. {a,b}" + unfolding expand_set_eq apply simp unfolding vector_less_def and vector_le_def and forall_1 and dest_vec1_eq[THEN sym] by(auto simp del:dest_vec1_eq) + +lemma Lim_drop_le: fixes f :: "'a \ real^1" shows + "(f ---> l) net \ ~(trivial_limit net) \ eventually (\x. dest_vec1 (f x) \ b) net ==> dest_vec1 l \ b" + using Lim_component_le[of f l net 1 b] by auto + +lemma Lim_drop_ge: fixes f :: "'a \ real^1" shows + "(f ---> l) net \ ~(trivial_limit net) \ eventually (\x. b \ dest_vec1 (f x)) net ==> b \ dest_vec1 l" + using Lim_component_ge[of f l net b 1] by auto + +text{* Also more convenient formulations of monotone convergence. *} + +lemma bounded_increasing_convergent: fixes s::"nat \ real^1" + assumes "bounded {s n| n::nat. True}" "\n. dest_vec1(s n) \ dest_vec1(s(Suc n))" + shows "\l. (s ---> l) sequentially" +proof- + obtain a where a:"\n. \dest_vec1 (s n)\ \ a" using assms(1)[unfolded bounded_iff abs_dest_vec1] by auto + { fix m::nat + have "\ n. n\m \ dest_vec1 (s m) \ dest_vec1 (s n)" + apply(induct_tac n) apply simp using assms(2) apply(erule_tac x="na" in allE) by(auto simp add: not_less_eq_eq) } + hence "\m n. m \ n \ dest_vec1 (s m) \ dest_vec1 (s n)" by auto + then obtain l where "\e>0. \N. \n\N. \dest_vec1 (s n) - l\ < e" using convergent_bounded_monotone[OF a] unfolding monoseq_def by auto + thus ?thesis unfolding Lim_sequentially apply(rule_tac x="vec1 l" in exI) + unfolding dist_norm unfolding abs_dest_vec1 by auto +qed + +lemma dest_vec1_simps[simp]: fixes a::"real^1" + shows "a$1 = 0 \ a = 0" (*"a \ 1 \ dest_vec1 a \ 1" "0 \ a \ 0 \ dest_vec1 a"*) + "a \ b \ dest_vec1 a \ dest_vec1 b" "dest_vec1 (1::real^1) = 1" + by(auto simp add: vector_le_def Cart_eq) + +lemma dest_vec1_inverval: + "dest_vec1 ` {a .. b} = {dest_vec1 a .. dest_vec1 b}" + "dest_vec1 ` {a<.. b} = {dest_vec1 a<.. dest_vec1 b}" + "dest_vec1 ` {a ..x. dest_vec1 (f x)) S" + using dest_vec1_sum[OF assms] by auto + +lemma open_dest_vec1_vimage: "open S \ open (dest_vec1 -` S)" +unfolding open_vector_def forall_1 by auto + +lemma tendsto_dest_vec1 [tendsto_intros]: + "(f ---> l) net \ ((\x. dest_vec1 (f x)) ---> dest_vec1 l) net" +by(rule tendsto_Cart_nth) + +lemma continuous_dest_vec1: "continuous net f \ continuous net (\x. dest_vec1 (f x))" + unfolding continuous_def by (rule tendsto_dest_vec1) + +lemma forall_dest_vec1: "(\x. P x) \ (\x. P(dest_vec1 x))" + apply safe defer apply(erule_tac x="vec1 x" in allE) by auto + +lemma forall_of_dest_vec1: "(\v. P (\x. dest_vec1 (v x))) \ (\x. P x)" + apply rule apply rule apply(erule_tac x="(vec1 \ x)" in allE) unfolding o_def vec1_dest_vec1 by auto + +lemma forall_of_dest_vec1': "(\v. P (dest_vec1 v)) \ (\x. P x)" + apply rule apply rule apply(erule_tac x="(vec1 x)" in allE) defer apply rule + apply(erule_tac x="dest_vec1 v" in allE) unfolding o_def vec1_dest_vec1 by auto + +lemma dist_vec1_0[simp]: "dist(vec1 (x::real)) 0 = norm x" unfolding dist_norm by auto + +lemma bounded_linear_vec1_dest_vec1: fixes f::"real \ real" + shows "linear (vec1 \ f \ dest_vec1) = bounded_linear f" (is "?l = ?r") proof- + { assume ?l guess K using linear_bounded[OF `?l`] .. + hence "\K. \x. 
\f x\ \ \x\ * K" apply(rule_tac x=K in exI) + unfolding vec1_dest_vec1_simps by (auto simp add:field_simps) } + thus ?thesis unfolding linear_def bounded_linear_def additive_def bounded_linear_axioms_def o_def + unfolding vec1_dest_vec1_simps by auto qed + +lemma vec1_le[simp]:fixes a::real shows "vec1 a \ vec1 b \ a \ b" + unfolding vector_le_def by auto +lemma vec1_less[simp]:fixes a::real shows "vec1 a < vec1 b \ a < b" + unfolding vector_less_def by auto + +end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Mutabelle/Mutabelle.thy --- a/src/HOL/Mutabelle/Mutabelle.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Mutabelle/Mutabelle.thy Tue May 04 20:30:22 2010 +0200 @@ -61,16 +61,16 @@ (* ML {* -Quickcheck.test_term (ProofContext.init @{theory}) +Quickcheck.test_term (ProofContext.init_global @{theory}) false (SOME "SML") 1 1 (prop_of (hd @{thms nibble_pair_of_char_simps})) *} ML {* fun is_executable thy th = can (Quickcheck.test_term - (ProofContext.init thy) false (SOME "SML") 1 1) (prop_of th); + (ProofContext.init_global thy) false (SOME "SML") 1 1) (prop_of th); fun is_executable_term thy t = can (Quickcheck.test_term - (ProofContext.init thy) false (SOME "SML") 1 1) t; + (ProofContext.init_global thy) false (SOME "SML") 1 1) t; fun thms_of thy = filter (fn (_, th) => not (Thm.is_internal th) andalso Context.theory_name (theory_of_thm th) = Context.theory_name thy andalso diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Mutabelle/mutabelle.ML --- a/src/HOL/Mutabelle/mutabelle.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Mutabelle/mutabelle.ML Tue May 04 20:30:22 2010 +0200 @@ -499,14 +499,14 @@ fun is_executable thy insts th = (Quickcheck.test_term - (ProofContext.init thy) false (SOME (!testgen_name)) 1 1 (preprocess thy insts (prop_of th)); + (ProofContext.init_global thy) false (SOME (!testgen_name)) 1 1 (preprocess thy insts (prop_of th)); priority "executable"; true) handle ERROR s => (priority ("not executable: " ^ s); false); fun qc_recursive usedthy [] insts sz qciter acc = rev acc | qc_recursive usedthy (x::xs) insts sz qciter acc = qc_recursive usedthy xs insts sz qciter (priority ("qc_recursive: " ^ string_of_int (length xs)); ((x, pretty (the_default [] (Quickcheck.test_term - (ProofContext.init usedthy) false (SOME (!testgen_name)) sz qciter (preprocess usedthy insts x)))) :: acc)) + (ProofContext.init_global usedthy) false (SOME (!testgen_name)) sz qciter (preprocess usedthy insts x)))) :: acc)) handle ERROR msg => (priority msg; qc_recursive usedthy xs insts sz qciter acc); diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Mutabelle/mutabelle_extra.ML --- a/src/HOL/Mutabelle/mutabelle_extra.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Mutabelle/mutabelle_extra.ML Tue May 04 20:30:22 2010 +0200 @@ -97,7 +97,7 @@ fun invoke_quickcheck quickcheck_generator thy t = TimeLimit.timeLimit (Time.fromSeconds (! 
Auto_Counterexample.time_limit)) (fn _ => - case Quickcheck.gen_test_term (ProofContext.init thy) true true (SOME quickcheck_generator) + case Quickcheck.gen_test_term (ProofContext.init_global thy) true true (SOME quickcheck_generator) size iterations (preprocess thy [] t) of (NONE, (time_report, report)) => (NoCex, (time_report, report)) | (SOME _, (time_report, report)) => (GenuineCex, (time_report, report))) () @@ -133,7 +133,7 @@ fun invoke_nitpick thy t = let - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy val state = Proof.init ctxt in let @@ -251,7 +251,7 @@ end fun is_executable_term thy t = can (TimeLimit.timeLimit (Time.fromMilliseconds 2000) (Quickcheck.test_term - (ProofContext.init thy) false (SOME "SML") 1 0)) (preprocess thy [] t) + (ProofContext.init_global thy) false (SOME "SML") 1 0)) (preprocess thy [] t) fun is_executable_thm thy th = is_executable_term thy (prop_of th) val freezeT = diff -r aace7a969410 -r 8629ac3efb19 src/HOL/NSA/HyperDef.thy --- a/src/HOL/NSA/HyperDef.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/NSA/HyperDef.thy Tue May 04 20:30:22 2010 +0200 @@ -140,12 +140,12 @@ lemma of_hypreal_inverse [simp]: "\x. of_hypreal (inverse x) = - inverse (of_hypreal x :: 'a::{real_div_algebra,division_by_zero} star)" + inverse (of_hypreal x :: 'a::{real_div_algebra, division_ring_inverse_zero} star)" by transfer (rule of_real_inverse) lemma of_hypreal_divide [simp]: "\x y. of_hypreal (x / y) = - (of_hypreal x / of_hypreal y :: 'a::{real_field,division_by_zero} star)" + (of_hypreal x / of_hypreal y :: 'a::{real_field, field_inverse_zero} star)" by transfer (rule of_real_divide) lemma of_hypreal_eq_iff [simp]: @@ -454,7 +454,7 @@ by transfer (rule field_power_not_zero) lemma hyperpow_inverse: - "\r n. r \ (0::'a::{division_by_zero,field} star) + "\r n. r \ (0::'a::field_inverse_zero star) \ inverse (r pow n) = (inverse r) pow n" by transfer (rule power_inverse) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/NSA/NSA.thy --- a/src/HOL/NSA/NSA.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/NSA/NSA.thy Tue May 04 20:30:22 2010 +0200 @@ -145,12 +145,12 @@ by transfer (rule nonzero_norm_inverse) lemma hnorm_inverse: - "\a::'a::{real_normed_div_algebra,division_by_zero} star. + "\a::'a::{real_normed_div_algebra, division_ring_inverse_zero} star. hnorm (inverse a) = inverse (hnorm a)" by transfer (rule norm_inverse) lemma hnorm_divide: - "\a b::'a::{real_normed_field,division_by_zero} star. + "\a b::'a::{real_normed_field, field_inverse_zero} star. hnorm (a / b) = hnorm a / hnorm b" by transfer (rule norm_divide) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/NSA/StarDef.thy --- a/src/HOL/NSA/StarDef.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/NSA/StarDef.thy Tue May 04 20:30:22 2010 +0200 @@ -896,14 +896,19 @@ apply (transfer, fact divide_inverse) done +instance star :: (division_ring_inverse_zero) division_ring_inverse_zero +by (intro_classes, transfer, rule inverse_zero) + instance star :: (field) field apply (intro_classes) apply (transfer, erule left_inverse) apply (transfer, rule divide_inverse) done -instance star :: (division_by_zero) division_by_zero -by (intro_classes, transfer, rule inverse_zero) +instance star :: (field_inverse_zero) field_inverse_zero +apply intro_classes +apply (rule inverse_zero) +done instance star :: (ordered_semiring) ordered_semiring apply (intro_classes) @@ -945,6 +950,7 @@ instance star :: (linordered_idom) linordered_idom .. instance star :: (linordered_field) linordered_field .. 
+instance star :: (linordered_field_inverse_zero) linordered_field_inverse_zero .. subsection {* Power *} diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Nitpick_Examples/Mono_Nits.thy --- a/src/HOL/Nitpick_Examples/Mono_Nits.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Nitpick_Examples/Mono_Nits.thy Tue May 04 20:30:22 2010 +0200 @@ -21,9 +21,9 @@ {thy = @{theory}, ctxt = @{context}, max_bisim_depth = ~1, boxes = [], stds = [(NONE, true)], wfs = [], user_axioms = NONE, debug = false, binary_ints = SOME false, destroy_constrs = false, specialize = false, - skolemize = false, star_linear_preds = false, uncurry = false, - fast_descrs = false, tac_timeout = NONE, evals = [], case_names = [], - def_table = def_table, nondef_table = Symtab.empty, user_nondefs = [], + star_linear_preds = false, fast_descrs = false, tac_timeout = NONE, + evals = [], case_names = [], def_table = def_table, + nondef_table = Symtab.empty, user_nondefs = [], simp_table = Unsynchronized.ref Symtab.empty, psimp_table = Symtab.empty, choice_spec_table = Symtab.empty, intro_table = Symtab.empty, ground_thm_table = Inttab.empty, ersatz_table = [], diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Nitpick_Examples/Special_Nits.thy --- a/src/HOL/Nitpick_Examples/Special_Nits.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Nitpick_Examples/Special_Nits.thy Tue May 04 20:30:22 2010 +0200 @@ -73,8 +73,6 @@ \ (\u. b = u \ f3 b b u b b = f3 u u b u u)" nitpick [expect = none] nitpick [dont_specialize, expect = none] -nitpick [dont_skolemize, expect = none] -nitpick [dont_specialize, dont_skolemize, expect = none] sorry function f4 :: "nat \ nat \ nat" where diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Nominal/nominal_datatype.ML --- a/src/HOL/Nominal/nominal_datatype.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Nominal/nominal_datatype.ML Tue May 04 20:30:22 2010 +0200 @@ -151,7 +151,7 @@ val meta_spec = thm "meta_spec"; fun projections rule = - Project_Rule.projections (ProofContext.init (Thm.theory_of_thm rule)) rule + Project_Rule.projections (ProofContext.init_global (Thm.theory_of_thm rule)) rule |> map (Drule.export_without_context #> Rule_Cases.save rule); val supp_prod = thm "supp_prod"; @@ -215,7 +215,7 @@ fun inter_sort thy S S' = Type.inter_sort (Sign.tsig_of thy) (S, S'); fun augment_sort_typ thy S = - let val S = Sign.certify_sort thy S + let val S = Sign.minimize_sort thy (Sign.certify_sort thy S) in map_type_tfree (fn (s, S') => TFree (s, if member (op = o apsnd fst) sorts s then inter_sort thy S S' else S')) end; @@ -449,7 +449,7 @@ at_inst RS (pt_inst RS pt_perm_compose) RS sym, at_inst RS (pt_inst RS pt_perm_compose_rev) RS sym] end)) - val sort = Sign.certify_sort thy (cp_class :: pt_class); + val sort = Sign.minimize_sort thy (Sign.certify_sort thy (cp_class :: pt_class)); val thms = split_conj_thm (Goal.prove_global thy [] [] (augment_sort thy sort (HOLogic.mk_Trueprop (foldr1 HOLogic.mk_conj @@ -654,8 +654,8 @@ perm_def), name), tvs), perm_closed) => fn thy => let val pt_class = pt_class_of thy atom; - val sort = Sign.certify_sort thy - (pt_class :: map (cp_class_of thy atom) (remove (op =) atom dt_atoms)) + val sort = Sign.minimize_sort thy (Sign.certify_sort thy + (pt_class :: map (cp_class_of thy atom) (remove (op =) atom dt_atoms))) in AxClass.prove_arity (Sign.intern_type thy name, map (inter_sort thy sort o snd) tvs, [pt_class]) @@ -678,10 +678,10 @@ fun cp_instance (atom1, perm_closed_thms1) (atom2, perm_closed_thms2) thy = let val cp_class = cp_class_of thy atom1 atom2; - val sort = 
Sign.certify_sort thy + val sort = Sign.minimize_sort thy (Sign.certify_sort thy (pt_class_of thy atom1 :: map (cp_class_of thy atom1) (remove (op =) atom1 dt_atoms) @ (if atom1 = atom2 then [cp_class_of thy atom1 atom1] else - pt_class_of thy atom2 :: map (cp_class_of thy atom2) (remove (op =) atom2 dt_atoms))); + pt_class_of thy atom2 :: map (cp_class_of thy atom2) (remove (op =) atom2 dt_atoms)))); val cp1' = cp_inst_of thy atom1 atom2 RS cp1 in fold (fn ((((((Abs_inverse, Rep), perm_def), name), tvs), perm_closed1), perm_closed2) => fn thy => @@ -1131,7 +1131,7 @@ fold (fn (atom, ths) => fn thy => let val class = fs_class_of thy atom; - val sort = Sign.certify_sort thy (class :: pt_cp_sort) + val sort = Sign.minimize_sort thy (Sign.certify_sort thy (class :: pt_cp_sort)); in fold (fn Type (s, Ts) => AxClass.prove_arity (s, map (inter_sort thy sort o snd o dest_TFree) Ts, [class]) (Class.intro_classes_tac [] THEN resolve_tac ths 1)) newTs thy @@ -1142,7 +1142,7 @@ val pnames = if length descr'' = 1 then ["P"] else map (fn i => "P" ^ string_of_int i) (1 upto length descr''); val ind_sort = if null dt_atomTs then HOLogic.typeS - else Sign.certify_sort thy9 (map (fs_class_of thy9) dt_atoms); + else Sign.minimize_sort thy9 (Sign.certify_sort thy9 (map (fs_class_of thy9) dt_atoms)); val fsT = TFree ("'n", ind_sort); val fsT' = TFree ("'n", HOLogic.typeS); @@ -1423,7 +1423,7 @@ val (rec_result_Ts', rec_fn_Ts') = Datatype_Prop.make_primrec_Ts descr' sorts used; val rec_sort = if null dt_atomTs then HOLogic.typeS else - Sign.certify_sort thy10 pt_cp_sort; + Sign.minimize_sort thy10 (Sign.certify_sort thy10 pt_cp_sort); val rec_result_Ts = map (fn TFree (s, _) => TFree (s, rec_sort)) rec_result_Ts'; val rec_fn_Ts = map (typ_subst_atomic (rec_result_Ts' ~~ rec_result_Ts)) rec_fn_Ts'; @@ -1617,7 +1617,7 @@ val y = Free ("y", U); val y' = Free ("y'", U) in - Drule.export_without_context (Goal.prove (ProofContext.init thy11) [] + Drule.export_without_context (Goal.prove (ProofContext.init_global thy11) [] (map (augment_sort thy11 fs_cp_sort) (finite_prems @ [HOLogic.mk_Trueprop (R $ x $ y), @@ -1712,7 +1712,7 @@ (Const ("Nominal.supp", fsT' --> HOLogic.mk_setT aT) $ rec_ctxt))) dt_atomTs; val rec_unique_thms = split_conj_thm (Goal.prove - (ProofContext.init thy11) (map fst rec_unique_frees) + (ProofContext.init_global thy11) (map fst rec_unique_frees) (map (augment_sort thy11 fs_cp_sort) (flat finite_premss @ finite_ctxt_prems @ rec_prems @ rec_prems')) (augment_sort thy11 fs_cp_sort diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Nominal/nominal_fresh_fun.ML --- a/src/HOL/Nominal/nominal_fresh_fun.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Nominal/nominal_fresh_fun.ML Tue May 04 20:30:22 2010 +0200 @@ -124,7 +124,7 @@ (* Find the variable we instantiate *) let val thy = theory_of_thm thm; - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val ss = global_simpset_of thy; val abs_fresh = PureThy.get_thms thy "abs_fresh"; val fresh_perm_app = PureThy.get_thms thy "fresh_perm_app"; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Nominal/nominal_inductive.ML --- a/src/HOL/Nominal/nominal_inductive.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Nominal/nominal_inductive.ML Tue May 04 20:30:22 2010 +0200 @@ -198,8 +198,8 @@ val atomTs = distinct op = (maps (map snd o #2) prems); val ind_sort = if null atomTs then HOLogic.typeS - else Sign.certify_sort thy (map (fn T => Sign.intern_class thy - ("fs_" ^ Long_Name.base_name (fst (dest_Type T)))) atomTs); + else Sign.minimize_sort 
thy (Sign.certify_sort thy (map (fn T => Sign.intern_class thy + ("fs_" ^ Long_Name.base_name (fst (dest_Type T)))) atomTs)); val ([fs_ctxt_tyname], _) = Name.variants ["'n"] (Variable.names_of ctxt'); val ([fs_ctxt_name], ctxt'') = Variable.variant_fixes ["z"] ctxt'; val fsT = TFree (fs_ctxt_tyname, ind_sort); @@ -543,7 +543,7 @@ in ctxt'' |> - Proof.theorem_i NONE (fn thss => fn ctxt => + Proof.theorem NONE (fn thss => fn ctxt => let val rec_name = space_implode "_" (map Long_Name.base_name names); val rec_qualified = Binding.qualify false rec_name; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Nominal/nominal_inductive2.ML --- a/src/HOL/Nominal/nominal_inductive2.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Nominal/nominal_inductive2.ML Tue May 04 20:30:22 2010 +0200 @@ -223,8 +223,8 @@ val atomTs = distinct op = (maps (map snd o #2) prems); val atoms = map (fst o dest_Type) atomTs; val ind_sort = if null atomTs then HOLogic.typeS - else Sign.certify_sort thy (map (fn a => Sign.intern_class thy - ("fs_" ^ Long_Name.base_name a)) atoms); + else Sign.minimize_sort thy (Sign.certify_sort thy (map (fn a => Sign.intern_class thy + ("fs_" ^ Long_Name.base_name a)) atoms)); val ([fs_ctxt_tyname], _) = Name.variants ["'n"] (Variable.names_of ctxt'); val ([fs_ctxt_name], ctxt'') = Variable.variant_fixes ["z"] ctxt'; val fsT = TFree (fs_ctxt_tyname, ind_sort); @@ -445,7 +445,7 @@ in ctxt'' |> - Proof.theorem_i NONE (fn thss => fn ctxt => + Proof.theorem NONE (fn thss => fn ctxt => let val rec_name = space_implode "_" (map Long_Name.base_name names); val rec_qualified = Binding.qualify false rec_name; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Nominal/nominal_primrec.ML --- a/src/HOL/Nominal/nominal_primrec.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Nominal/nominal_primrec.ML Tue May 04 20:30:22 2010 +0200 @@ -363,7 +363,7 @@ in lthy' |> Variable.add_fixes (map fst lsrs) |> snd |> - Proof.theorem_i NONE + Proof.theorem NONE (fn thss => fn goal_ctxt => let val simps = ProofContext.export goal_ctxt lthy' (flat thss); diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Number_Theory/Binomial.thy --- a/src/HOL/Number_Theory/Binomial.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Number_Theory/Binomial.thy Tue May 04 20:30:22 2010 +0200 @@ -208,7 +208,7 @@ have "fact (k + 1) * fact (n - k) * (n + 1 choose (k + 1)) = fact (k + 1) * fact (n - k) * (n choose (k + 1)) + fact (k + 1) * fact (n - k) * (n choose k)" - by (subst choose_reduce_nat, auto simp add: ring_simps) + by (subst choose_reduce_nat, auto simp add: field_simps) also note one also note two also with less have "(n - k) * fact n + (k + 1) * fact n= fact (n + 1)" @@ -279,7 +279,7 @@ also have "... = (SUM k=0..n. of_nat (n choose k) * a^k * b^(n+1-k)) + (SUM k=1..n+1. of_nat (n choose (k - 1)) * a^k * b^(n+1-k))" by (simp add:setsum_shift_bounds_cl_Suc_ivl Suc_diff_le - power_Suc ring_simps One_nat_def del:setsum_cl_ivl_Suc) + power_Suc field_simps One_nat_def del:setsum_cl_ivl_Suc) also have "... = a^(n+1) + b^(n+1) + (SUM k=1..n. of_nat (n choose (k - 1)) * a^k * b^(n+1-k)) + (SUM k=1..n. of_nat (n choose k) * a^k * b^(n+1-k))" @@ -287,10 +287,10 @@ also have "... = a^(n+1) + b^(n+1) + (SUM k=1..n. of_nat(n+1 choose k) * a^k * b^(n+1-k))" - by (auto simp add: ring_simps setsum_addf [symmetric] + by (auto simp add: field_simps setsum_addf [symmetric] choose_reduce_nat) also have "... = (SUM k=0..n+1. 
of_nat (n+1 choose k) * a^k * b^(n+1-k))" - using decomp by (simp add: ring_simps) + using decomp by (simp add: field_simps) finally show "?P (n + 1)" by simp qed diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Number_Theory/Cong.thy --- a/src/HOL/Number_Theory/Cong.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Number_Theory/Cong.thy Tue May 04 20:30:22 2010 +0200 @@ -350,7 +350,7 @@ apply (subst prime_dvd_mult_eq_int [symmetric], assumption) (* any way around this? *) apply (subgoal_tac "a * a - 1 = (a - 1) * (a - -1)") - apply (auto simp add: ring_simps) + apply (auto simp add: field_simps) done lemma cong_mult_rcancel_int: @@ -416,7 +416,7 @@ done lemma cong_iff_lin_int: "([(a::int) = b] (mod m)) = (\k. b = a + m * k)" - apply (auto simp add: cong_altdef_int dvd_def ring_simps) + apply (auto simp add: cong_altdef_int dvd_def field_simps) apply (rule_tac [!] x = "-k" in exI, auto) done @@ -428,14 +428,14 @@ apply (unfold dvd_def, auto) apply (rule_tac x = k in exI) apply (rule_tac x = 0 in exI) - apply (auto simp add: ring_simps) + apply (auto simp add: field_simps) apply (subst (asm) cong_sym_eq_nat) apply (subst (asm) cong_altdef_nat) apply force apply (unfold dvd_def, auto) apply (rule_tac x = 0 in exI) apply (rule_tac x = k in exI) - apply (auto simp add: ring_simps) + apply (auto simp add: field_simps) apply (unfold cong_nat_def) apply (subgoal_tac "a mod m = (a + k2 * m) mod m") apply (erule ssubst)back @@ -533,7 +533,7 @@ apply (auto simp add: cong_iff_lin_nat dvd_def) apply (rule_tac x="k1 * k" in exI) apply (rule_tac x="k2 * k" in exI) - apply (simp add: ring_simps) + apply (simp add: field_simps) done lemma cong_dvd_modulus_int: "[(x::int) = y] (mod m) \ n dvd m \ @@ -559,7 +559,7 @@ lemma neg_cong_int: "([(a::int) = b] (mod m)) = ([-a = -b] (mod m))" apply (simp add: cong_altdef_int) apply (subst dvd_minus_iff [symmetric]) - apply (simp add: ring_simps) + apply (simp add: field_simps) done lemma cong_modulus_neg_int: "([(a::int) = b] (mod m)) = ([a = b] (mod -m))" @@ -603,7 +603,7 @@ apply (unfold dvd_def) apply auto [1] apply (rule_tac x = k in exI) - apply (auto simp add: ring_simps) [1] + apply (auto simp add: field_simps) [1] apply (subst cong_altdef_nat) apply (auto simp add: dvd_def) done @@ -611,7 +611,7 @@ lemma cong_le_nat: "(y::nat) <= x \ [x = y] (mod n) \ (\q. x = q * n + y)" apply (subst cong_altdef_nat) apply assumption - apply (unfold dvd_def, auto simp add: ring_simps) + apply (unfold dvd_def, auto simp add: field_simps) apply (rule_tac x = k in exI) apply auto done diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Number_Theory/Fib.thy --- a/src/HOL/Number_Theory/Fib.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Number_Theory/Fib.thy Tue May 04 20:30:22 2010 +0200 @@ -143,9 +143,9 @@ apply (induct n rule: fib_induct_nat) apply auto apply (subst fib_reduce_nat) - apply (auto simp add: ring_simps) + apply (auto simp add: field_simps) apply (subst (1 3 5) fib_reduce_nat) - apply (auto simp add: ring_simps Suc_eq_plus1) + apply (auto simp add: field_simps Suc_eq_plus1) (* hmmm. Why doesn't "n + (1 + (1 + k))" simplify to "n + k + 2"? 
*) apply (subgoal_tac "n + (k + 2) = n + (1 + (1 + k))") apply (erule ssubst) back back @@ -184,7 +184,7 @@ lemma fib_Cassini_aux_int: "fib (int n + 2) * fib (int n) - (fib (int n + 1))^2 = (-1)^(n + 1)" apply (induct n) - apply (auto simp add: ring_simps power2_eq_square fib_reduce_int + apply (auto simp add: field_simps power2_eq_square fib_reduce_int power_add) done @@ -222,7 +222,7 @@ apply (subst (2) fib_reduce_nat) apply (auto simp add: Suc_eq_plus1) (* again, natdiff_cancel *) apply (subst add_commute, auto) - apply (subst gcd_commute_nat, auto simp add: ring_simps) + apply (subst gcd_commute_nat, auto simp add: field_simps) done lemma coprime_fib_Suc_nat: "coprime (fib n) (fib (Suc n))" @@ -305,7 +305,7 @@ theorem fib_mult_eq_setsum_nat: "fib ((n::nat) + 1) * fib n = (\k \ {..n}. fib k * fib k)" apply (induct n) - apply (auto simp add: atMost_plus_one_nat fib_plus_2_nat ring_simps) + apply (auto simp add: atMost_plus_one_nat fib_plus_2_nat field_simps) done theorem fib_mult_eq_setsum'_nat: diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Number_Theory/Residues.thy --- a/src/HOL/Number_Theory/Residues.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Number_Theory/Residues.thy Tue May 04 20:30:22 2010 +0200 @@ -69,7 +69,7 @@ apply (subst mod_add_eq [symmetric]) apply (subst mult_commute) apply (subst zmod_zmult1_eq [symmetric]) - apply (simp add: ring_simps) + apply (simp add: field_simps) done end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Orderings.thy --- a/src/HOL/Orderings.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Orderings.thy Tue May 04 20:30:22 2010 +0200 @@ -106,7 +106,7 @@ text {* Dual order *} lemma dual_preorder: - "preorder (op \) (op >)" + "class.preorder (op \) (op >)" proof qed (auto simp add: less_le_not_le intro: order_trans) end @@ -186,7 +186,7 @@ text {* Dual order *} lemma dual_order: - "order (op \) (op >)" + "class.order (op \) (op >)" by (intro_locales, rule dual_preorder) (unfold_locales, rule antisym) end @@ -257,8 +257,8 @@ text {* Dual order *} lemma dual_linorder: - "linorder (op \) (op >)" -by (rule linorder.intro, rule dual_order) (unfold_locales, rule linear) + "class.linorder (op \) (op >)" +by (rule class.linorder.intro, rule dual_order) (unfold_locales, rule linear) text {* min/max *} diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Power.thy --- a/src/HOL/Power.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Power.thy Tue May 04 20:30:22 2010 +0200 @@ -392,27 +392,26 @@ by (induct n) (auto simp add: no_zero_divisors elim: contrapos_pp) -lemma power_diff: - fixes a :: "'a::field" +lemma (in field) power_diff: assumes nz: "a \ 0" shows "n \ m \ a ^ (m - n) = a ^ m / a ^ n" - by (induct m n rule: diff_induct) (simp_all add: nz) + by (induct m n rule: diff_induct) (simp_all add: nz field_power_not_zero) text{*Perhaps these should be simprules.*} lemma power_inverse: - fixes a :: "'a::{division_ring,division_by_zero,power}" - shows "inverse (a ^ n) = (inverse a) ^ n" + fixes a :: "'a::division_ring_inverse_zero" + shows "inverse (a ^ n) = inverse a ^ n" apply (cases "a = 0") apply (simp add: power_0_left) apply (simp add: nonzero_power_inverse) done (* TODO: reorient or rename to inverse_power *) lemma power_one_over: - "1 / (a::'a::{field,division_by_zero, power}) ^ n = (1 / a) ^ n" + "1 / (a::'a::{field_inverse_zero, power}) ^ n = (1 / a) ^ n" by (simp add: divide_inverse) (rule power_inverse) lemma power_divide: - "(a / b) ^ n = (a::'a::{field,division_by_zero}) ^ n / b ^ n" + "(a / b) ^ n = (a::'a::field_inverse_zero) ^ n / b ^ n" apply (cases "b 
= 0") apply (simp add: power_0_left) apply (rule nonzero_power_divide) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Predicate.thy --- a/src/HOL/Predicate.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Predicate.thy Tue May 04 20:30:22 2010 +0200 @@ -880,6 +880,10 @@ code_abort not_unique +code_reflect Predicate + datatypes pred = Seq and seq = Empty | Insert | Join + functions map + ML {* signature PREDICATE = sig @@ -893,15 +897,17 @@ structure Predicate : PREDICATE = struct -@{code_datatype pred = Seq}; -@{code_datatype seq = Empty | Insert | Join}; +datatype pred = datatype Predicate.pred +datatype seq = datatype Predicate.seq + +fun map f = Predicate.map f; -fun yield (@{code Seq} f) = next (f ()) -and next @{code Empty} = NONE - | next (@{code Insert} (x, P)) = SOME (x, P) - | next (@{code Join} (P, xq)) = (case yield P +fun yield (Seq f) = next (f ()) +and next Empty = NONE + | next (Insert (x, P)) = SOME (x, P) + | next (Join (P, xq)) = (case yield P of NONE => next xq - | SOME (x, Q) => SOME (x, @{code Seq} (fn _ => @{code Join} (Q, xq)))); + | SOME (x, Q) => SOME (x, Seq (fn _ => Join (Q, xq)))); fun anamorph f k x = (if k = 0 then ([], x) else case f x @@ -912,19 +918,9 @@ fun yieldn P = anamorph yield P; -fun map f = @{code map} f; - end; *} -code_reserved Eval Predicate - -code_type pred and seq - (Eval "_/ Predicate.pred" and "_/ Predicate.seq") - -code_const Seq and Empty and Insert and Join - (Eval "Predicate.Seq" and "Predicate.Empty" and "Predicate.Insert/ (_,/ _)" and "Predicate.Join/ (_,/ _)") - no_notation inf (infixl "\" 70) and sup (infixl "\" 65) and diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Predicate_Compile_Examples/Predicate_Compile_Examples.thy --- a/src/HOL/Predicate_Compile_Examples/Predicate_Compile_Examples.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Predicate_Compile_Examples/Predicate_Compile_Examples.thy Tue May 04 20:30:22 2010 +0200 @@ -686,6 +686,13 @@ (*values [depth_limit = 4] "{x. not_reachable_in_example_graph' 0 4}"*) (* fails with undefined *) (*values [depth_limit = 20] "{x. not_reachable_in_example_graph' 0 4}"*) (* fails with undefined *) +subsection {* Free function variable *} + +inductive FF :: "nat => nat => bool" +where + "f x = y ==> FF x y" + +code_pred FF . subsection {* IMP *} diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Probability/Caratheodory.thy --- a/src/HOL/Probability/Caratheodory.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Probability/Caratheodory.thy Tue May 04 20:30:22 2010 +0200 @@ -1,7 +1,7 @@ header {*Caratheodory Extension Theorem*} theory Caratheodory - imports Sigma_Algebra SupInf SeriesPlus + imports Sigma_Algebra SeriesPlus begin text{*From the Hurd/Coble measure theory development, translated by Lawrence Paulson.*} diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Probability/Information.thy --- a/src/HOL/Probability/Information.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Probability/Information.thy Tue May 04 20:30:22 2010 +0200 @@ -1,169 +1,264 @@ theory Information -imports Probability_Space Product_Measure +imports Probability_Space Product_Measure Convex begin -lemma pos_neg_part_abs: - fixes f :: "'a \ real" - shows "pos_part f x + neg_part f x = \f x\" -unfolding real_abs_def pos_part_def neg_part_def by auto +section "Convex theory" -lemma pos_part_abs: - fixes f :: "'a \ real" - shows "pos_part (\ x. \f x\) y = \f y\" -unfolding pos_part_def real_abs_def by auto - -lemma neg_part_abs: - fixes f :: "'a \ real" - shows "neg_part (\ x. 
\f x\) y = 0" -unfolding neg_part_def real_abs_def by auto +lemma log_setsum: + assumes "finite s" "s \ {}" + assumes "b > 1" + assumes "(\ i \ s. a i) = 1" + assumes "\ i. i \ s \ a i \ 0" + assumes "\ i. i \ s \ y i \ {0 <..}" + shows "log b (\ i \ s. a i * y i) \ (\ i \ s. a i * log b (y i))" +proof - + have "convex_on {0 <..} (\ x. - log b x)" + by (rule minus_log_convex[OF `b > 1`]) + hence "- log b (\ i \ s. a i * y i) \ (\ i \ s. a i * - log b (y i))" + using convex_on_setsum[of _ _ "\ x. - log b x"] assms pos_is_convex by fastsimp + thus ?thesis by (auto simp add:setsum_negf le_imp_neg_le) +qed -lemma (in measure_space) int_abs: - assumes "integrable f" - shows "integrable (\ x. \f x\)" -using assms +lemma log_setsum': + assumes "finite s" "s \ {}" + assumes "b > 1" + assumes "(\ i \ s. a i) = 1" + assumes pos: "\ i. i \ s \ 0 \ a i" + "\ i. \ i \ s ; 0 < a i \ \ 0 < y i" + shows "log b (\ i \ s. a i * y i) \ (\ i \ s. a i * log b (y i))" proof - - from assms obtain p q where pq: "p \ nnfis (pos_part f)" "q \ nnfis (neg_part f)" - unfolding integrable_def by auto - hence "p + q \ nnfis (\ x. pos_part f x + neg_part f x)" - using nnfis_add by auto - hence "p + q \ nnfis (\ x. \f x\)" using pos_neg_part_abs[of f] by simp - thus ?thesis unfolding integrable_def - using ext[OF pos_part_abs[of f], of "\ y. y"] - ext[OF neg_part_abs[of f], of "\ y. y"] - using nnfis_0 by auto + have "\y. (\ i \ s - {i. a i = 0}. a i * y i) = (\ i \ s. a i * y i)" + using assms by (auto intro!: setsum_mono_zero_cong_left) + moreover have "log b (\ i \ s - {i. a i = 0}. a i * y i) \ (\ i \ s - {i. a i = 0}. a i * log b (y i))" + proof (rule log_setsum) + have "setsum a (s - {i. a i = 0}) = setsum a s" + using assms(1) by (rule setsum_mono_zero_cong_left) auto + thus sum_1: "setsum a (s - {i. a i = 0}) = 1" + "finite (s - {i. a i = 0})" using assms by simp_all + + show "s - {i. a i = 0} \ {}" + proof + assume *: "s - {i. a i = 0} = {}" + hence "setsum a (s - {i. a i = 0}) = 0" by (simp add: * setsum_empty) + with sum_1 show False by simp +qed + + fix i assume "i \ s - {i. a i = 0}" + hence "i \ s" "a i \ 0" by simp_all + thus "0 \ a i" "y i \ {0<..}" using pos[of i] by auto + qed fact+ + ultimately show ?thesis by simp qed -lemma (in measure_space) measure_mono: - assumes "a \ b" "a \ sets M" "b \ sets M" - shows "measure M a \ measure M b" +section "Information theory" + +lemma (in finite_prob_space) sum_over_space_distrib: + "(\x\X`space M. distribution X {x}) = 1" + unfolding distribution_def prob_space[symmetric] using finite_space + by (subst measure_finitely_additive'') + (auto simp add: disjoint_family_on_def sets_eq_Pow intro!: arg_cong[where f=prob]) + +locale finite_information_space = finite_prob_space + + fixes b :: real assumes b_gt_1: "1 < b" + +definition + "KL_divergence b M X Y = + measure_space.integral (M\measure := X\) + (\x. log b ((measure_space.RN_deriv (M \measure := Y\ ) X) x))" + +lemma (in finite_prob_space) distribution_mono: + assumes "\t. \ t \ space M ; X t \ x \ \ Y t \ y" + shows "distribution X x \ distribution Y y" + unfolding distribution_def + using assms by (auto simp: sets_eq_Pow intro!: measure_mono) + +lemma (in prob_space) distribution_remove_const: + shows "joint_distribution X (\x. ()) {(x, ())} = distribution X {x}" + and "joint_distribution (\x. ()) X {((), x)} = distribution X {x}" + and "joint_distribution X (\x. (Y x, ())) {(x, y, ())} = joint_distribution X Y {(x, y)}" + and "joint_distribution X (\x. 
((), Y x)) {(x, (), y)} = joint_distribution X Y {(x, y)}" + and "distribution (\x. ()) {()} = 1" + unfolding prob_space[symmetric] + by (auto intro!: arg_cong[where f=prob] simp: distribution_def) + + +context finite_information_space +begin + +lemma distribution_mono_gt_0: + assumes gt_0: "0 < distribution X x" + assumes *: "\t. \ t \ space M ; X t \ x \ \ Y t \ y" + shows "0 < distribution Y y" + by (rule less_le_trans[OF gt_0 distribution_mono]) (rule *) + +lemma + assumes "0 \ A" and pos: "0 < A \ 0 < B" "0 < A \ 0 < C" + shows mult_log_mult: "A * log b (B * C) = A * log b B + A * log b C" (is "?mult") + and mult_log_divide: "A * log b (B / C) = A * log b B - A * log b C" (is "?div") proof - - have "b = a \ (b - a)" using assms by auto - moreover have "{} = a \ (b - a)" by auto - ultimately have "measure M b = measure M a + measure M (b - a)" - using measure_additive[of a "b - a"] local.Diff[of b a] assms by auto - moreover have "measure M (b - a) \ 0" using positive assms by auto - ultimately show "measure M a \ measure M b" by auto + have "?mult \ ?div" +proof (cases "A = 0") + case False + hence "0 < A" using `0 \ A` by auto + with pos[OF this] show "?mult \ ?div" using b_gt_1 + by (auto simp: log_divide log_mult field_simps) +qed simp + thus ?mult and ?div by auto qed -lemma (in measure_space) integral_0: - fixes f :: "'a \ real" - assumes "integrable f" "integral f = 0" "nonneg f" and borel: "f \ borel_measurable M" - shows "measure M ({x. f x \ 0} \ space M) = 0" -proof - - have "{x. f x \ 0} = {x. \f x\ > 0}" by auto - moreover - { fix y assume "y \ {x. \ f x \ > 0}" - hence "\ f y \ > 0" by auto - hence "\ n. \f y\ \ inverse (real (Suc n))" - using ex_inverse_of_nat_Suc_less[of "\f y\"] less_imp_le unfolding real_of_nat_def by auto - hence "y \ (\ n. {x. \f x\ \ inverse (real (Suc n))})" - by auto } - moreover - { fix y assume "y \ (\ n. {x. \f x\ \ inverse (real (Suc n))})" - then obtain n where n: "y \ {x. \f x\ \ inverse (real (Suc n))}" by auto - hence "\f y\ \ inverse (real (Suc n))" by auto - hence "\f y\ > 0" - using real_of_nat_Suc_gt_zero - positive_imp_inverse_positive[of "real_of_nat (Suc n)"] by fastsimp - hence "y \ {x. \f x\ > 0}" by auto } - ultimately have fneq0_UN: "{x. f x \ 0} = (\ n. {x. \f x\ \ inverse (real (Suc n))})" - by blast - { fix n - have int_one: "integrable (\ x. \f x\ ^ 1)" using int_abs assms by auto - have "measure M (f -` {inverse (real (Suc n))..} \ space M) - \ integral (\ x. \f x\ ^ 1) / (inverse (real (Suc n)) ^ 1)" - using markov_ineq[OF `integrable f` _ int_one] real_of_nat_Suc_gt_zero by auto - hence le0: "measure M (f -` {inverse (real (Suc n))..} \ space M) \ 0" - using assms unfolding nonneg_def by auto - have "{x. f x \ inverse (real (Suc n))} \ space M \ sets M" - apply (subst Int_commute) unfolding Int_def - using borel[unfolded borel_measurable_ge_iff] by simp - hence m0: "measure M ({x. f x \ inverse (real (Suc n))} \ space M) = 0 \ - {x. f x \ inverse (real (Suc n))} \ space M \ sets M" - using positive le0 unfolding atLeast_def by fastsimp } - moreover hence "range (\ n. {x. f x \ inverse (real (Suc n))} \ space M) \ sets M" - by auto - moreover - { fix n - have "inverse (real (Suc n)) \ inverse (real (Suc (Suc n)))" - using less_imp_inverse_less real_of_nat_Suc_gt_zero[of n] by fastsimp - hence "\ x. f x \ inverse (real (Suc n)) \ f x \ inverse (real (Suc (Suc n)))" by (rule order_trans) - hence "{x. f x \ inverse (real (Suc n))} \ space M - \ {x. 
f x \ inverse (real (Suc (Suc n)))} \ space M" by auto } - ultimately have "(\ x. 0) ----> measure M (\ n. {x. f x \ inverse (real (Suc n))} \ space M)" - using monotone_convergence[of "\ n. {x. f x \ inverse (real (Suc n))} \ space M"] - unfolding o_def by (simp del: of_nat_Suc) - hence "measure M (\ n. {x. f x \ inverse (real (Suc n))} \ space M) = 0" - using LIMSEQ_const[of 0] LIMSEQ_unique by simp - hence "measure M ((\ n. {x. \f x\ \ inverse (real (Suc n))}) \ space M) = 0" - using assms unfolding nonneg_def by auto - thus "measure M ({x. f x \ 0} \ space M) = 0" using fneq0_UN by simp +lemma split_pairs: + shows + "((A, B) = X) \ (fst X = A \ snd X = B)" and + "(X = (A, B)) \ (fst X = A \ snd X = B)" by auto + +ML {* + + (* tactic to solve equations of the form @{term "W * log b (X / (Y * Z)) = W * log b X - W * log b (Y * Z)"} + where @{term W} is a joint distribution of @{term X}, @{term Y}, and @{term Z}. *) + + val mult_log_intros = [@{thm mult_log_divide}, @{thm mult_log_mult}] + val intros = [@{thm divide_pos_pos}, @{thm mult_pos_pos}, @{thm positive_distribution}] + + val distribution_gt_0_tac = (rtac @{thm distribution_mono_gt_0} + THEN' assume_tac + THEN' clarsimp_tac (clasimpset_of @{context} addsimps2 @{thms split_pairs})) + + val distr_mult_log_eq_tac = REPEAT_ALL_NEW (CHANGED o TRY o + (resolve_tac (mult_log_intros @ intros) + ORELSE' distribution_gt_0_tac + ORELSE' clarsimp_tac (clasimpset_of @{context}))) + + fun instanciate_term thy redex intro = + let + val intro_concl = Thm.concl_of intro + + val lhs = intro_concl |> HOLogic.dest_Trueprop |> HOLogic.dest_eq |> fst + + val m = SOME (Pattern.match thy (lhs, redex) (Vartab.empty, Vartab.empty)) + handle Pattern.MATCH => NONE + + in + Option.map (fn m => Envir.subst_term m intro_concl) m + end + + fun mult_log_simproc simpset redex = + let + val ctxt = Simplifier.the_context simpset + val thy = ProofContext.theory_of ctxt + fun prove (SOME thm) = (SOME + (Goal.prove ctxt [] [] thm (K (distr_mult_log_eq_tac 1)) + |> mk_meta_eq) + handle THM _ => NONE) + | prove NONE = NONE + in + get_first (instanciate_term thy (term_of redex) #> prove) mult_log_intros + end +*} + +simproc_setup mult_log ("distribution X x * log b (A * B)" | + "distribution X x * log b (A / B)") = {* K mult_log_simproc *} + +end + +lemma KL_divergence_eq_finite: + assumes u: "finite_measure_space (M\measure := u\)" + assumes v: "finite_measure_space (M\measure := v\)" + assumes u_0: "\x. \ x \ space M ; v {x} = 0 \ \ u {x} = 0" + shows "KL_divergence b M u v = (\x\space M. u {x} * log b (u {x} / v {x}))" (is "_ = ?sum") +proof (simp add: KL_divergence_def, subst finite_measure_space.integral_finite_singleton, simp_all add: u) + have ms_u: "measure_space (M\measure := u\)" + using u unfolding finite_measure_space_def by simp + + show "(\x \ space M. log b (measure_space.RN_deriv (M\measure := v\) u x) * u {x}) = ?sum" + apply (rule setsum_cong[OF refl]) + apply simp + apply (safe intro!: arg_cong[where f="log b"] ) + apply (subst finite_measure_space.RN_deriv_finite_singleton) + using assms ms_u by auto qed -definition - "KL_divergence b M u v = - measure_space.integral (M\measure := u\) - (\x. 
log b ((measure_space.RN_deriv (M \measure := v\ ) u) x))" - -lemma (in finite_prob_space) finite_measure_space: - shows "finite_measure_space \ space = X ` space M, sets = Pow (X ` space M), measure = distribution X\" - (is "finite_measure_space ?S") -proof (rule finite_Pow_additivity_sufficient, simp_all) - show "finite (X ` space M)" using finite_space by simp - - show "positive ?S (distribution X)" unfolding distribution_def - unfolding positive_def using positive'[unfolded positive_def] sets_eq_Pow by auto +lemma log_setsum_divide: + assumes "finite S" and "S \ {}" and "1 < b" + assumes "(\x\S. g x) = 1" + assumes pos: "\x. x \ S \ g x \ 0" "\x. x \ S \ f x \ 0" + assumes g_pos: "\x. \ x \ S ; 0 < g x \ \ 0 < f x" + shows "- (\x\S. g x * log b (g x / f x)) \ log b (\x\S. f x)" +proof - + have log_mono: "\x y. 0 < x \ x \ y \ log b x \ log b y" + using `1 < b` by (subst log_le_cancel_iff) auto - show "additive ?S (distribution X)" unfolding additive_def distribution_def - proof (simp, safe) - fix x y - have x: "(X -` x) \ space M \ sets M" - and y: "(X -` y) \ space M \ sets M" using sets_eq_Pow by auto - assume "x \ y = {}" - from additive[unfolded additive_def, rule_format, OF x y] this - have "prob (((X -` x) \ (X -` y)) \ space M) = - prob ((X -` x) \ space M) + prob ((X -` y) \ space M)" - apply (subst Int_Un_distrib2) - by auto - thus "prob ((X -` x \ X -` y) \ space M) = prob (X -` x \ space M) + prob (X -` y \ space M)" - by auto + have "- (\x\S. g x * log b (g x / f x)) = (\x\S. g x * log b (f x / g x))" + proof (unfold setsum_negf[symmetric], rule setsum_cong) + fix x assume x: "x \ S" + show "- (g x * log b (g x / f x)) = g x * log b (f x / g x)" + proof (cases "g x = 0") + case False + with pos[OF x] g_pos[OF x] have "0 < f x" "0 < g x" by simp_all + thus ?thesis using `1 < b` by (simp add: log_divide field_simps) + qed simp + qed rule + also have "... \ log b (\x\S. g x * (f x / g x))" + proof (rule log_setsum') + fix x assume x: "x \ S" "0 < g x" + with g_pos[OF x] show "0 < f x / g x" by (safe intro!: divide_pos_pos) + qed fact+ + also have "... = log b (\x\S - {x. g x = 0}. f x)" using `finite S` + by (auto intro!: setsum_mono_zero_cong_right arg_cong[where f="log b"] + split: split_if_asm) + also have "... \ log b (\x\S. f x)" + proof (rule log_mono) + have "0 = (\x\S - {x. g x = 0}. 0)" by simp + also have "... < (\x\S - {x. g x = 0}. f x)" (is "_ < ?sum") + proof (rule setsum_strict_mono) + show "finite (S - {x. g x = 0})" using `finite S` by simp + show "S - {x. g x = 0} \ {}" + proof + assume "S - {x. g x = 0} = {}" + hence "(\x\S. g x) = 0" by (subst setsum_0') auto + with `(\x\S. g x) = 1` show False by simp + qed + fix x assume "x \ S - {x. g x = 0}" + thus "0 < f x" using g_pos[of x] pos(1)[of x] by auto + qed + finally show "0 < ?sum" . + show "(\x\S - {x. g x = 0}. f x) \ (\x\S. f x)" + using `finite S` pos by (auto intro!: setsum_mono2) qed + finally show ?thesis . qed -lemma (in finite_prob_space) finite_prob_space: - "finite_prob_space \ space = X ` space M, sets = Pow (X ` space M), measure = distribution X\" - (is "finite_prob_space ?S") - unfolding finite_prob_space_def prob_space_def prob_space_axioms_def -proof safe - show "finite_measure_space ?S" by (rule finite_measure_space) - thus "measure_space ?S" by (simp add: finite_measure_space_def) +lemma KL_divergence_positive_finite: + assumes u: "finite_prob_space (M\measure := u\)" + assumes v: "finite_prob_space (M\measure := v\)" + assumes u_0: "\x. 
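(* log_setsum_divide is a finite Gibbs inequality: for weights g with \sum_x g x = 1,
     - \sum_x g x \log_b ( g x / f x )  \le  \log_b ( \sum_x f x ).
   KL_divergence_positive_finite instantiates g and f with the two probability distributions
   u and v, so the right-hand side is \log_b 1 = 0 and hence 0 \le D_b(u || v). *)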
\ x \ space M ; v {x} = 0 \ \ u {x} = 0" + and "1 < b" + shows "0 \ KL_divergence b M u v" +proof - + interpret u: finite_prob_space "M\measure := u\" using u . + interpret v: finite_prob_space "M\measure := v\" using v . - have "X -` X ` space M \ space M = space M" by auto - thus "measure ?S (space ?S) = 1" - by (simp add: distribution_def prob_space) -qed + have *: "space M \ {}" using u.not_empty by simp -lemma (in finite_prob_space) finite_measure_space_image_prod: - "finite_measure_space \space = X ` space M \ Y ` space M, - sets = Pow (X ` space M \ Y ` space M), measure_space.measure = distribution (\x. (X x, Y x))\" - (is "finite_measure_space ?Z") -proof (rule finite_Pow_additivity_sufficient, simp_all) - show "finite (X ` space M \ Y ` space M)" using finite_space by simp + have "- (KL_divergence b M u v) \ log b (\x\space M. v {x})" + proof (subst KL_divergence_eq_finite, safe intro!: log_setsum_divide *) + show "finite_measure_space (M\measure := u\)" + "finite_measure_space (M\measure := v\)" + using u v unfolding finite_prob_space_eq by simp_all - let ?d = "distribution (\x. (X x, Y x))" + show "finite (space M)" using u.finite_space by simp + show "1 < b" by fact + show "(\x\space M. u {x}) = 1" using u.sum_over_space_eq_1 by simp - show "positive ?Z ?d" - using sets_eq_Pow by (auto simp: positive_def distribution_def intro!: positive) + fix x assume x: "x \ space M" + thus pos: "0 \ u {x}" "0 \ v {x}" + using u.positive u.sets_eq_Pow v.positive v.sets_eq_Pow by simp_all - show "additive ?Z ?d" unfolding additive_def - proof safe - fix x y assume "x \ sets ?Z" and "y \ sets ?Z" - assume "x \ y = {}" - thus "?d (x \ y) = ?d x + ?d y" - apply (simp add: distribution_def) - apply (subst measure_additive[unfolded sets_eq_Pow, simplified]) - by (auto simp: Un_Int_distrib Un_Int_distrib2 intro!: arg_cong[where f=prob]) + { assume "v {x} = 0" from u_0[OF x this] show "u {x} = 0" . } + { assume "0 < u {x}" + hence "v {x} \ 0" using u_0[OF x] by auto + with pos show "0 < v {x}" by simp } qed + thus "0 \ KL_divergence b M u v" using v.sum_over_space_eq_1 by simp qed definition (in prob_space) @@ -174,346 +269,142 @@ in KL_divergence b prod_space (joint_distribution X Y) (measure prod_space)" -abbreviation (in finite_prob_space) - finite_mutual_information ("\\<^bsub>_\<^esub>'(_ ; _')") where - "\\<^bsub>b\<^esub>(X ; Y) \ mutual_information b +abbreviation (in finite_information_space) + finite_mutual_information ("\'(_ ; _')") where + "\(X ; Y) \ mutual_information b \ space = X`space M, sets = Pow (X`space M) \ \ space = Y`space M, sets = Pow (Y`space M) \ X Y" -abbreviation (in finite_prob_space) - finite_mutual_information_2 :: "('a \ 'c) \ ('a \ 'd) \ real" ("\'(_ ; _')") where - "\(X ; Y) \ \\<^bsub>2\<^esub>(X ; Y)" +lemma (in finite_measure_space) measure_spaceI: "measure_space M" + by unfold_locales -lemma (in prob_space) mutual_information_cong: - assumes [simp]: "space S1 = space S3" "sets S1 = sets S3" - "space S2 = space S4" "sets S2 = sets S4" - shows "mutual_information b S1 S2 X Y = mutual_information b S3 S4 X Y" - unfolding mutual_information_def by simp +lemma prod_measure_times_finite: + assumes fms: "finite_measure_space M" "finite_measure_space M'" and a: "a \ space M \ space M'" + shows "prod_measure M M' {a} = measure M {fst a} * measure M' {snd a}" +proof (cases a) + case (Pair b c) + hence a_eq: "{a} = {b} \ {c}" by simp -lemma (in prob_space) joint_distribution: - "joint_distribution X Y = distribution (\x. 
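(* mutual_information is thus defined as a KL divergence: the joint distribution of X and Y is
   compared against the measure of the underlying product space, which for the image spaces
   used by the finite_mutual_information abbreviation is the product of the two marginals, i.e.
     I(X;Y) = D_b( p_{XY} || p_X \otimes p_Y ). *)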
(X x, Y x))" - unfolding joint_distribution_def_raw distribution_def_raw .. + with fms[THEN finite_measure_space.measure_spaceI] + fms[THEN finite_measure_space.sets_eq_Pow] a Pair + show ?thesis unfolding a_eq + by (subst prod_measure_times) simp_all +qed -lemma (in finite_prob_space) finite_mutual_information_reduce: - "\\<^bsub>b\<^esub>(X;Y) = (\ (x,y) \ X ` space M \ Y ` space M. - distribution (\x. (X x, Y x)) {(x,y)} * log b (distribution (\x. (X x, Y x)) {(x,y)} / - (distribution X {x} * distribution Y {y})))" - (is "_ = setsum ?log ?prod") - unfolding Let_def mutual_information_def KL_divergence_def -proof (subst finite_measure_space.integral_finite_singleton, simp_all add: joint_distribution) - let ?X = "\space = X ` space M, sets = Pow (X ` space M), measure_space.measure = distribution X\" - let ?Y = "\space = Y ` space M, sets = Pow (Y ` space M), measure_space.measure = distribution Y\" - let ?P = "prod_measure_space ?X ?Y" +lemma setsum_cartesian_product': + "(\x\A \ B. f x) = (\x\A. setsum (\y. f (x, y)) B)" + unfolding setsum_cartesian_product by simp - interpret X: finite_measure_space "?X" by (rule finite_measure_space) - moreover interpret Y: finite_measure_space "?Y" by (rule finite_measure_space) - ultimately have ms_X: "measure_space ?X" and ms_Y: "measure_space ?Y" by unfold_locales - - interpret P: finite_measure_space "?P" by (rule finite_measure_space_finite_prod_measure) (fact+) - - let ?P' = "measure_update (\_. distribution (\x. (X x, Y x))) ?P" - from finite_measure_space_image_prod[of X Y] - sigma_prod_sets_finite[OF X.finite_space Y.finite_space] - show "finite_measure_space ?P'" - by (simp add: X.sets_eq_Pow Y.sets_eq_Pow joint_distribution finite_measure_space_def prod_measure_space_def) +lemma (in finite_information_space) + assumes MX: "finite_prob_space \ space = space MX, sets = sets MX, measure = distribution X\" + (is "finite_prob_space ?MX") + assumes MY: "finite_prob_space \ space = space MY, sets = sets MY, measure = distribution Y\" + (is "finite_prob_space ?MY") + and X_space: "X ` space M \ space MX" and Y_space: "Y ` space M \ space MY" + shows mutual_information_eq_generic: + "mutual_information b MX MY X Y = (\ (x,y) \ space MX \ space MY. + joint_distribution X Y {(x,y)} * + log b (joint_distribution X Y {(x,y)} / + (distribution X {x} * distribution Y {y})))" + (is "?equality") + and mutual_information_positive_generic: + "0 \ mutual_information b MX MY X Y" (is "?positive") +proof - + let ?P = "prod_measure_space ?MX ?MY" + let ?measure = "joint_distribution X Y" + let ?P' = "measure_update (\_. ?measure) ?P" - show "(\x \ space ?P. log b (measure_space.RN_deriv ?P (distribution (\x. (X x, Y x))) x) * distribution (\x. (X x, Y x)) {x}) - = setsum ?log ?prod" - proof (rule setsum_cong) - show "space ?P = ?prod" unfolding prod_measure_space_def by simp - next - fix x assume x: "x \ X ` space M \ Y ` space M" - then obtain d e where x_Pair: "x = (d, e)" - and d: "d \ X ` space M" - and e: "e \ Y ` space M" by auto - - { fix x assume m_0: "measure ?P {x} = 0" - have "distribution (\x. (X x, Y x)) {x} = 0" - proof (cases x) - case (Pair a b) - hence "(\x. (X x, Y x)) -` {x} \ space M = (X -` {a} \ space M) \ (Y -` {b} \ space M)" - and x_prod: "{x} = {a} \ {b}" by auto + interpret X: finite_prob_space "?MX" using MX . + moreover interpret Y: finite_prob_space "?MY" using MY . + ultimately have ms_X: "measure_space ?MX" + and ms_Y: "measure_space ?MY" by unfold_locales - let ?PROD = "(\x. 
(X x, Y x)) -` {x} \ space M" + have fms_P: "finite_measure_space ?P" + by (rule finite_measure_space_finite_prod_measure) fact+ + + have fms_P': "finite_measure_space ?P'" + using finite_product_measure_space[of "space MX" "space MY"] + X.finite_space Y.finite_space sigma_prod_sets_finite[OF X.finite_space Y.finite_space] + X.sets_eq_Pow Y.sets_eq_Pow + by (simp add: prod_measure_space_def) - show ?thesis - proof (cases "{a} \ X ` space M \ {b} \ Y ` space M") - case False - hence "?PROD = {}" - unfolding Pair by auto - thus ?thesis by (auto simp: distribution_def) - next - have [intro]: "prob ?PROD \ 0 \ prob ?PROD = 0" - using sets_eq_Pow by (auto intro!: positive real_le_antisym[of _ 0]) + { fix x assume "x \ space ?P" + hence x_in_MX: "{fst x} \ sets MX" using X.sets_eq_Pow + by (auto simp: prod_measure_space_def) + + assume "measure ?P {x} = 0" + with prod_measure_times[OF ms_X ms_Y, of "{fst x}" "{snd x}"] x_in_MX + have "distribution X {fst x} = 0 \ distribution Y {snd x} = 0" + by (simp add: prod_measure_space_def) + + hence "joint_distribution X Y {x} = 0" + by (cases x) (auto simp: distribution_order) } + note measure_0 = this - case True - with prod_measure_times[OF ms_X ms_Y, simplified, of "{a}" "{b}"] - have "prob (X -` {a} \ space M) = 0 \ prob (Y -` {b} \ space M) = 0" (is "?X_0 \ ?Y_0") using m_0 - by (simp add: prod_measure_space_def distribution_def Pair) - thus ?thesis - proof (rule disjE) - assume ?X_0 - have "prob ?PROD \ prob (X -` {a} \ space M)" - using sets_eq_Pow Pair by (auto intro!: measure_mono) - thus ?thesis using `?X_0` by (auto simp: distribution_def) - next - assume ?Y_0 - have "prob ?PROD \ prob (Y -` {b} \ space M)" - using sets_eq_Pow Pair by (auto intro!: measure_mono) - thus ?thesis using `?Y_0` by (auto simp: distribution_def) - qed - qed - qed } - note measure_zero_joint_distribution = this + show ?equality + unfolding Let_def mutual_information_def using fms_P fms_P' measure_0 MX MY + by (subst KL_divergence_eq_finite) + (simp_all add: prod_measure_space_def prod_measure_times_finite + finite_prob_space_eq setsum_cartesian_product') - show "log b (measure_space.RN_deriv ?P (distribution (\x. (X x, Y x))) x) * distribution (\x. (X x, Y x)) {x} = ?log x" - apply (cases "distribution (\x. (X x, Y x)) {x} \ 0") - apply (subst P.RN_deriv_finite_singleton) - proof (simp_all add: x_Pair) - from `finite_measure_space ?P'` show "measure_space ?P'" by (simp add: finite_measure_space_def) - next - fix x assume m_0: "measure ?P {x} = 0" thus "distribution (\x. (X x, Y x)) {x} = 0" by fact - next - show "(d,e) \ space ?P" unfolding prod_measure_space_def using x x_Pair by simp - next - assume jd_0: "distribution (\x. (X x, Y x)) {(d, e)} \ 0" - show "measure ?P {(d,e)} \ 0" - proof - assume "measure ?P {(d,e)} = 0" - from measure_zero_joint_distribution[OF this] jd_0 - show False by simp - qed - next - assume jd_0: "distribution (\x. (X x, Y x)) {(d, e)} \ 0" - with prod_measure_times[OF ms_X ms_Y, simplified, of "{d}" "{e}"] d - show "log b (distribution (\x. (X x, Y x)) {(d, e)} / measure ?P {(d, e)}) = - log b (distribution (\x. 
(X x, Y x)) {(d, e)} / (distribution X {d} * distribution Y {e}))" - by (auto intro!: arg_cong[where f="log b"] simp: prod_measure_space_def) - qed + show ?positive + unfolding Let_def mutual_information_def using measure_0 b_gt_1 + proof (safe intro!: KL_divergence_positive_finite, simp_all) + from ms_X ms_Y X.top Y.top X.prob_space Y.prob_space + have "measure ?P (space ?P) = 1" + by (simp add: prod_measure_space_def, subst prod_measure_times, simp_all) + with fms_P show "finite_prob_space ?P" + by (simp add: finite_prob_space_eq) + + from ms_X ms_Y X.top Y.top X.prob_space Y.prob_space Y.not_empty X_space Y_space + have "measure ?P' (space ?P') = 1" unfolding prob_space[symmetric] + by (auto simp add: prod_measure_space_def distribution_def vimage_Times comp_def + intro!: arg_cong[where f=prob]) + with fms_P' show "finite_prob_space ?P'" + by (simp add: finite_prob_space_eq) qed qed -lemma (in finite_prob_space) distribution_log_split: - assumes "1 < b" - shows - "distribution (\x. (X x, Z x)) {(X x, z)} * log b (distribution (\x. (X x, Z x)) {(X x, z)} / - (distribution X {X x} * distribution Z {z})) = - distribution (\x. (X x, Z x)) {(X x, z)} * log b (distribution (\x. (X x, Z x)) {(X x, z)} / - distribution Z {z}) - - distribution (\x. (X x, Z x)) {(X x, z)} * log b (distribution X {X x})" - (is "?lhs = ?rhs") -proof (cases "distribution (\x. (X x, Z x)) {(X x, z)} = 0") - case True thus ?thesis by simp -next - case False - - let ?dZ = "distribution Z" - let ?dX = "distribution X" - let ?dXZ = "distribution (\x. (X x, Z x))" - - have dist_nneg: "\x X. 0 \ distribution X x" - unfolding distribution_def using sets_eq_Pow by (auto intro: positive) - - have "?lhs = ?dXZ {(X x, z)} * (log b (?dXZ {(X x, z)} / ?dZ {z}) - log b (?dX {X x}))" - proof - - have pos_dXZ: "0 < ?dXZ {(X x, z)}" - using False dist_nneg[of "\x. (X x, Z x)" "{(X x, z)}"] by auto - moreover - have "((\x. (X x, Z x)) -` {(X x, z)}) \ space M \ (X -` {X x}) \ space M" by auto - hence "?dXZ {(X x, z)} \ ?dX {X x}" - unfolding distribution_def - by (rule measure_mono) (simp_all add: sets_eq_Pow) - with pos_dXZ have "0 < ?dX {X x}" by (rule less_le_trans) - moreover - have "((\x. (X x, Z x)) -` {(X x, z)}) \ space M \ (Z -` {z}) \ space M" by auto - hence "?dXZ {(X x, z)} \ ?dZ {z}" - unfolding distribution_def - by (rule measure_mono) (simp_all add: sets_eq_Pow) - with pos_dXZ have "0 < ?dZ {z}" by (rule less_le_trans) - moreover have "0 < b" by (rule less_trans[OF _ `1 < b`]) simp - moreover have "b \ 1" by (rule ccontr) (insert `1 < b`, simp) - ultimately show ?thesis - using pos_dXZ - apply (subst (2) mult_commute) - apply (subst divide_divide_eq_left[symmetric]) - apply (subst log_divide) - by (auto intro: divide_pos_pos) - qed - also have "... = ?rhs" - by (simp add: field_simps) - finally show ?thesis . -qed - -lemma (in finite_prob_space) finite_mutual_information_reduce_prod: - "mutual_information b - \ space = X ` space M, sets = Pow (X ` space M) \ - \ space = Y ` space M \ Z ` space M, sets = Pow (Y ` space M \ Z ` space M) \ - X (\x. (Y x,Z x)) = - (\ (x, y, z) \ X`space M \ Y`space M \ Z`space M. - distribution (\x. (X x, Y x,Z x)) {(x, y, z)} * - log b (distribution (\x. (X x, Y x,Z x)) {(x, y, z)} / - (distribution X {x} * distribution (\x. 
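(* mutual_information_eq is the textbook formula
     I(X;Y) = \sum_{x,y} p(x,y) \log_b ( p(x,y) / ( p(x) p(y) ) ),
   and entropy is then introduced as self-information, H(X) = I(X;X), with the abbreviation
   instantiating it on the image space of X. *)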
(Y x,Z x)) {(y,z)})))" (is "_ = setsum ?log ?space") - unfolding Let_def mutual_information_def KL_divergence_def using finite_space -proof (subst finite_measure_space.integral_finite_singleton, - simp_all add: prod_measure_space_def sigma_prod_sets_finite joint_distribution) - let ?sets = "Pow (X ` space M \ Y ` space M \ Z ` space M)" - and ?measure = "distribution (\x. (X x, Y x, Z x))" - let ?P = "\ space = ?space, sets = ?sets, measure = ?measure\" - - show "finite_measure_space ?P" - proof (rule finite_Pow_additivity_sufficient, simp_all) - show "finite ?space" using finite_space by auto - - show "positive ?P ?measure" - using sets_eq_Pow by (auto simp: positive_def distribution_def intro!: positive) - - show "additive ?P ?measure" - proof (simp add: additive_def distribution_def, safe) - fix x y assume "x \ ?space" and "y \ ?space" - assume "x \ y = {}" - thus "prob (((\x. (X x, Y x, Z x)) -` x \ (\x. (X x, Y x, Z x)) -` y) \ space M) = - prob ((\x. (X x, Y x, Z x)) -` x \ space M) + prob ((\x. (X x, Y x, Z x)) -` y \ space M)" - apply (subst measure_additive[unfolded sets_eq_Pow, simplified]) - by (auto simp: Un_Int_distrib Un_Int_distrib2 intro!: arg_cong[where f=prob]) - qed - qed +lemma (in finite_information_space) mutual_information_eq: + "\(X;Y) = (\ (x,y) \ X ` space M \ Y ` space M. + distribution (\x. (X x, Y x)) {(x,y)} * log b (distribution (\x. (X x, Y x)) {(x,y)} / + (distribution X {x} * distribution Y {y})))" + by (subst mutual_information_eq_generic) (simp_all add: finite_prob_space_of_images) - let ?X = "\space = X ` space M, sets = Pow (X ` space M), measure = distribution X\" - and ?YZ = "\space = Y ` space M \ Z ` space M, sets = Pow (Y ` space M \ Z ` space M), measure = distribution (\x. (Y x, Z x))\" - let ?u = "prod_measure ?X ?YZ" - - from finite_measure_space[of X] finite_measure_space_image_prod[of Y Z] - have ms_X: "measure_space ?X" and ms_YZ: "measure_space ?YZ" - by (simp_all add: finite_measure_space_def) - - show "(\x \ ?space. log b (measure_space.RN_deriv \space=?space, sets=?sets, measure=?u\ - (distribution (\x. (X x, Y x, Z x))) x) * distribution (\x. (X x, Y x, Z x)) {x}) - = setsum ?log ?space" - proof (rule setsum_cong) - fix x assume x: "x \ ?space" - then obtain d e f where x_Pair: "x = (d, e, f)" - and d: "d \ X ` space M" - and e: "e \ Y ` space M" - and f: "f \ Z ` space M" by auto - - { fix x assume m_0: "?u {x} = 0" - - let ?PROD = "(\x. (X x, Y x, Z x)) -` {x} \ space M" - obtain a b c where Pair: "x = (a, b, c)" by (cases x) - hence "?PROD = (X -` {a} \ space M) \ ((\x. (Y x, Z x)) -` {(b, c)} \ space M)" - and x_prod: "{x} = {a} \ {(b, c)}" by auto - - have "distribution (\x. (X x, Y x, Z x)) {x} = 0" - proof (cases "{a} \ X ` space M") - case False - hence "?PROD = {}" - unfolding Pair by auto - thus ?thesis by (auto simp: distribution_def) - next - have [intro]: "prob ?PROD \ 0 \ prob ?PROD = 0" - using sets_eq_Pow by (auto intro!: positive real_le_antisym[of _ 0]) - - case True - with prod_measure_times[OF ms_X ms_YZ, simplified, of "{a}" "{(b,c)}"] - have "prob (X -` {a} \ space M) = 0 \ prob ((\x. 
(Y x, Z x)) -` {(b, c)} \ space M) = 0" - (is "prob ?X = 0 \ prob ?Y = 0") using m_0 - by (simp add: prod_measure_space_def distribution_def Pair) - thus ?thesis - proof (rule disjE) - assume "prob ?X = 0" - have "prob ?PROD \ prob ?X" - using sets_eq_Pow Pair by (auto intro!: measure_mono) - thus ?thesis using `prob ?X = 0` by (auto simp: distribution_def) - next - assume "prob ?Y = 0" - have "prob ?PROD \ prob ?Y" - using sets_eq_Pow Pair by (auto intro!: measure_mono) - thus ?thesis using `prob ?Y = 0` by (auto simp: distribution_def) - qed - qed } - note measure_zero_joint_distribution = this - - from x_Pair d e f finite_space - show "log b (measure_space.RN_deriv \space=?space, sets=?sets, measure=?u\ - (distribution (\x. (X x, Y x, Z x))) x) * distribution (\x. (X x, Y x, Z x)) {x} = ?log x" - apply (cases "distribution (\x. (X x, Y x, Z x)) {x} \ 0") - apply (subst finite_measure_space.RN_deriv_finite_singleton) - proof simp_all - show "measure_space ?P" using `finite_measure_space ?P` by (simp add: finite_measure_space_def) - - from finite_measure_space_finite_prod_measure[OF finite_measure_space[of X] - finite_measure_space_image_prod[of Y Z]] finite_space - show "finite_measure_space \space=?space, sets=?sets, measure=?u\" - by (simp add: prod_measure_space_def sigma_prod_sets_finite) - next - fix x assume "?u {x} = 0" thus "distribution (\x. (X x, Y x, Z x)) {x} = 0" by fact - next - assume jd_0: "distribution (\x. (X x, Y x, Z x)) {(d, e, f)} \ 0" - show "?u {(d,e,f)} \ 0" - proof - assume "?u {(d, e, f)} = 0" - from measure_zero_joint_distribution[OF this] jd_0 - show False by simp - qed - next - assume jd_0: "distribution (\x. (X x, Y x, Z x)) {(d, e, f)} \ 0" - with prod_measure_times[OF ms_X ms_YZ, simplified, of "{d}" "{(e,f)}"] d - show "log b (distribution (\x. (X x, Y x, Z x)) {(d, e, f)} / ?u {(d, e, f)}) = - log b (distribution (\x. (X x, Y x, Z x)) {(d, e, f)} / (distribution X {d} * distribution (\x. (Y x, Z x)) {(e,f)}))" - by (auto intro!: arg_cong[where f="log b"] simp: prod_measure_space_def) - qed - qed simp -qed +lemma (in finite_information_space) mutual_information_positive: "0 \ \(X;Y)" + by (subst mutual_information_positive_generic) (simp_all add: finite_prob_space_of_images) definition (in prob_space) "entropy b s X = mutual_information b s s X X" -abbreviation (in finite_prob_space) - finite_entropy ("\\<^bsub>_\<^esub>'(_')") where - "\\<^bsub>b\<^esub>(X) \ entropy b \ space = X`space M, sets = Pow (X`space M) \ X" - -abbreviation (in finite_prob_space) - finite_entropy_2 ("\'(_')") where - "\(X) \ \\<^bsub>2\<^esub>(X)" +abbreviation (in finite_information_space) + finite_entropy ("\'(_')") where + "\(X) \ entropy b \ space = X`space M, sets = Pow (X`space M) \ X" -lemma (in finite_prob_space) finite_entropy_reduce: - assumes "1 < b" - shows "\\<^bsub>b\<^esub>(X) = -(\ x \ X ` space M. distribution X {x} * log b (distribution X {x}))" +lemma (in finite_information_space) joint_distribution_remove[simp]: + "joint_distribution X X {(x, x)} = distribution X {x}" + unfolding distribution_def by (auto intro!: arg_cong[where f=prob]) + +lemma (in finite_information_space) entropy_eq: + "\(X) = -(\ x \ X ` space M. distribution X {x} * log b (distribution X {x}))" proof - - have fin: "finite (X ` space M)" using finite_space by simp - - have If_mult_distr: "\A B C D. If A B C * D = If A (B * D) (C * D)" by auto - + { fix f { fix x y have "(\x. (X x, X x)) -` {(x, y)} = (if x = y then X -` {x} else {})" by auto - hence "distribution (\x. 
(X x, X x)) {(x,y)} = (if x = y then distribution X {x} else 0)" + hence "distribution (\x. (X x, X x)) {(x,y)} * f x y = (if x = y then distribution X {x} * f x y else 0)" unfolding distribution_def by auto } - moreover - have "\x. 0 \ distribution X x" - unfolding distribution_def using finite_space sets_eq_Pow by (auto intro: positive) - hence "\x. distribution X x \ 0 \ 0 < distribution X x" by (auto simp: le_less) - ultimately - show ?thesis using `1 < b` - by (auto intro!: setsum_cong - simp: log_inverse If_mult_distr setsum_cases[OF fin] inverse_eq_divide[symmetric] - entropy_def setsum_negf[symmetric] joint_distribution finite_mutual_information_reduce - setsum_cartesian_product[symmetric]) + hence "(\(x, y) \ X ` space M \ X ` space M. joint_distribution X X {(x, y)} * f x y) = + (\x \ X ` space M. distribution X {x} * f x x)" + unfolding setsum_cartesian_product' by (simp add: setsum_cases finite_space) } + note remove_cartesian_product = this + + show ?thesis + unfolding entropy_def mutual_information_eq setsum_negf[symmetric] remove_cartesian_product + by (auto intro!: setsum_cong) qed -lemma log_inj: assumes "1 < b" shows "inj_on (log b) {0 <..}" -proof (rule inj_onI, simp) - fix x y assume pos: "0 < x" "0 < y" and *: "log b x = log b y" - show "x = y" - proof (cases rule: linorder_cases) - assume "x < y" hence "log b x < log b y" - using log_less_cancel_iff[OF `1 < b`] pos by simp - thus ?thesis using * by simp - next - assume "y < x" hence "log b y < log b x" - using log_less_cancel_iff[OF `1 < b`] pos by simp - thus ?thesis using * by simp - qed simp -qed +lemma (in finite_information_space) entropy_positive: "0 \ \(X)" + unfolding entropy_def using mutual_information_positive . definition (in prob_space) "conditional_mutual_information b s1 s2 s3 X Y Z \ @@ -524,160 +415,181 @@ mutual_information b s1 prod_space X (\x. (Y x, Z x)) - mutual_information b s1 s3 X Z" -abbreviation (in finite_prob_space) - finite_conditional_mutual_information ("\\<^bsub>_\<^esub>'( _ ; _ | _ ')") where - "\\<^bsub>b\<^esub>(X ; Y | Z) \ conditional_mutual_information b +abbreviation (in finite_information_space) + finite_conditional_mutual_information ("\'( _ ; _ | _ ')") where + "\(X ; Y | Z) \ conditional_mutual_information b \ space = X`space M, sets = Pow (X`space M) \ \ space = Y`space M, sets = Pow (Y`space M) \ \ space = Z`space M, sets = Pow (Z`space M) \ X Y Z" -abbreviation (in finite_prob_space) - finite_conditional_mutual_information_2 ("\'( _ ; _ | _ ')") where - "\(X ; Y | Z) \ \\<^bsub>2\<^esub>(X ; Y | Z)" +lemma (in finite_information_space) setsum_distribution_gen: + assumes "Z -` {c} \ space M = (\x \ X`space M. Y -` {f x}) \ space M" + and "inj_on f (X`space M)" + shows "(\x \ X`space M. distribution Y {f x}) = distribution Z {c}" + unfolding distribution_def assms + using finite_space assms + by (subst measure_finitely_additive'') + (auto simp add: disjoint_family_on_def sets_eq_Pow inj_on_def + intro!: arg_cong[where f=prob]) + +lemma (in finite_information_space) setsum_distribution: + "(\x \ X`space M. joint_distribution X Y {(x, y)}) = distribution Y {y}" + "(\y \ Y`space M. joint_distribution X Y {(x, y)}) = distribution X {x}" + "(\x \ X`space M. joint_distribution X (\x. (Y x, Z x)) {(x, y, z)}) = joint_distribution Y Z {(y, z)}" + "(\y \ Y`space M. joint_distribution X (\x. (Y x, Z x)) {(x, y, z)}) = joint_distribution X Z {(x, z)}" + "(\z \ Z`space M. joint_distribution X (\x. 
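(* entropy_eq: since the joint distribution of X with itself lives on the diagonal, the
   self-information collapses to
     H(X) = - \sum_x p(x) \log_b p(x);
   a fair coin, for instance, gives H = -(1/2 \log_b (1/2) + 1/2 \log_b (1/2)) = \log_b 2,
   i.e. one bit for b = 2. The setsum_distribution lemmas are the usual marginalisation
   identities, e.g. \sum_x p(x,y) = p(y). *)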
(Y x, Z x)) {(x, y, z)}) = joint_distribution X Y {(x, y)}" + by (auto intro!: inj_onI setsum_distribution_gen) -lemma image_pair_eq_Sigma: - "(\x. (f x, g x)) ` A = Sigma (f ` A) (\x. g ` (f -` {x} \ A))" -proof (safe intro!: imageI vimageI, simp_all) - fix a b assume *: "a \ A" "b \ A" and eq: "f a = f b" - show "(f b, g a) \ (\x. (f x, g x)) ` A" unfolding eq[symmetric] - using * by auto +lemma (in finite_information_space) conditional_mutual_information_eq_sum: + "\(X ; Y | Z) = + (\(x, y, z)\X ` space M \ (\x. (Y x, Z x)) ` space M. + distribution (\x. (X x, Y x, Z x)) {(x, y, z)} * + log b (distribution (\x. (X x, Y x, Z x)) {(x, y, z)}/ + distribution (\x. (Y x, Z x)) {(y, z)})) - + (\(x, z)\X ` space M \ Z ` space M. + distribution (\x. (X x, Z x)) {(x,z)} * log b (distribution (\x. (X x, Z x)) {(x,z)} / distribution Z {z}))" + (is "_ = ?rhs") +proof - + have setsum_product: + "\f x. (\v\(\x. (Y x, Z x)) ` space M. joint_distribution X (\x. (Y x, Z x)) {(x,v)} * f v) + = (\v\Y ` space M \ Z ` space M. joint_distribution X (\x. (Y x, Z x)) {(x,v)} * f v)" + proof (safe intro!: setsum_mono_zero_cong_left imageI) + fix x y z f + assume *: "(Y y, Z z) \ (\x. (Y x, Z x)) ` space M" and "y \ space M" "z \ space M" + hence "(\x. (X x, Y x, Z x)) -` {(x, Y y, Z z)} \ space M = {}" + proof safe + fix x' assume x': "x' \ space M" and eq: "Y x' = Y y" "Z x' = Z z" + have "(Y y, Z z) \ (\x. (Y x, Z x)) ` space M" using eq[symmetric] x' by auto + thus "x' \ {}" using * by auto + qed + thus "joint_distribution X (\x. (Y x, Z x)) {(x, Y y, Z z)} * f (Y y) (Z z) = 0" + unfolding distribution_def by simp + qed (simp add: finite_space) + + thus ?thesis + unfolding conditional_mutual_information_def Let_def mutual_information_eq + apply (subst mutual_information_eq_generic) + by (auto simp add: prod_measure_space_def sigma_prod_sets_finite finite_space + finite_prob_space_of_images finite_product_prob_space_of_images + setsum_cartesian_product' setsum_product setsum_subtractf setsum_addf + setsum_left_distrib[symmetric] setsum_distribution + cong: setsum_cong) qed -lemma inj_on_swap: "inj_on (\(x,y). (y,x)) A" by (auto intro!: inj_onI) - -lemma (in finite_prob_space) finite_conditional_mutual_information_reduce: - assumes "1 < b" - shows "\\<^bsub>b\<^esub>(X ; Y | Z) = - - (\ (x, z) \ (X ` space M \ Z ` space M). - distribution (\x. (X x, Z x)) {(x,z)} * log b (distribution (\x. (X x, Z x)) {(x,z)} / distribution Z {z})) - + (\ (x, y, z) \ (X ` space M \ (\x. (Y x, Z x)) ` space M). +lemma (in finite_information_space) conditional_mutual_information_eq: + "\(X ; Y | Z) = (\(x, y, z) \ X ` space M \ Y ` space M \ Z ` space M. distribution (\x. (X x, Y x, Z x)) {(x, y, z)} * log b (distribution (\x. (X x, Y x, Z x)) {(x, y, z)}/ - distribution (\x. 
(Y x, Z x)) {(y, z)}))" (is "_ = ?rhs") -unfolding conditional_mutual_information_def Let_def using finite_space -apply (simp add: prod_measure_space_def sigma_prod_sets_finite) -apply (subst mutual_information_cong[of _ "\space = X ` space M, sets = Pow (X ` space M)\" - _ "\space = Y ` space M \ Z ` space M, sets = Pow (Y ` space M \ Z ` space M)\"], simp_all) -apply (subst finite_mutual_information_reduce_prod, simp_all) -apply (subst finite_mutual_information_reduce, simp_all) + (joint_distribution X Z {(x, z)} * joint_distribution Y Z {(y,z)} / distribution Z {z})))" + unfolding conditional_mutual_information_def Let_def mutual_information_eq + apply (subst mutual_information_eq_generic) + by (auto simp add: prod_measure_space_def sigma_prod_sets_finite finite_space + finite_prob_space_of_images finite_product_prob_space_of_images + setsum_cartesian_product' setsum_product setsum_subtractf setsum_addf + setsum_left_distrib[symmetric] setsum_distribution setsum_commute[where A="Y`space M"] + cong: setsum_cong) + +lemma (in finite_information_space) conditional_mutual_information_eq_mutual_information: + "\(X ; Y) = \(X ; Y | (\x. ()))" +proof - + have [simp]: "(\x. ()) ` space M = {()}" using not_empty by auto + + show ?thesis + unfolding conditional_mutual_information_eq mutual_information_eq + by (simp add: setsum_cartesian_product' distribution_remove_const) +qed + +lemma (in finite_information_space) conditional_mutual_information_positive: + "0 \ \(X ; Y | Z)" proof - let ?dXYZ = "distribution (\x. (X x, Y x, Z x))" - let ?dXZ = "distribution (\x. (X x, Z x))" - let ?dYZ = "distribution (\x. (Y x, Z x))" + let ?dXZ = "joint_distribution X Z" + let ?dYZ = "joint_distribution Y Z" let ?dX = "distribution X" - let ?dY = "distribution Y" let ?dZ = "distribution Z" + let ?M = "X ` space M \ Y ` space M \ Z ` space M" + + have split_beta: "\f. split f = (\x. f (fst x) (snd x))" by (simp add: expand_fun_eq) - have If_mult_distr: "\A B C D. If A B C * D = If A (B * D) (C * D)" by auto - { fix x y - have "(\x. (X x, Y x, Z x)) -` {(X x, y)} \ space M = - (if y \ (\x. (Y x, Z x)) ` space M then (\x. (X x, Y x, Z x)) -` {(X x, y)} \ space M else {})" by auto - hence "?dXYZ {(X x, y)} = (if y \ (\x. (Y x, Z x)) ` space M then ?dXYZ {(X x, y)} else 0)" - unfolding distribution_def by auto } - note split_measure = this - - have sets: "Y ` space M \ Z ` space M \ (\x. (Y x, Z x)) ` space M = (\x. (Y x, Z x)) ` space M" by auto - - have cong: "\A B C D. \ A = C ; B = D \ \ A + B = C + D" by auto - - { fix A f have "setsum f A = setsum (\(x, y). f (y, x)) ((\(x, y). (y, x)) ` A)" - using setsum_reindex[OF inj_on_swap, of "\(x, y). f (y, x)" A] by (simp add: split_twice) } - note setsum_reindex_swap = this - - { fix A B f assume *: "finite A" "\x\A. finite (B x)" - have "(\x\Sigma A B. f x) = (\x\A. setsum (\y. f (x, y)) (B x))" - unfolding setsum_Sigma[OF *] by simp } - note setsum_Sigma = this + have "- (\(x, y, z) \ ?M. ?dXYZ {(x, y, z)} * + log b (?dXYZ {(x, y, z)} / (?dXZ {(x, z)} * ?dYZ {(y,z)} / ?dZ {z}))) + \ log b (\(x, y, z) \ ?M. ?dXZ {(x, z)} * ?dYZ {(y,z)} / ?dZ {z})" + unfolding split_beta + proof (rule log_setsum_divide) + show "?M \ {}" using not_empty by simp + show "1 < b" using b_gt_1 . - { fix x - have "(\z\Z ` space M. ?dXZ {(X x, z)}) = (\yz\(\x. (Y x, Z x)) ` space M. 
?dXYZ {(X x, yz)})" - apply (subst setsum_reindex_swap) - apply (simp add: image_image distribution_def) - unfolding image_pair_eq_Sigma - apply (subst setsum_Sigma) - using finite_space apply simp_all - apply (rule setsum_cong[OF refl]) - apply (subst measure_finitely_additive'') - by (auto simp add: disjoint_family_on_def sets_eq_Pow intro!: arg_cong[where f=prob]) } + fix x assume "x \ ?M" + show "0 \ ?dXYZ {(fst x, fst (snd x), snd (snd x))}" using positive_distribution . + show "0 \ ?dXZ {(fst x, snd (snd x))} * ?dYZ {(fst (snd x), snd (snd x))} / ?dZ {snd (snd x)}" + by (auto intro!: mult_nonneg_nonneg positive_distribution simp: zero_le_divide_iff) - thus "(\(x, y, z)\X ` space M \ Y ` space M \ Z ` space M. - ?dXYZ {(x, y, z)} * log b (?dXYZ {(x, y, z)} / (?dX {x} * ?dYZ {(y, z)}))) - - (\(x, y)\X ` space M \ Z ` space M. - ?dXZ {(x, y)} * log b (?dXZ {(x, y)} / (?dX {x} * ?dZ {y}))) = - - (\ (x, z) \ (X ` space M \ Z ` space M). - ?dXZ {(x,z)} * log b (?dXZ {(x,z)} / ?dZ {z})) + - (\ (x, y, z) \ (X ` space M \ (\x. (Y x, Z x)) ` space M). - ?dXYZ {(x, y, z)} * log b (?dXYZ {(x, y, z)} / ?dYZ {(y, z)}))" - using finite_space - apply (auto simp: setsum_cartesian_product[symmetric] setsum_negf[symmetric] - setsum_addf[symmetric] diff_minus - intro!: setsum_cong[OF refl]) - apply (subst split_measure) - apply (simp add: If_mult_distr setsum_cases sets distribution_log_split[OF assms, of X]) - apply (subst add_commute) - by (simp add: setsum_subtractf setsum_negf field_simps setsum_right_distrib[symmetric] sets_eq_Pow) + assume *: "0 < ?dXYZ {(fst x, fst (snd x), snd (snd x))}" + thus "0 < ?dXZ {(fst x, snd (snd x))} * ?dYZ {(fst (snd x), snd (snd x))} / ?dZ {snd (snd x)}" + by (auto intro!: divide_pos_pos mult_pos_pos + intro: distribution_order(6) distribution_mono_gt_0) + qed (simp_all add: setsum_cartesian_product' sum_over_space_distrib setsum_distribution finite_space) + also have "(\(x, y, z) \ ?M. ?dXZ {(x, z)} * ?dYZ {(y,z)} / ?dZ {z}) = (\z\Z`space M. ?dZ {z})" + apply (simp add: setsum_cartesian_product') + apply (subst setsum_commute) + apply (subst (2) setsum_commute) + by (auto simp: setsum_divide_distrib[symmetric] setsum_product[symmetric] setsum_distribution + intro!: setsum_cong) + finally show ?thesis + unfolding conditional_mutual_information_eq sum_over_space_distrib by simp qed + definition (in prob_space) "conditional_entropy b S T X Y = conditional_mutual_information b S S T X X Y" -abbreviation (in finite_prob_space) - finite_conditional_entropy ("\\<^bsub>_\<^esub>'(_ | _')") where - "\\<^bsub>b\<^esub>(X | Y) \ conditional_entropy b +abbreviation (in finite_information_space) + finite_conditional_entropy ("\'(_ | _')") where + "\(X | Y) \ conditional_entropy b \ space = X`space M, sets = Pow (X`space M) \ \ space = Y`space M, sets = Pow (Y`space M) \ X Y" -abbreviation (in finite_prob_space) - finite_conditional_entropy_2 ("\'(_ | _')") where - "\(X | Y) \ \\<^bsub>2\<^esub>(X | Y)" +lemma (in finite_information_space) conditional_entropy_positive: + "0 \ \(X | Y)" unfolding conditional_entropy_def using conditional_mutual_information_positive . -lemma (in finite_prob_space) finite_conditional_entropy_reduce: - assumes "1 < b" - shows "\\<^bsub>b\<^esub>(X | Z) = +lemma (in finite_information_space) conditional_entropy_eq: + "\(X | Z) = - (\(x, z)\X ` space M \ Z ` space M. joint_distribution X Z {(x, z)} * log b (joint_distribution X Z {(x, z)} / distribution Z {z}))" proof - have *: "\x y z. (\x. (X x, X x, Z x)) -` {(x, y, z)} = (if x = y then (\x. 
(X x, Z x)) -` {(x, z)} else {})" by auto show ?thesis - unfolding finite_conditional_mutual_information_reduce[OF assms] - conditional_entropy_def joint_distribution_def distribution_def * + unfolding conditional_mutual_information_eq_sum + conditional_entropy_def distribution_def * by (auto intro!: setsum_0') qed -lemma (in finite_prob_space) finite_mutual_information_eq_entropy_conditional_entropy: - assumes "1 < b" shows "\\<^bsub>b\<^esub>(X ; Z) = \\<^bsub>b\<^esub>(X) - \\<^bsub>b\<^esub>(X | Z)" (is "mutual_information b ?X ?Z X Z = _") - unfolding finite_mutual_information_reduce - finite_entropy_reduce[OF assms] - finite_conditional_entropy_reduce[OF assms] - joint_distribution diff_minus_eq_add +lemma (in finite_information_space) mutual_information_eq_entropy_conditional_entropy: + "\(X ; Z) = \(X) - \(X | Z)" + unfolding mutual_information_eq entropy_eq conditional_entropy_eq using finite_space - apply (auto simp add: setsum_addf[symmetric] setsum_subtractf - setsum_Sigma[symmetric] distribution_log_split[OF assms] setsum_negf[symmetric] - intro!: setsum_cong[OF refl]) - apply (simp add: setsum_negf setsum_left_distrib[symmetric]) -proof (rule disjI2) - let ?dX = "distribution X" - and ?dXZ = "distribution (\x. (X x, Z x))" + by (auto simp add: setsum_addf setsum_subtractf setsum_cartesian_product' + setsum_left_distrib[symmetric] setsum_addf setsum_distribution) - fix x assume "x \ space M" - have "\z. (\x. (X x, Z x)) -` {(X x, z)} \ space M = (X -` {X x} \ space M) \ (Z -` {z} \ space M)" by auto - thus "(\z\Z ` space M. distribution (\x. (X x, Z x)) {(X x, z)}) = distribution X {X x}" - unfolding distribution_def - apply (subst prob_real_sum_image_fn[where e="X -` {X x} \ space M" and s = "Z`space M" and f="\z. Z -` {z} \ space M"]) - using finite_space sets_eq_Pow by auto +lemma (in finite_information_space) conditional_entropy_less_eq_entropy: + "\(X | Z) \ \(X)" +proof - + have "\(X ; Z) = \(X) - \(X | Z)" using mutual_information_eq_entropy_conditional_entropy . + with mutual_information_positive[of X Z] entropy_positive[of X] + show ?thesis by auto qed (* -------------Entropy of a RV with a certain event is zero---------------- *) -lemma (in finite_prob_space) finite_entropy_certainty_eq_0: - assumes "x \ X ` space M" and "distribution X {x} = 1" and "b > 1" - shows "\\<^bsub>b\<^esub>(X) = 0" +lemma (in finite_information_space) finite_entropy_certainty_eq_0: + assumes "x \ X ` space M" and "distribution X {x} = 1" + shows "\(X) = 0" proof - interpret X: finite_prob_space "\ space = X ` space M, sets = Pow (X ` space M), - measure = distribution X\" by (rule finite_prob_space) + measure = distribution X\" by (rule finite_prob_space_of_images) have "distribution X (X ` space M - {x}) = distribution X (X ` space M) - distribution X {x}" using X.measure_compl[of "{x}"] assms by auto @@ -694,366 +606,18 @@ have y: "\y. (if x = y then 1 else 0) * log b (if x = y then 1 else 0) = 0" by simp - show ?thesis - unfolding finite_entropy_reduce[OF `b > 1`] by (auto simp: y fi) + show ?thesis unfolding entropy_eq by (auto simp: y fi) qed (* --------------- upper bound on entropy for a rv ------------------------- *) -definition convex_set :: "real set \ bool" -where - "convex_set C \ (\ x y \. 
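(* In conventional notation this block gives
     H(X|Z) = - \sum_{x,z} p(x,z) \log_b ( p(x,z) / p(z) ),
     I(X;Z) = H(X) - H(X|Z),  and hence  H(X|Z) \le H(X)  since  I(X;Z) \ge 0;
   finite_entropy_certainty_eq_0 adds that an almost surely constant X has H(X) = 0. *)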
x \ C \ y \ C \ 0 \ \ \ \ \ 1 \ \ * x + (1 - \) * y \ C)" - -lemma pos_is_convex: - shows "convex_set {0 <..}" -unfolding convex_set_def -proof safe - fix x y \ :: real - assume asms: "\ \ 0" "\ \ 1" "x > 0" "y > 0" - { assume "\ = 0" - hence "\ * x + (1 - \) * y = y" by simp - hence "\ * x + (1 - \) * y > 0" using asms by simp } - moreover - { assume "\ = 1" - hence "\ * x + (1 - \) * y > 0" using asms by simp } - moreover - { assume "\ \ 1" "\ \ 0" - hence "\ > 0" "(1 - \) > 0" using asms by auto - hence "\ * x + (1 - \) * y > 0" using asms - apply (subst add_nonneg_pos[of "\ * x" "(1 - \) * y"]) - using real_mult_order by auto fastsimp } - ultimately show "\ * x + (1 - \) * y > 0" using assms by blast -qed - -definition convex_fun :: "(real \ real) \ real set \ bool" -where - "convex_fun f C \ (\ x y \. convex_set C \ (x \ C \ y \ C \ 0 \ \ \ \ \ 1 - \ f (\ * x + (1 - \) * y) \ \ * f x + (1 - \) * f y))" - -lemma pos_convex_function: - fixes f :: "real \ real" - assumes "convex_set C" - assumes leq: "\ x y. \x \ C ; y \ C\ \ f' x * (y - x) \ f y - f x" - shows "convex_fun f C" -unfolding convex_fun_def -using assms -proof safe - fix x y \ :: real - let ?x = "\ * x + (1 - \) * y" - assume asm: "convex_set C" "x \ C" "y \ C" "\ \ 0" "\ \ 1" - hence "1 - \ \ 0" by auto - hence xpos: "?x \ C" using asm unfolding convex_set_def by auto - have geq: "\ * (f x - f ?x) + (1 - \) * (f y - f ?x) - \ \ * f' ?x * (x - ?x) + (1 - \) * f' ?x * (y - ?x)" - using add_mono[OF mult_mono1[OF leq[OF xpos asm(2)] `\ \ 0`] - mult_mono1[OF leq[OF xpos asm(3)] `1 - \ \ 0`]] by auto - hence "\ * f x + (1 - \) * f y - f ?x \ 0" - by (auto simp add:field_simps) - thus "\ * f x + (1 - \) * f y \ f ?x" by simp -qed - -lemma atMostAtLeast_subset_convex: - assumes "convex_set C" - assumes "x \ C" "y \ C" "x < y" - shows "{x .. y} \ C" -proof safe - fix z assume zasm: "z \ {x .. y}" - { assume asm: "x < z" "z < y" - let "?\" = "(y - z) / (y - x)" - have "0 \ ?\" "?\ \ 1" using assms asm by (auto simp add:field_simps) - hence comb: "?\ * x + (1 - ?\) * y \ C" - using assms[unfolded convex_set_def] by blast - have "?\ * x + (1 - ?\) * y = (y - z) * x / (y - x) + (1 - (y - z) / (y - x)) * y" - by (auto simp add:field_simps) - also have "\ = ((y - z) * x + (y - x - (y - z)) * y) / (y - x)" - using assms unfolding add_divide_distrib by (auto simp:field_simps) - also have "\ = z" - using assms by (auto simp:field_simps) - finally have "z \ C" - using comb by auto } note less = this - show "z \ C" using zasm less assms - unfolding atLeastAtMost_iff le_less by auto -qed - -lemma f''_imp_f': - fixes f :: "real \ real" - assumes "convex_set C" - assumes f': "\ x. x \ C \ DERIV f x :> (f' x)" - assumes f'': "\ x. x \ C \ DERIV f' x :> (f'' x)" - assumes pos: "\ x. 
x \ C \ f'' x \ 0" - assumes "x \ C" "y \ C" - shows "f' x * (y - x) \ f y - f x" -using assms -proof - - { fix x y :: real assume asm: "x \ C" "y \ C" "y > x" - hence ge: "y - x > 0" "y - x \ 0" by auto - from asm have le: "x - y < 0" "x - y \ 0" by auto - then obtain z1 where z1: "z1 > x" "z1 < y" "f y - f x = (y - x) * f' z1" - using subsetD[OF atMostAtLeast_subset_convex[OF `convex_set C` `x \ C` `y \ C` `x < y`], - THEN f', THEN MVT2[OF `x < y`, rule_format, unfolded atLeastAtMost_iff[symmetric]]] - by auto - hence "z1 \ C" using atMostAtLeast_subset_convex - `convex_set C` `x \ C` `y \ C` `x < y` by fastsimp - from z1 have z1': "f x - f y = (x - y) * f' z1" - by (simp add:field_simps) - obtain z2 where z2: "z2 > x" "z2 < z1" "f' z1 - f' x = (z1 - x) * f'' z2" - using subsetD[OF atMostAtLeast_subset_convex[OF `convex_set C` `x \ C` `z1 \ C` `x < z1`], - THEN f'', THEN MVT2[OF `x < z1`, rule_format, unfolded atLeastAtMost_iff[symmetric]]] z1 - by auto - obtain z3 where z3: "z3 > z1" "z3 < y" "f' y - f' z1 = (y - z1) * f'' z3" - using subsetD[OF atMostAtLeast_subset_convex[OF `convex_set C` `z1 \ C` `y \ C` `z1 < y`], - THEN f'', THEN MVT2[OF `z1 < y`, rule_format, unfolded atLeastAtMost_iff[symmetric]]] z1 - by auto - have "f' y - (f x - f y) / (x - y) = f' y - f' z1" - using asm z1' by auto - also have "\ = (y - z1) * f'' z3" using z3 by auto - finally have cool': "f' y - (f x - f y) / (x - y) = (y - z1) * f'' z3" by simp - have A': "y - z1 \ 0" using z1 by auto - have "z3 \ C" using z3 asm atMostAtLeast_subset_convex - `convex_set C` `x \ C` `z1 \ C` `x < z1` by fastsimp - hence B': "f'' z3 \ 0" using assms by auto - from A' B' have "(y - z1) * f'' z3 \ 0" using mult_nonneg_nonneg by auto - from cool' this have "f' y - (f x - f y) / (x - y) \ 0" by auto - from mult_right_mono_neg[OF this le(2)] - have "f' y * (x - y) - (f x - f y) / (x - y) * (x - y) \ 0 * (x - y)" - unfolding diff_def using real_add_mult_distrib by auto - hence "f' y * (x - y) - (f x - f y) \ 0" using le by auto - hence res: "f' y * (x - y) \ f x - f y" by auto - have "(f y - f x) / (y - x) - f' x = f' z1 - f' x" - using asm z1 by auto - also have "\ = (z1 - x) * f'' z2" using z2 by auto - finally have cool: "(f y - f x) / (y - x) - f' x = (z1 - x) * f'' z2" by simp - have A: "z1 - x \ 0" using z1 by auto - have "z2 \ C" using z2 z1 asm atMostAtLeast_subset_convex - `convex_set C` `z1 \ C` `y \ C` `z1 < y` by fastsimp - hence B: "f'' z2 \ 0" using assms by auto - from A B have "(z1 - x) * f'' z2 \ 0" using mult_nonneg_nonneg by auto - from cool this have "(f y - f x) / (y - x) - f' x \ 0" by auto - from mult_right_mono[OF this ge(2)] - have "(f y - f x) / (y - x) * (y - x) - f' x * (y - x) \ 0 * (y - x)" - unfolding diff_def using real_add_mult_distrib by auto - hence "f y - f x - f' x * (y - x) \ 0" using ge by auto - hence "f y - f x \ f' x * (y - x)" "f' y * (x - y) \ f x - f y" - using res by auto } note less_imp = this - { fix x y :: real assume "x \ C" "y \ C" "x \ y" - hence"f y - f x \ f' x * (y - x)" - unfolding neq_iff apply safe - using less_imp by auto } note neq_imp = this - moreover - { fix x y :: real assume asm: "x \ C" "y \ C" "x = y" - hence "f y - f x \ f' x * (y - x)" by auto } - ultimately show ?thesis using assms by blast -qed - -lemma f''_ge0_imp_convex: - fixes f :: "real \ real" - assumes conv: "convex_set C" - assumes f': "\ x. x \ C \ DERIV f x :> (f' x)" - assumes f'': "\ x. x \ C \ DERIV f' x :> (f'' x)" - assumes pos: "\ x. 
x \ C \ f'' x \ 0" - shows "convex_fun f C" -using f''_imp_f'[OF conv f' f'' pos] assms pos_convex_function by fastsimp - -lemma minus_log_convex: - fixes b :: real - assumes "b > 1" - shows "convex_fun (\ x. - log b x) {0 <..}" -proof - - have "\ z. z > 0 \ DERIV (log b) z :> 1 / (ln b * z)" using DERIV_log by auto - hence f': "\ z. z > 0 \ DERIV (\ z. - log b z) z :> - 1 / (ln b * z)" - using DERIV_minus by auto - have "\ z :: real. z > 0 \ DERIV inverse z :> - (inverse z ^ Suc (Suc 0))" - using less_imp_neq[THEN not_sym, THEN DERIV_inverse] by auto - from this[THEN DERIV_cmult, of _ "- 1 / ln b"] - have "\ z :: real. z > 0 \ DERIV (\ z. (- 1 / ln b) * inverse z) z :> (- 1 / ln b) * (- (inverse z ^ Suc (Suc 0)))" - by auto - hence f''0: "\ z :: real. z > 0 \ DERIV (\ z. - 1 / (ln b * z)) z :> 1 / (ln b * z * z)" - unfolding inverse_eq_divide by (auto simp add:real_mult_assoc) - have f''_ge0: "\ z :: real. z > 0 \ 1 / (ln b * z * z) \ 0" - using `b > 1` by (auto intro!:less_imp_le simp add:divide_pos_pos[of 1] real_mult_order) - from f''_ge0_imp_convex[OF pos_is_convex, - unfolded greaterThan_iff, OF f' f''0 f''_ge0] - show ?thesis by auto -qed - -lemma setsum_nonneg_0: - fixes f :: "'a \ real" - assumes "finite s" - assumes "\ i. i \ s \ f i \ 0" - assumes "(\ i \ s. f i) = 0" - assumes "i \ s" - shows "f i = 0" -proof - - { assume asm: "f i > 0" - from assms have "\ j \ s - {i}. f j \ 0" by auto - from setsum_nonneg[of "s - {i}" f, OF this] - have "(\ j \ s - {i}. f j) \ 0" by simp - hence "(\ j \ s - {i}. f j) + f i > 0" using asm by auto - from this setsum.remove[of s i f, OF `finite s` `i \ s`] - have "(\ j \ s. f j) > 0" by auto - hence "False" using assms by auto } - thus ?thesis using assms by fastsimp -qed - -lemma setsum_nonneg_leq_1: - fixes f :: "'a \ real" - assumes "finite s" - assumes "\ i. i \ s \ f i \ 0" - assumes "(\ i \ s. f i) = 1" - assumes "i \ s" - shows "f i \ 1" -proof - - { assume asm: "f i > 1" - from assms have "\ j \ s - {i}. f j \ 0" by auto - from setsum_nonneg[of "s - {i}" f, OF this] - have "(\ j \ s - {i}. f j) \ 0" by simp - hence "(\ j \ s - {i}. f j) + f i > 1" using asm by auto - from this setsum.remove[of s i f, OF `finite s` `i \ s`] - have "(\ j \ s. f j) > 1" by auto - hence "False" using assms by auto } - thus ?thesis using assms by fastsimp -qed - -lemma convex_set_setsum: - assumes "finite s" "s \ {}" - assumes "convex_set C" - assumes "(\ i \ s. a i) = 1" - assumes "\ i. i \ s \ a i \ 0" - assumes "\ i. i \ s \ y i \ C" - shows "(\ j \ s. a j * y j) \ C" -using assms -proof (induct s arbitrary:a rule:finite_ne_induct) - case (singleton i) note asms = this - hence "a i = 1" by auto - thus ?case using asms by auto -next - case (insert i s) note asms = this - { assume "a i = 1" - hence "(\ j \ s. a j) = 0" - using asms by auto - hence "\ j. j \ s \ a j = 0" - using setsum_nonneg_0 asms by fastsimp - hence ?case using asms by auto } - moreover - { assume asm: "a i \ 1" - from asms have yai: "y i \ C" "a i \ 0" by auto - have fis: "finite (insert i s)" using asms by auto - hence ai1: "a i \ 1" using setsum_nonneg_leq_1[of "insert i s" a] asms by simp - hence "a i < 1" using asm by auto - hence i0: "1 - a i > 0" by auto - let "?a j" = "a j / (1 - a i)" - { fix j assume "j \ s" - hence "?a j \ 0" - using i0 asms divide_nonneg_pos - by fastsimp } note a_nonneg = this - have "(\ j \ insert i s. a j) = 1" using asms by auto - hence "(\ j \ s. a j) = 1 - a i" using setsum.insert asms by fastsimp - hence "(\ j \ s. 
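(* The convexity toolkit being deleted here (convex_set, convex_fun, f''_ge0_imp_convex, ...)
   culminated in minus_log_convex: for b > 1 the map x \mapsto - \log_b x has second derivative
   1 / (\ln b \cdot x^2) \ge 0 on (0, \infty) and is therefore convex, which is the fact behind
   Jensen's inequality log_setsum. The new proof of log_setsum at the top of the theory obtains
   these facts via the added Convex import instead. *)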
a j) / (1 - a i) = 1" using i0 by auto - hence a1: "(\ j \ s. ?a j) = 1" unfolding divide.setsum by simp - from this asms - have "(\j\s. ?a j * y j) \ C" using a_nonneg by fastsimp - hence "a i * y i + (1 - a i) * (\ j \ s. ?a j * y j) \ C" - using asms[unfolded convex_set_def, rule_format] yai ai1 by auto - hence "a i * y i + (\ j \ s. (1 - a i) * (?a j * y j)) \ C" - using mult_right.setsum[of "(1 - a i)" "\ j. ?a j * y j" s] by auto - hence "a i * y i + (\ j \ s. a j * y j) \ C" using i0 by auto - hence ?case using setsum.insert asms by auto } - ultimately show ?case by auto -qed - -lemma convex_fun_setsum: - fixes a :: "'a \ real" - assumes "finite s" "s \ {}" - assumes "convex_fun f C" - assumes "(\ i \ s. a i) = 1" - assumes "\ i. i \ s \ a i \ 0" - assumes "\ i. i \ s \ y i \ C" - shows "f (\ i \ s. a i * y i) \ (\ i \ s. a i * f (y i))" -using assms -proof (induct s arbitrary:a rule:finite_ne_induct) - case (singleton i) - hence ai: "a i = 1" by auto - thus ?case by auto -next - case (insert i s) note asms = this - hence "convex_fun f C" by simp - from this[unfolded convex_fun_def, rule_format] - have conv: "\ x y \. \x \ C; y \ C; 0 \ \; \ \ 1\ - \ f (\ * x + (1 - \) * y) \ \ * f x + (1 - \) * f y" - by simp - { assume "a i = 1" - hence "(\ j \ s. a j) = 0" - using asms by auto - hence "\ j. j \ s \ a j = 0" - using setsum_nonneg_0 asms by fastsimp - hence ?case using asms by auto } - moreover - { assume asm: "a i \ 1" - from asms have yai: "y i \ C" "a i \ 0" by auto - have fis: "finite (insert i s)" using asms by auto - hence ai1: "a i \ 1" using setsum_nonneg_leq_1[of "insert i s" a] asms by simp - hence "a i < 1" using asm by auto - hence i0: "1 - a i > 0" by auto - let "?a j" = "a j / (1 - a i)" - { fix j assume "j \ s" - hence "?a j \ 0" - using i0 asms divide_nonneg_pos - by fastsimp } note a_nonneg = this - have "(\ j \ insert i s. a j) = 1" using asms by auto - hence "(\ j \ s. a j) = 1 - a i" using setsum.insert asms by fastsimp - hence "(\ j \ s. a j) / (1 - a i) = 1" using i0 by auto - hence a1: "(\ j \ s. ?a j) = 1" unfolding divide.setsum by simp - have "convex_set C" using asms unfolding convex_fun_def by auto - hence asum: "(\ j \ s. ?a j * y j) \ C" - using asms convex_set_setsum[OF `finite s` `s \ {}` - `convex_set C` a1 a_nonneg] by auto - have asum_le: "f (\ j \ s. ?a j * y j) \ (\ j \ s. ?a j * f (y j))" - using a_nonneg a1 asms by blast - have "f (\ j \ insert i s. a j * y j) = f ((\ j \ s. a j * y j) + a i * y i)" - using setsum.insert[of s i "\ j. a j * y j", OF `finite s` `i \ s`] asms - by (auto simp only:add_commute) - also have "\ = f ((1 - a i) * (\ j \ s. a j * y j) / (1 - a i) + a i * y i)" - using i0 by auto - also have "\ = f ((1 - a i) * (\ j \ s. a j * y j / (1 - a i)) + a i * y i)" - unfolding divide.setsum[of "\ j. a j * y j" s "1 - a i", symmetric] by auto - also have "\ = f ((1 - a i) * (\ j \ s. ?a j * y j) + a i * y i)" by auto - also have "\ \ (1 - a i) * f ((\ j \ s. ?a j * y j)) + a i * f (y i)" - using conv[of "y i" "(\ j \ s. ?a j * y j)" "a i", OF yai(1) asum yai(2) ai1] - by (auto simp only:add_commute) - also have "\ \ (1 - a i) * (\ j \ s. ?a j * f (y j)) + a i * f (y i)" - using add_right_mono[OF mult_left_mono[of _ _ "1 - a i", - OF asum_le less_imp_le[OF i0]], of "a i * f (y i)"] by simp - also have "\ = (\ j \ s. (1 - a i) * ?a j * f (y j)) + a i * f (y i)" - unfolding mult_right.setsum[of "1 - a i" "\ j. ?a j * f (y j)"] using i0 by auto - also have "\ = (\ j \ s. 
a j * f (y j)) + a i * f (y i)" using i0 by auto - also have "\ = (\ j \ insert i s. a j * f (y j))" using asms by auto - finally have "f (\ j \ insert i s. a j * y j) \ (\ j \ insert i s. a j * f (y j))" - by simp } - ultimately show ?case by auto -qed - -lemma log_setsum: - assumes "finite s" "s \ {}" - assumes "b > 1" - assumes "(\ i \ s. a i) = 1" - assumes "\ i. i \ s \ a i \ 0" - assumes "\ i. i \ s \ y i \ {0 <..}" - shows "log b (\ i \ s. a i * y i) \ (\ i \ s. a i * log b (y i))" -proof - - have "convex_fun (\ x. - log b x) {0 <..}" - by (rule minus_log_convex[OF `b > 1`]) - hence "- log b (\ i \ s. a i * y i) \ (\ i \ s. a i * - log b (y i))" - using convex_fun_setsum assms by blast - thus ?thesis by (auto simp add:setsum_negf le_imp_neg_le) -qed - -lemma (in finite_prob_space) finite_entropy_le_card: - assumes "1 < b" - shows "\\<^bsub>b\<^esub>(X) \ log b (real (card (X ` space M \ {x . distribution X {x} \ 0})))" +lemma (in finite_information_space) finite_entropy_le_card: + "\(X) \ log b (real (card (X ` space M \ {x . distribution X {x} \ 0})))" proof - interpret X: finite_prob_space "\space = X ` space M, sets = Pow (X ` space M), measure = distribution X\" - using finite_prob_space by auto + using finite_prob_space_of_images by auto + have triv: "\ x. (if distribution X {x} \ 0 then distribution X {x} else 0) = distribution X {x}" by auto hence sum1: "(\ x \ X ` space M \ {y. distribution X {y} \ 0}. distribution X {x}) = 1" @@ -1085,7 +649,7 @@ also have "\ = (if distribution X {x} \ 0 then distribution X {x} * log b (inverse (distribution X {x})) else 0)" - using log_inverse `1 < b` X.positive[of "{x}"] asm by auto + using log_inverse b_gt_1 X.positive[of "{x}"] asm by auto finally have "- distribution X {x} * log b (distribution X {x}) = (if distribution X {x} \ 0 then distribution X {x} * log b (inverse (distribution X {x})) @@ -1101,7 +665,7 @@ unfolding setsum_restrict_set[OF finite_imageI[OF finite_space, of X]] by auto also have "\ \ log b (\ x \ X ` space M \ {y. distribution X {y} \ 0}. distribution X {x} * (inverse (distribution X {x})))" - apply (subst log_setsum[OF _ _ `b > 1` sum1, + apply (subst log_setsum[OF _ _ b_gt_1 sum1, unfolded greaterThan_iff, OF _ _ _]) using pos sets_eq_Pow X.finite_space assms X.positive not_empty by auto also have "\ = log b (\ x \ X ` space M \ {y. distribution X {y} \ 0}. 1)" @@ -1110,7 +674,7 @@ by auto finally have "- (\x\X ` space M. distribution X {x} * log b (distribution X {x})) \ log b (real_of_nat (card (X ` space M \ {y. distribution X {y} \ 0})))" by simp - thus ?thesis unfolding finite_entropy_reduce[OF assms] real_eq_of_nat by auto + thus ?thesis unfolding entropy_eq real_eq_of_nat by auto qed (* --------------- entropy is maximal for a uniform rv --------------------- *) @@ -1140,34 +704,31 @@ by (auto simp:field_simps) qed -lemma (in finite_prob_space) finite_entropy_uniform_max: - assumes "b > 1" +lemma (in finite_information_space) finite_entropy_uniform_max: assumes "\x y. \ x \ X ` space M ; y \ X ` space M \ \ distribution X {x} = distribution X {y}" - shows "\\<^bsub>b\<^esub>(X) = log b (real (card (X ` space M)))" + shows "\(X) = log b (real (card (X ` space M)))" proof - interpret X: finite_prob_space "\space = X ` space M, sets = Pow (X ` space M), measure = distribution X\" - using finite_prob_space by auto + using finite_prob_space_of_images by auto + { fix x assume xasm: "x \ X ` space M" hence card_gt0: "real (card (X ` space M)) > 0" using card_gt_0_iff X.finite_space by auto from xasm have "\ y. 
y \ X ` space M \ distribution X {y} = distribution X {x}" using assms by blast hence "- (\x\X ` space M. distribution X {x} * log b (distribution X {x})) - = - (\ y \ X ` space M. distribution X {x} * log b (distribution X {x}))" - by auto - also have "\ = - real_of_nat (card (X ` space M)) * distribution X {x} * log b (distribution X {x})" - by auto + = - real (card (X ` space M)) * distribution X {x} * log b (distribution X {x})" + unfolding real_eq_of_nat by auto also have "\ = - real (card (X ` space M)) * (1 / real (card (X ` space M))) * log b (1 / real (card (X ` space M)))" - unfolding real_eq_of_nat[symmetric] - by (auto simp: X.uniform_prob[simplified, OF xasm assms(2)]) + by (auto simp: X.uniform_prob[simplified, OF xasm assms]) also have "\ = log b (real (card (X ` space M)))" unfolding inverse_eq_divide[symmetric] - using card_gt0 log_inverse `b > 1` + using card_gt0 log_inverse b_gt_1 by (auto simp add:field_simps card_gt0) finally have ?thesis - unfolding finite_entropy_reduce[OF `b > 1`] by auto } + unfolding entropy_eq by auto } moreover { assume "X ` space M = {}" hence "distribution X (X ` space M) = 0" @@ -1176,4 +737,199 @@ ultimately show ?thesis by auto qed +definition "subvimage A f g \ (\x \ A. f -` {f x} \ A \ g -` {g x} \ A)" + +lemma subvimageI: + assumes "\x y. \ x \ A ; y \ A ; f x = f y \ \ g x = g y" + shows "subvimage A f g" + using assms unfolding subvimage_def by blast + +lemma subvimageE[consumes 1]: + assumes "subvimage A f g" + obtains "\x y. \ x \ A ; y \ A ; f x = f y \ \ g x = g y" + using assms unfolding subvimage_def by blast + +lemma subvimageD: + "\ subvimage A f g ; x \ A ; y \ A ; f x = f y \ \ g x = g y" + using assms unfolding subvimage_def by blast + +lemma subvimage_subset: + "\ subvimage B f g ; A \ B \ \ subvimage A f g" + unfolding subvimage_def by auto + +lemma subvimage_idem[intro]: "subvimage A g g" + by (safe intro!: subvimageI) + +lemma subvimage_comp_finer[intro]: + assumes svi: "subvimage A g h" + shows "subvimage A g (f \ h)" +proof (rule subvimageI, simp) + fix x y assume "x \ A" "y \ A" "g x = g y" + from svi[THEN subvimageD, OF this] + show "f (h x) = f (h y)" by simp +qed + +lemma subvimage_comp_gran: + assumes svi: "subvimage A g h" + assumes inj: "inj_on f (g ` A)" + shows "subvimage A (f \ g) h" + by (rule subvimageI) (auto intro!: subvimageD[OF svi] simp: inj_on_iff[OF inj]) + +lemma subvimage_comp: + assumes svi: "subvimage (f ` A) g h" + shows "subvimage A (g \ f) (h \ f)" + by (rule subvimageI) (auto intro!: svi[THEN subvimageD]) + +lemma subvimage_trans: + assumes fg: "subvimage A f g" + assumes gh: "subvimage A g h" + shows "subvimage A f h" + by (rule subvimageI) (auto intro!: fg[THEN subvimageD] gh[THEN subvimageD]) + +lemma subvimage_translator: + assumes svi: "subvimage A f g" + shows "\h. \x \ A. h (f x) = g x" +proof (safe intro!: exI[of _ "\x. (THE z. z \ (g ` (f -` {x} \ A)))"]) + fix x assume "x \ A" + show "(THE x'. x' \ (g ` (f -` {f x} \ A))) = g x" + by (rule theI2[of _ "g x"]) + (insert `x \ A`, auto intro!: svi[THEN subvimageD]) +qed + +lemma subvimage_translator_image: + assumes svi: "subvimage A f g" + shows "\h. h ` f ` A = g ` A" +proof - + from subvimage_translator[OF svi] + obtain h where "\x. 
x \ A \ h (f x) = g x" by auto + thus ?thesis + by (auto intro!: exI[of _ h] + simp: image_compose[symmetric] comp_def cong: image_cong) +qed + +lemma subvimage_finite: + assumes svi: "subvimage A f g" and fin: "finite (f`A)" + shows "finite (g`A)" +proof - + from subvimage_translator_image[OF svi] + obtain h where "g`A = h`f`A" by fastsimp + with fin show "finite (g`A)" by simp +qed + +lemma subvimage_disj: + assumes svi: "subvimage A f g" + shows "f -` {x} \ A \ g -` {y} \ A \ + f -` {x} \ g -` {y} \ A = {}" (is "?sub \ ?dist") +proof (rule disjCI) + assume "\ ?dist" + then obtain z where "z \ A" and "x = f z" and "y = g z" by auto + thus "?sub" using svi unfolding subvimage_def by auto +qed + +lemma setsum_image_split: + assumes svi: "subvimage A f g" and fin: "finite (f ` A)" + shows "(\x\f`A. h x) = (\y\g`A. \x\f`(g -` {y} \ A). h x)" + (is "?lhs = ?rhs") +proof - + have "f ` A = + snd ` (SIGMA x : g ` A. f ` (g -` {x} \ A))" + (is "_ = snd ` ?SIGMA") + unfolding image_split_eq_Sigma[symmetric] + by (simp add: image_compose[symmetric] comp_def) + moreover + have snd_inj: "inj_on snd ?SIGMA" + unfolding image_split_eq_Sigma[symmetric] + by (auto intro!: inj_onI subvimageD[OF svi]) + ultimately + have "(\x\f`A. h x) = (\(x,y)\?SIGMA. h y)" + by (auto simp: setsum_reindex intro: setsum_cong) + also have "... = ?rhs" + using subvimage_finite[OF svi fin] fin + apply (subst setsum_Sigma[symmetric]) + by (auto intro!: finite_subset[of _ "f`A"]) + finally show ?thesis . +qed + +lemma (in finite_information_space) entropy_partition: + assumes svi: "subvimage (space M) X P" + shows "\(X) = \(P) + \(X|P)" +proof - + have "(\x\X ` space M. distribution X {x} * log b (distribution X {x})) = + (\y\P `space M. \x\X ` space M. + joint_distribution X P {(x, y)} * log b (joint_distribution X P {(x, y)}))" + proof (subst setsum_image_split[OF svi], + safe intro!: finite_imageI finite_space setsum_mono_zero_cong_left imageI) + fix p x assume in_space: "p \ space M" "x \ space M" + assume "joint_distribution X P {(X x, P p)} * log b (joint_distribution X P {(X x, P p)}) \ 0" + hence "(\x. (X x, P x)) -` {(X x, P p)} \ space M \ {}" by (auto simp: distribution_def) + with svi[unfolded subvimage_def, rule_format, OF `x \ space M`] + show "x \ P -` {P p}" by auto + next + fix p x assume in_space: "p \ space M" "x \ space M" + assume "P x = P p" + from this[symmetric] svi[unfolded subvimage_def, rule_format, OF `x \ space M`] + have "X -` {X x} \ space M \ P -` {P p} \ space M" + by auto + hence "(\x. (X x, P x)) -` {(X x, P p)} \ space M = X -` {X x} \ space M" + by auto + thus "distribution X {X x} * log b (distribution X {X x}) = + joint_distribution X P {(X x, P p)} * + log b (joint_distribution X P {(X x, P p)})" + by (auto simp: distribution_def) + qed + thus ?thesis + unfolding entropy_eq conditional_entropy_eq + by (simp add: setsum_cartesian_product' setsum_subtractf setsum_distribution + setsum_left_distrib[symmetric] setsum_commute[where B="P`space M"]) +qed + +corollary (in finite_information_space) entropy_data_processing: + "\(f \ X) \ \(X)" + by (subst (2) entropy_partition[of _ "f \ X"]) (auto intro: conditional_entropy_positive) + +lemma (in prob_space) distribution_cong: + assumes "\x. x \ space M \ X x = Y x" + shows "distribution X = distribution Y" + unfolding distribution_def expand_fun_eq + using assms by (auto intro!: arg_cong[where f=prob]) + +lemma (in prob_space) joint_distribution_cong: + assumes "\x. x \ space M \ X x = X' x" + assumes "\x. 
x \ space M \ Y x = Y' x" + shows "joint_distribution X Y = joint_distribution X' Y'" + unfolding distribution_def expand_fun_eq + using assms by (auto intro!: arg_cong[where f=prob]) + +lemma image_cong: + "\ \x. x \ S \ X x = X' x \ \ X ` S = X' ` S" + by (auto intro!: image_eqI) + +lemma (in finite_information_space) mutual_information_cong: + assumes X: "\x. x \ space M \ X x = X' x" + assumes Y: "\x. x \ space M \ Y x = Y' x" + shows "\(X ; Y) = \(X' ; Y')" +proof - + have "X ` space M = X' ` space M" using X by (rule image_cong) + moreover have "Y ` space M = Y' ` space M" using Y by (rule image_cong) + ultimately show ?thesis + unfolding mutual_information_eq + using + assms[THEN distribution_cong] + joint_distribution_cong[OF assms] + by (auto intro!: setsum_cong) +qed + +corollary (in finite_information_space) entropy_of_inj: + assumes "inj_on f (X`space M)" + shows "\(f \ X) = \(X)" +proof (rule antisym) + show "\(f \ X) \ \(X)" using entropy_data_processing . +next + have "\(X) = \(the_inv_into (X`space M) f \ (f \ X))" + by (auto intro!: mutual_information_cong simp: entropy_def the_inv_into_f_f[OF assms]) + also have "... \ \(f \ X)" + using entropy_data_processing . + finally show "\(X) \ \(f \ X)" . +qed + end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Probability/Lebesgue.thy --- a/src/HOL/Probability/Lebesgue.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Probability/Lebesgue.thy Tue May 04 20:30:22 2010 +0200 @@ -25,6 +25,21 @@ shows "nonneg (neg_part f)" unfolding nonneg_def neg_part_def min_def by auto +lemma pos_neg_part_abs: + fixes f :: "'a \ real" + shows "pos_part f x + neg_part f x = \f x\" +unfolding real_abs_def pos_part_def neg_part_def by auto + +lemma pos_part_abs: + fixes f :: "'a \ real" + shows "pos_part (\ x. \f x\) y = \f y\" +unfolding pos_part_def real_abs_def by auto + +lemma neg_part_abs: + fixes f :: "'a \ real" + shows "neg_part (\ x. \f x\) y = 0" +unfolding neg_part_def real_abs_def by auto + lemma (in measure_space) assumes "f \ borel_measurable M" shows pos_part_borel_measurable: "pos_part f \ borel_measurable M" @@ -1273,6 +1288,22 @@ thus "?int S" and "?I S" by auto qed +lemma (in measure_space) integrable_abs: + assumes "integrable f" + shows "integrable (\ x. \f x\)" +using assms +proof - + from assms obtain p q where pq: "p \ nnfis (pos_part f)" "q \ nnfis (neg_part f)" + unfolding integrable_def by auto + hence "p + q \ nnfis (\ x. pos_part f x + neg_part f x)" + using nnfis_add by auto + hence "p + q \ nnfis (\ x. \f x\)" using pos_neg_part_abs[of f] by simp + thus ?thesis unfolding integrable_def + using ext[OF pos_part_abs[of f], of "\ y. y"] + ext[OF neg_part_abs[of f], of "\ y. y"] + using nnfis_0 by auto +qed + lemma markov_ineq: assumes "integrable f" "0 < a" "integrable (\x. \f x\^n)" shows "measure M (f -` {a ..} \ space M) \ integral (\x. \f x\^n) / a^n" @@ -1310,6 +1341,61 @@ by (auto intro!: mult_imp_le_div_pos[OF `0 < a ^ n`], simp add: real_mult_commute) qed +lemma (in measure_space) integral_0: + fixes f :: "'a \ real" + assumes "integrable f" "integral f = 0" "nonneg f" and borel: "f \ borel_measurable M" + shows "measure M ({x. f x \ 0} \ space M) = 0" +proof - + have "{x. f x \ 0} = {x. \f x\ > 0}" by auto + moreover + { fix y assume "y \ {x. \ f x \ > 0}" + hence "\ f y \ > 0" by auto + hence "\ n. \f y\ \ inverse (real (Suc n))" + using ex_inverse_of_nat_Suc_less[of "\f y\"] less_imp_le unfolding real_of_nat_def by auto + hence "y \ (\ n. {x. 
\f x\ \ inverse (real (Suc n))})" + by auto } + moreover + { fix y assume "y \ (\ n. {x. \f x\ \ inverse (real (Suc n))})" + then obtain n where n: "y \ {x. \f x\ \ inverse (real (Suc n))}" by auto + hence "\f y\ \ inverse (real (Suc n))" by auto + hence "\f y\ > 0" + using real_of_nat_Suc_gt_zero + positive_imp_inverse_positive[of "real_of_nat (Suc n)"] by fastsimp + hence "y \ {x. \f x\ > 0}" by auto } + ultimately have fneq0_UN: "{x. f x \ 0} = (\ n. {x. \f x\ \ inverse (real (Suc n))})" + by blast + { fix n + have int_one: "integrable (\ x. \f x\ ^ 1)" using integrable_abs assms by auto + have "measure M (f -` {inverse (real (Suc n))..} \ space M) + \ integral (\ x. \f x\ ^ 1) / (inverse (real (Suc n)) ^ 1)" + using markov_ineq[OF `integrable f` _ int_one] real_of_nat_Suc_gt_zero by auto + hence le0: "measure M (f -` {inverse (real (Suc n))..} \ space M) \ 0" + using assms unfolding nonneg_def by auto + have "{x. f x \ inverse (real (Suc n))} \ space M \ sets M" + apply (subst Int_commute) unfolding Int_def + using borel[unfolded borel_measurable_ge_iff] by simp + hence m0: "measure M ({x. f x \ inverse (real (Suc n))} \ space M) = 0 \ + {x. f x \ inverse (real (Suc n))} \ space M \ sets M" + using positive le0 unfolding atLeast_def by fastsimp } + moreover hence "range (\ n. {x. f x \ inverse (real (Suc n))} \ space M) \ sets M" + by auto + moreover + { fix n + have "inverse (real (Suc n)) \ inverse (real (Suc (Suc n)))" + using less_imp_inverse_less real_of_nat_Suc_gt_zero[of n] by fastsimp + hence "\ x. f x \ inverse (real (Suc n)) \ f x \ inverse (real (Suc (Suc n)))" by (rule order_trans) + hence "{x. f x \ inverse (real (Suc n))} \ space M + \ {x. f x \ inverse (real (Suc (Suc n)))} \ space M" by auto } + ultimately have "(\ x. 0) ----> measure M (\ n. {x. f x \ inverse (real (Suc n))} \ space M)" + using monotone_convergence[of "\ n. {x. f x \ inverse (real (Suc n))} \ space M"] + unfolding o_def by (simp del: of_nat_Suc) + hence "measure M (\ n. {x. f x \ inverse (real (Suc n))} \ space M) = 0" + using LIMSEQ_const[of 0] LIMSEQ_unique by simp + hence "measure M ((\ n. {x. \f x\ \ inverse (real (Suc n))}) \ space M) = 0" + using assms unfolding nonneg_def by auto + thus "measure M ({x. f x \ 0} \ space M) = 0" using fneq0_UN by simp +qed + section "Lebesgue integration on countable spaces" lemma nnfis_on_countable: @@ -1551,10 +1637,6 @@ end -locale finite_measure_space = measure_space + - assumes finite_space: "finite (space M)" - and sets_eq_Pow: "sets M = Pow (space M)" - lemma sigma_algebra_cong: fixes M :: "('a, 'b) algebra_scheme" and M' :: "('a, 'c) algebra_scheme" assumes *: "sigma_algebra M" @@ -1610,7 +1692,7 @@ lemma (in finite_measure_space) RN_deriv_finite_singleton: fixes v :: "'a set \ real" assumes ms_v: "measure_space (M\measure := v\)" - and eq_0: "\x. measure M {x} = 0 \ v {x} = 0" + and eq_0: "\x. \ x \ space M ; measure M {x} = 0 \ \ v {x} = 0" and "x \ space M" and "measure M {x} \ 0" shows "RN_deriv v x = v {x} / (measure M {x})" (is "_ = ?v x") unfolding RN_deriv_def @@ -1621,7 +1703,7 @@ fix a assume "a \ sets M" hence "a \ space M" and "finite a" using sets_into_space finite_space by (auto intro: finite_subset) - have *: "\x a. (if measure M {x} = 0 then 0 else v {x} * indicator_fn a x) = + have *: "\x a. 
x \ space M \ (if measure M {x} = 0 then 0 else v {x} * indicator_fn a x) = v {x} * indicator_fn a x" using eq_0 by auto from measure_space.measure_real_sum_image[OF ms_v, of a] diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Probability/Measure.thy --- a/src/HOL/Probability/Measure.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Probability/Measure.thy Tue May 04 20:30:22 2010 +0200 @@ -365,6 +365,18 @@ by arith qed +lemma (in measure_space) measure_mono: + assumes "a \ b" "a \ sets M" "b \ sets M" + shows "measure M a \ measure M b" +proof - + have "b = a \ (b - a)" using assms by auto + moreover have "{} = a \ (b - a)" by auto + ultimately have "measure M b = measure M a + measure M (b - a)" + using measure_additive[of a "b - a"] local.Diff[of b a] assms by auto + moreover have "measure M (b - a) \ 0" using positive assms by auto + ultimately show "measure M a \ measure M b" by auto +qed + lemma disjoint_family_Suc: assumes Suc: "!!n. A n \ A (Suc n)" shows "disjoint_family (\i. A (Suc i) - A i)" @@ -1045,4 +1057,12 @@ qed qed +locale finite_measure_space = measure_space + + assumes finite_space: "finite (space M)" + and sets_eq_Pow: "sets M = Pow (space M)" + +lemma (in finite_measure_space) sum_over_space: "(\x\space M. measure M {x}) = measure M (space M)" + using measure_finitely_additive''[of "space M" "\i. {i}"] + by (simp add: sets_eq_Pow disjoint_family_on_def finite_space) + end \ No newline at end of file diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Probability/Probability_Space.thy --- a/src/HOL/Probability/Probability_Space.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Probability/Probability_Space.thy Tue May 04 20:30:22 2010 +0200 @@ -21,22 +21,23 @@ definition "distribution X = (\s. prob ((X -` s) \ (space M)))" -definition - "probably e \ e \ events \ prob e = 1" +abbreviation + "joint_distribution X Y \ distribution (\x. (X x, Y x))" -definition - "possibly e \ e \ events \ prob e \ 0" +(* +definition probably :: "('a \ bool) \ bool" (binder "\\<^sup>*" 10) where + "probably P \ { x. P x } \ events \ prob { x. P x } = 1" +definition possibly :: "('a \ bool) \ bool" (binder "\\<^sup>*" 10) where + "possibly P \ { x. P x } \ events \ prob { x. P x } \ 0" +*) definition - "joint_distribution X Y \ (\a. prob ((\x. (X x, Y x)) -` a \ space M))" + "conditional_expectation X M' \ SOME f. f \ measurable M' borel_space \ + (\ g \ sets M'. measure_space.integral M' (\x. f x * indicator_fn g x) = + measure_space.integral M' (\x. X x * indicator_fn g x))" definition - "conditional_expectation X s \ THE f. random_variable borel_space f \ - (\ g \ s. integral (\x. f x * indicator_fn g x) = - integral (\x. X x * indicator_fn g x))" - -definition - "conditional_prob e1 e2 \ conditional_expectation (indicator_fn e1) e2" + "conditional_prob E M' \ conditional_expectation (indicator_fn E) M'" lemma positive': "positive M prob" unfolding positive_def using positive empty_measure by blast @@ -389,14 +390,61 @@ locale finite_prob_space = prob_space + finite_measure_space -lemma (in finite_prob_space) finite_marginal_product_space_POW2: +lemma finite_prob_space_eq: + "finite_prob_space M \ finite_measure_space M \ measure M (space M) = 1" + unfolding finite_prob_space_def finite_measure_space_def prob_space_def prob_space_axioms_def + by auto + +lemma (in prob_space) not_empty: "space M \ {}" + using prob_space empty_measure by auto + +lemma (in finite_prob_space) sum_over_space_eq_1: "(\x\space M. 
measure M {x}) = 1" + using prob_space sum_over_space by simp + +lemma (in finite_prob_space) positive_distribution: "0 \ distribution X x" + unfolding distribution_def using positive sets_eq_Pow by simp + +lemma (in finite_prob_space) joint_distribution_restriction_fst: + "joint_distribution X Y A \ distribution X (fst ` A)" + unfolding distribution_def +proof (safe intro!: measure_mono) + fix x assume "x \ space M" and *: "(X x, Y x) \ A" + show "x \ X -` fst ` A" + by (auto intro!: image_eqI[OF _ *]) +qed (simp_all add: sets_eq_Pow) + +lemma (in finite_prob_space) joint_distribution_restriction_snd: + "joint_distribution X Y A \ distribution Y (snd ` A)" + unfolding distribution_def +proof (safe intro!: measure_mono) + fix x assume "x \ space M" and *: "(X x, Y x) \ A" + show "x \ Y -` snd ` A" + by (auto intro!: image_eqI[OF _ *]) +qed (simp_all add: sets_eq_Pow) + +lemma (in finite_prob_space) distribution_order: + shows "0 \ distribution X x'" + and "(distribution X x' \ 0) \ (0 < distribution X x')" + and "r \ joint_distribution X Y {(x, y)} \ r \ distribution X {x}" + and "r \ joint_distribution X Y {(x, y)} \ r \ distribution Y {y}" + and "r < joint_distribution X Y {(x, y)} \ r < distribution X {x}" + and "r < joint_distribution X Y {(x, y)} \ r < distribution Y {y}" + and "distribution X {x} = 0 \ joint_distribution X Y {(x, y)} = 0" + and "distribution Y {y} = 0 \ joint_distribution X Y {(x, y)} = 0" + using positive_distribution[of X x'] + positive_distribution[of "\x. (X x, Y x)" "{(x, y)}"] + joint_distribution_restriction_fst[of X Y "{(x, y)}"] + joint_distribution_restriction_snd[of X Y "{(x, y)}"] + by auto + +lemma (in finite_prob_space) finite_product_measure_space: assumes "finite s1" "finite s2" shows "finite_measure_space \ space = s1 \ s2, sets = Pow (s1 \ s2), measure = joint_distribution X Y\" (is "finite_measure_space ?M") proof (rule finite_Pow_additivity_sufficient) show "positive ?M (measure ?M)" unfolding positive_def using positive'[unfolded positive_def] assms sets_eq_Pow - by (simp add: joint_distribution_def) + by (simp add: distribution_def) show "additive ?M (measure ?M)" unfolding additive_def proof safe @@ -406,7 +454,7 @@ assume "x \ y = {}" from additive[unfolded additive_def, rule_format, OF A B] this show "measure ?M (x \ y) = measure ?M x + measure ?M y" - apply (simp add: joint_distribution_def) + apply (simp add: distribution_def) apply (subst Int_Un_distrib2) by auto qed @@ -418,11 +466,58 @@ by simp qed -lemma (in finite_prob_space) finite_marginal_product_space_POW: +lemma (in finite_prob_space) finite_product_measure_space_of_images: shows "finite_measure_space \ space = X ` space M \ Y ` space M, sets = Pow (X ` space M \ Y ` space M), measure = joint_distribution X Y\" (is "finite_measure_space ?M") - using finite_space by (auto intro!: finite_marginal_product_space_POW2) + using finite_space by (auto intro!: finite_product_measure_space) + +lemma (in finite_prob_space) finite_measure_space: + shows "finite_measure_space \ space = X ` space M, sets = Pow (X ` space M), measure = distribution X\" + (is "finite_measure_space ?S") +proof (rule finite_Pow_additivity_sufficient, simp_all) + show "finite (X ` space M)" using finite_space by simp + + show "positive ?S (distribution X)" unfolding distribution_def + unfolding positive_def using positive'[unfolded positive_def] sets_eq_Pow by auto + + show "additive ?S (distribution X)" unfolding additive_def distribution_def + proof (simp, safe) + fix x y + have x: "(X -` x) \ space M \ sets M" + 
and y: "(X -` y) \ space M \ sets M" using sets_eq_Pow by auto + assume "x \ y = {}" + from additive[unfolded additive_def, rule_format, OF x y] this + have "prob (((X -` x) \ (X -` y)) \ space M) = + prob ((X -` x) \ space M) + prob ((X -` y) \ space M)" + apply (subst Int_Un_distrib2) + by auto + thus "prob ((X -` x \ X -` y) \ space M) = prob (X -` x \ space M) + prob (X -` y \ space M)" + by auto + qed +qed + +lemma (in finite_prob_space) finite_prob_space_of_images: + "finite_prob_space \ space = X ` space M, sets = Pow (X ` space M), measure = distribution X\" + (is "finite_prob_space ?S") +proof (simp add: finite_prob_space_eq, safe) + show "finite_measure_space ?S" by (rule finite_measure_space) + have "X -` X ` space M \ space M = space M" by auto + thus "distribution X (X`space M) = 1" + by (simp add: distribution_def prob_space) +qed + +lemma (in finite_prob_space) finite_product_prob_space_of_images: + "finite_prob_space \ space = X ` space M \ Y ` space M, sets = Pow (X ` space M \ Y ` space M), + measure = joint_distribution X Y\" + (is "finite_prob_space ?S") +proof (simp add: finite_prob_space_eq, safe) + show "finite_measure_space ?S" by (rule finite_product_measure_space_of_images) + + have "X -` X ` space M \ Y -` Y ` space M \ space M = space M" by auto + thus "joint_distribution X Y (X ` space M \ Y ` space M) = 1" + by (simp add: distribution_def prob_space vimage_Times comp_def) +qed end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Probability/Product_Measure.thy --- a/src/HOL/Probability/Product_Measure.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Probability/Product_Measure.thy Tue May 04 20:30:22 2010 +0200 @@ -1,5 +1,5 @@ theory Product_Measure -imports "~~/src/HOL/Probability/Lebesgue" +imports Lebesgue begin definition diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Probability/ex/Dining_Cryptographers.thy --- a/src/HOL/Probability/ex/Dining_Cryptographers.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Probability/ex/Dining_Cryptographers.thy Tue May 04 20:30:22 2010 +0200 @@ -2,10 +2,10 @@ imports Information begin -lemma finite_prob_spaceI: - "\ finite_measure_space M ; measure M (space M) = 1 \ \ finite_prob_space M" - unfolding finite_measure_space_def finite_measure_space_axioms_def - finite_prob_space_def prob_space_def prob_space_axioms_def +lemma finite_information_spaceI: + "\ finite_measure_space M ; measure M (space M) = 1 ; 1 < b \ \ finite_information_space M b" + unfolding finite_information_space_def finite_measure_space_def finite_measure_space_axioms_def + finite_prob_space_def prob_space_def prob_space_axioms_def finite_information_space_axioms_def by auto locale finite_space = @@ -21,8 +21,8 @@ and measure_M[simp]: "measure M s = real (card s) / real (card S)" by (simp_all add: M_def) -sublocale finite_space \ finite_prob_space M -proof (rule finite_prob_spaceI) +sublocale finite_space \ finite_information_space M 2 +proof (rule finite_information_spaceI) let ?measure = "\s::'a set. real (card s) / real (card S)" show "finite_measure_space M" @@ -40,9 +40,7 @@ by (cases "card S = 0") (simp_all add: field_simps) qed qed - - show "measure M (space M) = 1" by simp -qed +qed simp_all lemma set_of_list_extend: "{xs. length xs = Suc n \ (\x\set xs. x \ A)} = @@ -83,19 +81,6 @@ and card_list_length: "card {xs. length xs = n \ (\x\set xs. 
x \ A)} = (card A)^n" using card_finite_list_length[OF assms, of n] by auto -lemma product_not_empty: - "A \ {} \ B \ {} \ A \ B \ {}" - by auto - -lemma fst_product[simp]: "fst ` (A \ B) = (if B = {} then {} else A)" - by (auto intro!: image_eqI) - -lemma snd_product[simp]: "snd ` (A \ B) = (if A = {} then {} else B)" - by (auto intro!: image_eqI) - -lemma Ex_eq_length[simp]: "\xs. length xs = n" - by (rule exI[of _ "replicate n undefined"]) simp - section "Define the state space" text {* @@ -197,10 +182,10 @@ have *: "{xs. length xs = n} \ {}" by (auto intro!: exI[of _ "replicate n undefined"]) show ?thesis - unfolding payer_def_raw dc_crypto fst_product if_not_P[OF *] .. + unfolding payer_def_raw dc_crypto fst_image_times if_not_P[OF *] .. qed -lemma image_ex1_eq: "inj_on f A \ (b \ f ` A) = (\!x \ A. b = f x)" +lemma image_ex1_eq: "inj_on f A \ (b \ f ` A) \ (\!x \ A. b = f x)" by (unfold inj_on_def) blast lemma Ex1_eq: "\! x. P x \ P x \ P y \ x = y" @@ -495,26 +480,24 @@ show "finite dc_crypto" using finite_dc_crypto . show "dc_crypto \ {}" unfolding dc_crypto - apply (rule product_not_empty) using n_gt_3 by (auto intro: exI[of _ "replicate n True"]) qed notation (in dining_cryptographers_space) - finite_mutual_information_2 ("\'( _ ; _ ')") + finite_mutual_information ("\'( _ ; _ ')") notation (in dining_cryptographers_space) - finite_entropy_2 ("\'( _ ')") + finite_entropy ("\'( _ ')") notation (in dining_cryptographers_space) - finite_conditional_entropy_2 ("\'( _ | _ ')") + finite_conditional_entropy ("\'( _ | _ ')") theorem (in dining_cryptographers_space) "\( inversion ; payer ) = 0" proof - - have b: "1 < (2 :: real)" by simp have n: "0 < n" using n_gt_3 by auto - have lists: "{xs. length xs = n} \ {}" by auto + have lists: "{xs. length xs = n} \ {}" using Ex_list_of_length by auto have card_image_inversion: "real (card (inversion ` dc_crypto)) = 2^n / 2" @@ -526,7 +509,7 @@ { have "\(inversion|payer) = - (\x\inversion`dc_crypto. (\z\Some ` {0..x\inversion`dc_crypto. (\z\Some ` {0..(inversion|payer) = real n - 1" . } moreover { have "\(inversion) = - (\x \ inversion`dc_crypto. ?dI {x} * log 2 (?dI {x}))" - unfolding finite_entropy_reduce[OF b] by simp + unfolding entropy_eq by simp also have "... = - (\x \ inversion`dc_crypto. 2 * (1 - real n) / 2^n)" unfolding neg_equal_iff_equal proof (rule setsum_cong[OF refl]) @@ -577,7 +560,7 @@ finally have "\(inversion) = real n - 1" . } ultimately show ?thesis - unfolding finite_mutual_information_eq_entropy_conditional_entropy[OF b] + unfolding mutual_information_eq_entropy_conditional_entropy by simp qed diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Product_Type.thy --- a/src/HOL/Product_Type.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Product_Type.thy Tue May 04 20:30:22 2010 +0200 @@ -990,6 +990,15 @@ lemma Times_Diff_distrib1: "(A - B) <*> C = (A <*> C) - (B <*> C)" by blast +lemma Times_empty[simp]: "A \ B = {} \ A = {} \ B = {}" + by auto + +lemma fst_image_times[simp]: "fst ` (A \ B) = (if B = {} then {} else A)" + by (auto intro!: image_eqI) + +lemma snd_image_times[simp]: "snd ` (A \ B) = (if A = {} then {} else B)" + by (auto intro!: image_eqI) + lemma insert_times_insert[simp]: "insert a A \ insert b B = insert (a,b) (A \ insert b B \ insert a A \ B)" @@ -999,13 +1008,20 @@ by (auto, rule_tac p = "f x" in PairE, auto) lemma swap_inj_on: - "inj_on (%(i, j). (j, i)) (A \ B)" - by (unfold inj_on_def) fast + "inj_on (\(i, j). (j, i)) A" + by (auto intro!: inj_onI) lemma swap_product: "(%(i, j). 
(j, i)) ` (A \ B) = B \ A" by (simp add: split_def image_def) blast +lemma image_split_eq_Sigma: + "(\x. (f x, g x)) ` A = Sigma (f ` A) (\x. g ` (f -` {x} \ A))" +proof (safe intro!: imageI vimageI) + fix a b assume *: "a \ A" "b \ A" and eq: "f a = f b" + show "(f b, g a) \ (\x. (f x, g x)) ` A" + using * eq[symmetric] by auto +qed simp_all subsubsection {* Code generator setup *} diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Quotient_Examples/FSet.thy --- a/src/HOL/Quotient_Examples/FSet.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Quotient_Examples/FSet.thy Tue May 04 20:30:22 2010 +0200 @@ -1,11 +1,12 @@ -(* Title: Quotient.thy - Author: Cezary Kaliszyk - Author: Christian Urban +(* Title: HOL/Quotient_Examples/FSet.thy + Author: Cezary Kaliszyk, TU Munich + Author: Christian Urban, TU Munich - provides a reasoning infrastructure for the type of finite sets +A reasoning infrastructure for the type of finite sets. *) + theory FSet -imports Quotient Quotient_List List +imports Quotient_List begin text {* Definiton of List relation and the quotient type *} @@ -80,9 +81,9 @@ lemma compose_list_refl: shows "(list_rel op \ OOO op \) r r" proof - show c: "list_rel op \ r r" by (rule list_rel_refl) - have d: "r \ r" by (rule equivp_reflp[OF fset_equivp]) - show b: "(op \ OO list_rel op \) r r" by (rule pred_compI) (rule d, rule c) + have *: "r \ r" by (rule equivp_reflp[OF fset_equivp]) + show "list_rel op \ r r" by (rule list_rel_refl) + with * show "(op \ OO list_rel op \) r r" .. qed lemma Quotient_fset_list: @@ -117,7 +118,8 @@ show "(list_rel op \ OOO op \) s s" by (rule compose_list_refl) next assume a: "(list_rel op \ OOO op \) r s" - then have b: "map abs_fset r \ map abs_fset s" proof (elim pred_compE) + then have b: "map abs_fset r \ map abs_fset s" + proof (elim pred_compE) fix b ba assume c: "list_rel op \ r b" assume d: "b \ ba" @@ -221,20 +223,43 @@ assumes a: "xs \ ys" shows "fcard_raw xs = fcard_raw ys" using a - apply (induct xs arbitrary: ys) - apply (auto simp add: memb_def) - apply (subgoal_tac "\x. (x \ set xs) = (x \ set ys)") - apply (auto) - apply (drule_tac x="x" in spec) - apply (blast) - apply (drule_tac x="[x \ ys. x \ a]" in meta_spec) - apply (simp add: fcard_raw_delete_one memb_def) - apply (case_tac "a \ set ys") - apply (simp only: if_True) - apply (subgoal_tac "\x. (x \ set xs) = (x \ set ys \ x \ a)") - apply (drule Suc_pred'[OF fcard_raw_gt_0]) - apply (auto) - done + proof (induct xs arbitrary: ys) + case Nil + show ?case using Nil.prems by simp + next + case (Cons a xs) + have a: "a # xs \ ys" by fact + have b: "\ys. xs \ ys \ fcard_raw xs = fcard_raw ys" by fact + show ?case proof (cases "a \ set xs") + assume c: "a \ set xs" + have "\x. (x \ set xs) = (x \ set ys)" + proof (intro allI iffI) + fix x + assume "x \ set xs" + then show "x \ set ys" using a by auto + next + fix x + assume d: "x \ set ys" + have e: "(x \ set (a # xs)) = (x \ set ys)" using a by simp + show "x \ set xs" using c d e unfolding list_eq.simps by simp blast + qed + then show ?thesis using b c by (simp add: memb_def) + next + assume c: "a \ set xs" + have d: "xs \ [x\ys . x \ a] \ fcard_raw xs = fcard_raw [x\ys . x \ a]" using b by simp + have "Suc (fcard_raw xs) = fcard_raw ys" + proof (cases "a \ set ys") + assume e: "a \ set ys" + have f: "\x. 
(x \ set xs) = (x \ set ys \ x \ a)" using a c + by (auto simp add: fcard_raw_delete_one) + have "fcard_raw ys = Suc (fcard_raw ys - 1)" by (rule Suc_pred'[OF fcard_raw_gt_0]) (rule e) + then show ?thesis using d e f by (simp_all add: fcard_raw_delete_one memb_def) + next + case False then show ?thesis using a c d by auto + qed + then show ?thesis using a c d by (simp add: memb_def) + qed +qed lemma fcard_raw_rsp[quot_respect]: shows "(op \ ===> op =) fcard_raw fcard_raw" @@ -306,8 +331,8 @@ obtain xb where e: "xb \ set x" and f: "xa \ set xb" using d by auto have "\y. y \ set x' \ xb \ y" by (rule list_rel_find_element[OF e a]) then obtain ya where h: "ya \ set x'" and i: "xb \ ya" by auto - have j: "ya \ set y'" using b h by simp - have "\yb. yb \ set y \ ya \ yb" by (rule list_rel_find_element[OF j c]) + have "ya \ set y'" using b h by simp + then have "\yb. yb \ set y \ ya \ yb" using c by (rule list_rel_find_element) then show ?thesis using f i by auto qed @@ -334,6 +359,10 @@ then show "concat a \ concat b" by simp qed +lemma [quot_respect]: + "((op =) ===> op \ ===> op \) filter filter" + by auto + text {* Distributive lattice with bot *} lemma sub_list_not_eq: @@ -385,9 +414,10 @@ apply (induct x) apply (simp_all add: memb_def) apply (simp add: memb_def[symmetric] memb_finter_raw) - by (auto simp add: memb_def) + apply (auto simp add: memb_def) + done -instantiation fset :: (type) "{bot,distrib_lattice}" +instantiation fset :: (type) "{bounded_lattice_bot,distrib_lattice}" begin quotient_definition @@ -496,10 +526,10 @@ where "x |\| S \ \ (x |\| S)" -section {* Other constants on the Quotient Type *} +section {* Other constants on the Quotient Type *} quotient_definition - "fcard :: 'a fset \ nat" + "fcard :: 'a fset \ nat" is "fcard_raw" @@ -509,11 +539,11 @@ "map" quotient_definition - "fdelete :: 'a fset \ 'a \ 'a fset" + "fdelete :: 'a fset \ 'a \ 'a fset" is "delete_raw" quotient_definition - "fset_to_set :: 'a fset \ 'a set" + "fset_to_set :: 'a fset \ 'a set" is "set" quotient_definition @@ -525,6 +555,11 @@ is "concat" +quotient_definition + "ffilter :: ('a \ bool) \ 'a fset \ 'a fset" +is + "filter" + text {* Compositional Respectfullness and Preservation *} lemma [quot_respect]: "(list_rel op \ OOO op \) [] []" @@ -701,23 +736,37 @@ by auto lemma fset_raw_strong_cases: - "(xs = []) \ (\x ys. ((\ memb x ys) \ (xs \ x # ys)))" - apply (induct xs) - apply (simp) - apply (rule disjI2) - apply (erule disjE) - apply (rule_tac x="a" in exI) - apply (rule_tac x="[]" in exI) - apply (simp add: memb_def) - apply (erule exE)+ - apply (case_tac "x = a") - apply (rule_tac x="a" in exI) - apply (rule_tac x="ys" in exI) - apply (simp) - apply (rule_tac x="x" in exI) - apply (rule_tac x="a # ys" in exI) - apply (auto simp add: memb_def) - done + obtains "xs = []" + | x ys where "\ memb x ys" and "xs \ x # ys" +proof (induct xs arbitrary: x ys) + case Nil + then show thesis by simp +next + case (Cons a xs) + have a: "\xs = [] \ thesis; \x ys. \\ memb x ys; xs \ x # ys\ \ thesis\ \ thesis" by fact + have b: "\x' ys'. \\ memb x' ys'; a # xs \ x' # ys'\ \ thesis" by fact + have c: "xs = [] \ thesis" by (metis no_memb_nil singleton_list_eq b) + have "\x ys. 
\\ memb x ys; xs \ x # ys\ \ thesis" + proof - + fix x :: 'a + fix ys :: "'a list" + assume d:"\ memb x ys" + assume e:"xs \ x # ys" + show thesis + proof (cases "x = a") + assume h: "x = a" + then have f: "\ memb a ys" using d by simp + have g: "a # xs \ a # ys" using e h by auto + show thesis using b f g by simp + next + assume h: "x \ a" + then have f: "\ memb x (a # ys)" using d unfolding memb_def by auto + have g: "a # xs \ x # (a # ys)" using e h by auto + show thesis using b f g by simp + qed + qed + then show thesis using a c by blast +qed section {* deletion *} @@ -741,8 +790,8 @@ "finter_raw l [] = []" by (induct l) (simp_all add: not_memb_nil) -lemma set_cong: - shows "(set x = set y) = (x \ y)" +lemma set_cong: + shows "(x \ y) = (set x = set y)" by auto lemma inj_map_eq_iff: @@ -812,13 +861,13 @@ case (Suc m) have b: "l \ r" by fact have d: "fcard_raw l = Suc m" by fact - have "\a. memb a l" by (rule fcard_raw_suc_memb[OF d]) + then have "\a. memb a l" by (rule fcard_raw_suc_memb) then obtain a where e: "memb a l" by auto then have e': "memb a r" using list_eq.simps[simplified memb_def[symmetric], of l r] b by auto have f: "fcard_raw (delete_raw l a) = m" using fcard_raw_delete[of l a] e d by simp have g: "delete_raw l a \ delete_raw r a" using delete_raw_rsp[OF b] by simp - have g': "list_eq2 (delete_raw l a) (delete_raw r a)" by (rule Suc.hyps[OF f g]) - have h: "list_eq2 (a # delete_raw l a) (a # delete_raw r a)" by (rule list_eq2.intros(5)[OF g']) + have "list_eq2 (delete_raw l a) (delete_raw r a)" by (rule Suc.hyps[OF f g]) + then have h: "list_eq2 (a # delete_raw l a) (a # delete_raw r a)" by (rule list_eq2.intros(5)) have i: "list_eq2 l (a # delete_raw l a)" by (rule list_eq2.intros(3)[OF memb_delete_list_eq2[OF e]]) have "list_eq2 l (a # delete_raw r a)" by (rule list_eq2.intros(6)[OF i h]) @@ -828,6 +877,38 @@ then show "l \ r \ list_eq2 l r" by blast qed +text {* Set *} + +lemma sub_list_set: "sub_list xs ys = (set xs \ set ys)" + by (metis rev_append set_append set_cong set_rev sub_list_append sub_list_append_left sub_list_def sub_list_not_eq subset_Un_eq) + +lemma sub_list_neq_set: "(sub_list xs ys \ \ list_eq xs ys) = (set xs \ set ys)" + by (auto simp add: sub_list_set) + +lemma fcard_raw_set: "fcard_raw xs = card (set xs)" + by (induct xs) (auto simp add: insert_absorb memb_def card_insert_disjoint finite_set) + +lemma memb_set: "memb x xs = (x \ set xs)" + by (simp only: memb_def) + +lemma filter_set: "set (filter P xs) = P \ (set xs)" + by (induct xs, simp) + (metis Int_insert_right_if0 Int_insert_right_if1 List.set.simps(2) filter.simps(2) mem_def) + +lemma delete_raw_set: "set (delete_raw xs x) = set xs - {x}" + by (induct xs) auto + +lemma inter_raw_set: "set (finter_raw xs ys) = set xs \ set ys" + by (induct xs) (simp_all add: memb_def) + +text {* Raw theorems of ffilter *} + +lemma sub_list_filter: "sub_list (filter P xs) (filter Q xs) = (\ x. memb x xs \ P x \ Q x)" +unfolding sub_list_def memb_def by auto + +lemma list_eq_filter: "list_eq (filter P xs) (filter Q xs) = (\x. memb x xs \ P x = Q x)" +unfolding memb_def by auto + text {* Lifted theorems *} lemma not_fin_fnil: "x |\| {||}" @@ -879,7 +960,7 @@ by (lifting none_memb_nil) lemma fset_cong: - "(fset_to_set S = fset_to_set T) = (S = T)" + "(S = T) = (fset_to_set S = fset_to_set T)" by (lifting set_cong) text {* fcard *} @@ -899,11 +980,11 @@ shows "(fcard S = 1) = (\x. 
S = {|x|})" by (lifting fcard_raw_1) -lemma fcard_gt_0: +lemma fcard_gt_0: shows "x \ fset_to_set S \ 0 < fcard S" by (lifting fcard_raw_gt_0) -lemma fcard_not_fin: +lemma fcard_not_fin: shows "(x |\| S) = (fcard (finsert x S) = Suc (fcard S))" by (lifting fcard_raw_not_memb) @@ -922,14 +1003,13 @@ text {* funion *} -lemma funion_simps[simp]: - shows "{||} |\| S = S" - and "finsert x S |\| T = finsert x (S |\| T)" - by (lifting append.simps) +lemmas [simp] = + sup_bot_left[where 'a="'a fset", standard] + sup_bot_right[where 'a="'a fset", standard] -lemma funion_empty[simp]: - shows "S |\| {||} = S" - by (lifting append_Nil2) +lemma funion_finsert[simp]: + shows "finsert x S |\| T = finsert x (S |\| T)" + by (lifting append.simps(2)) lemma singleton_union_left: "{|a|} |\| S = finsert a S" @@ -942,7 +1022,8 @@ section {* Induction and Cases rules for finite sets *} lemma fset_strong_cases: - "S = {||} \ (\x T. x |\| T \ S = finsert x T)" + obtains "xs = {||}" + | x ys where "x |\| ys" and "xs = finsert x ys" by (lifting fset_raw_strong_cases) lemma fset_exhaust[case_names fempty finsert, cases type: fset]: @@ -954,7 +1035,7 @@ by (lifting list.induct) lemma fset_induct[case_names fempty finsert, induct type: fset]: - assumes prem1: "P {||}" + assumes prem1: "P {||}" and prem2: "\x S. \x |\| S; P S\ \ P (finsert x S)" shows "P S" proof(induct S rule: fset_induct_weak) @@ -989,7 +1070,7 @@ lemma fmap_set_image: "fset_to_set (fmap f S) = f ` (fset_to_set S)" - by (induct S) (simp_all) + by (induct S) simp_all lemma inj_fmap_eq_iff: "inj f \ (fmap f S = fmap f T) = (S = T)" @@ -1002,6 +1083,40 @@ "x |\| S |\| T \ x |\| S \ x |\| T" by (lifting memb_append) +text {* to_set *} + +lemma fin_set: "(x |\| xs) = (x \ fset_to_set xs)" + by (lifting memb_set) + +lemma fnotin_set: "(x |\| xs) = (x \ fset_to_set xs)" + by (simp add: fin_set) + +lemma fcard_set: "fcard xs = card (fset_to_set xs)" + by (lifting fcard_raw_set) + +lemma fsubseteq_set: "(xs |\| ys) = (fset_to_set xs \ fset_to_set ys)" + by (lifting sub_list_set) + +lemma fsubset_set: "(xs |\| ys) = (fset_to_set xs \ fset_to_set ys)" + unfolding less_fset by (lifting sub_list_neq_set) + +lemma ffilter_set: "fset_to_set (ffilter P xs) = P \ fset_to_set xs" + by (lifting filter_set) + +lemma fdelete_set: "fset_to_set (fdelete xs x) = fset_to_set xs - {x}" + by (lifting delete_raw_set) + +lemma inter_set: "fset_to_set (xs |\| ys) = fset_to_set xs \ fset_to_set ys" + by (lifting inter_raw_set) + +lemma union_set: "fset_to_set (xs |\| ys) = fset_to_set xs \ fset_to_set ys" + by (lifting set_append) + +lemmas fset_to_set_trans = + fin_set fnotin_set fcard_set fsubseteq_set fsubset_set + inter_set union_set ffilter_set fset_to_set_simps + fset_cong fdelete_set fmap_set_image + text {* ffold *} lemma ffold_nil: "ffold f z {||} = z" @@ -1017,15 +1132,15 @@ text {* fdelete *} -lemma fin_fdelete: +lemma fin_fdelete: shows "x |\| fdelete S y \ x |\| S \ x \ y" by (lifting memb_delete_raw) -lemma fin_fdelete_ident: +lemma fin_fdelete_ident: shows "x |\| fdelete S x" by (lifting memb_delete_raw_ident) -lemma not_memb_fdelete_ident: +lemma not_memb_fdelete_ident: shows "x |\| S \ fdelete S x = S" by (lifting not_memb_delete_raw_ident) @@ -1102,8 +1217,77 @@ lemma "fconcat (xs |\| ys) = fconcat xs |\| fconcat ys" by (lifting concat_append) +text {* ffilter *} + +lemma subseteq_filter: "ffilter P xs <= ffilter Q xs = (\ x. x |\| xs \ P x \ Q x)" +by (lifting sub_list_filter) + +lemma eq_ffilter: "(ffilter P xs = ffilter Q xs) = (\x. 
x |\| xs \ P x = Q x)" +by (lifting list_eq_filter) + +lemma subset_ffilter: "(\x. x |\| xs \ P x \ Q x) \ (x |\| xs & \ P x & Q x) \ ffilter P xs < ffilter Q xs" +unfolding less_fset by (auto simp add: subseteq_filter eq_ffilter) + +section {* lemmas transferred from Finite_Set theory *} + +text {* finiteness for finite sets holds *} +lemma finite_fset: "finite (fset_to_set S)" + by (induct S) auto + +lemma fset_choice: "\x. x |\| A \ (\y. P x y) \ \f. \x. x |\| A \ P x (f x)" + unfolding fset_to_set_trans + by (rule finite_set_choice[simplified Ball_def, OF finite_fset]) + +lemma fsubseteq_fnil: "xs |\| {||} = (xs = {||})" + unfolding fset_to_set_trans + by (rule subset_empty) + +lemma not_fsubset_fnil: "\ xs |\| {||}" + unfolding fset_to_set_trans + by (rule not_psubset_empty) + +lemma fcard_mono: "xs |\| ys \ fcard xs \ fcard ys" + unfolding fset_to_set_trans + by (rule card_mono[OF finite_fset]) + +lemma fcard_fseteq: "xs |\| ys \ fcard ys \ fcard xs \ xs = ys" + unfolding fset_to_set_trans + by (rule card_seteq[OF finite_fset]) + +lemma psubset_fcard_mono: "xs |\| ys \ fcard xs < fcard ys" + unfolding fset_to_set_trans + by (rule psubset_card_mono[OF finite_fset]) + +lemma fcard_funion_finter: "fcard xs + fcard ys = fcard (xs |\| ys) + fcard (xs |\| ys)" + unfolding fset_to_set_trans + by (rule card_Un_Int[OF finite_fset finite_fset]) + +lemma fcard_funion_disjoint: "xs |\| ys = {||} \ fcard (xs |\| ys) = fcard xs + fcard ys" + unfolding fset_to_set_trans + by (rule card_Un_disjoint[OF finite_fset finite_fset]) + +lemma fcard_delete1_less: "x |\| xs \ fcard (fdelete xs x) < fcard xs" + unfolding fset_to_set_trans + by (rule card_Diff1_less[OF finite_fset]) + +lemma fcard_delete2_less: "x |\| xs \ y |\| xs \ fcard (fdelete (fdelete xs x) y) < fcard xs" + unfolding fset_to_set_trans + by (rule card_Diff2_less[OF finite_fset]) + +lemma fcard_delete1_le: "fcard (fdelete xs x) <= fcard xs" + unfolding fset_to_set_trans + by (rule card_Diff1_le[OF finite_fset]) + +lemma fcard_psubset: "ys |\| xs \ fcard ys < fcard xs \ ys |\| xs" + unfolding fset_to_set_trans + by (rule card_psubset[OF finite_fset]) + +lemma fcard_fmap_le: "fcard (fmap f xs) \ fcard xs" + unfolding fset_to_set_trans + by (rule card_image_le[OF finite_fset]) + ML {* -fun dest_fsetT (Type ("FSet.fset", [T])) = T +fun dest_fsetT (Type (@{type_name fset}, [T])) = T | dest_fsetT T = raise TYPE ("dest_fsetT: fset type expected", [T], []); *} diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Quotient_Examples/LarryDatatype.thy --- a/src/HOL/Quotient_Examples/LarryDatatype.thy Tue May 04 19:57:55 2010 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,394 +0,0 @@ -theory LarryDatatype -imports Main Quotient_Syntax -begin - -subsection{*Defining the Free Algebra*} - -datatype - freemsg = NONCE nat - | MPAIR freemsg freemsg - | CRYPT nat freemsg - | DECRYPT nat freemsg - -inductive - msgrel::"freemsg \ freemsg \ bool" (infixl "\" 50) -where - CD: "CRYPT K (DECRYPT K X) \ X" -| DC: "DECRYPT K (CRYPT K X) \ X" -| NONCE: "NONCE N \ NONCE N" -| MPAIR: "\X \ X'; Y \ Y'\ \ MPAIR X Y \ MPAIR X' Y'" -| CRYPT: "X \ X' \ CRYPT K X \ CRYPT K X'" -| DECRYPT: "X \ X' \ DECRYPT K X \ DECRYPT K X'" -| SYM: "X \ Y \ Y \ X" -| TRANS: "\X \ Y; Y \ Z\ \ X \ Z" - -lemmas msgrel.intros[intro] - -text{*Proving that it is an equivalence relation*} - -lemma msgrel_refl: "X \ X" -by (induct X, (blast intro: msgrel.intros)+) - -theorem equiv_msgrel: "equivp msgrel" -proof (rule equivpI) - show "reflp msgrel" by (simp add: reflp_def msgrel_refl) - show "symp 
msgrel" by (simp add: symp_def, blast intro: msgrel.SYM) - show "transp msgrel" by (simp add: transp_def, blast intro: msgrel.TRANS) -qed - -subsection{*Some Functions on the Free Algebra*} - -subsubsection{*The Set of Nonces*} - -fun - freenonces :: "freemsg \ nat set" -where - "freenonces (NONCE N) = {N}" -| "freenonces (MPAIR X Y) = freenonces X \ freenonces Y" -| "freenonces (CRYPT K X) = freenonces X" -| "freenonces (DECRYPT K X) = freenonces X" - -theorem msgrel_imp_eq_freenonces: - assumes a: "U \ V" - shows "freenonces U = freenonces V" - using a by (induct) (auto) - -subsubsection{*The Left Projection*} - -text{*A function to return the left part of the top pair in a message. It will -be lifted to the initial algrebra, to serve as an example of that process.*} -fun - freeleft :: "freemsg \ freemsg" -where - "freeleft (NONCE N) = NONCE N" -| "freeleft (MPAIR X Y) = X" -| "freeleft (CRYPT K X) = freeleft X" -| "freeleft (DECRYPT K X) = freeleft X" - -text{*This theorem lets us prove that the left function respects the -equivalence relation. It also helps us prove that MPair - (the abstract constructor) is injective*} -lemma msgrel_imp_eqv_freeleft_aux: - shows "freeleft U \ freeleft U" - by (induct rule: freeleft.induct) (auto) - -theorem msgrel_imp_eqv_freeleft: - assumes a: "U \ V" - shows "freeleft U \ freeleft V" - using a - by (induct) (auto intro: msgrel_imp_eqv_freeleft_aux) - -subsubsection{*The Right Projection*} - -text{*A function to return the right part of the top pair in a message.*} -fun - freeright :: "freemsg \ freemsg" -where - "freeright (NONCE N) = NONCE N" -| "freeright (MPAIR X Y) = Y" -| "freeright (CRYPT K X) = freeright X" -| "freeright (DECRYPT K X) = freeright X" - -text{*This theorem lets us prove that the right function respects the -equivalence relation. 
It also helps us prove that MPair - (the abstract constructor) is injective*} -lemma msgrel_imp_eqv_freeright_aux: - shows "freeright U \ freeright U" - by (induct rule: freeright.induct) (auto) - -theorem msgrel_imp_eqv_freeright: - assumes a: "U \ V" - shows "freeright U \ freeright V" - using a - by (induct) (auto intro: msgrel_imp_eqv_freeright_aux) - -subsubsection{*The Discriminator for Constructors*} - -text{*A function to distinguish nonces, mpairs and encryptions*} -fun - freediscrim :: "freemsg \ int" -where - "freediscrim (NONCE N) = 0" - | "freediscrim (MPAIR X Y) = 1" - | "freediscrim (CRYPT K X) = freediscrim X + 2" - | "freediscrim (DECRYPT K X) = freediscrim X - 2" - -text{*This theorem helps us prove @{term "Nonce N \ MPair X Y"}*} -theorem msgrel_imp_eq_freediscrim: - assumes a: "U \ V" - shows "freediscrim U = freediscrim V" - using a by (induct) (auto) - -subsection{*The Initial Algebra: A Quotiented Message Type*} - -quotient_type msg = freemsg / msgrel - by (rule equiv_msgrel) - -text{*The abstract message constructors*} - -quotient_definition - "Nonce :: nat \ msg" -is - "NONCE" - -quotient_definition - "MPair :: msg \ msg \ msg" -is - "MPAIR" - -quotient_definition - "Crypt :: nat \ msg \ msg" -is - "CRYPT" - -quotient_definition - "Decrypt :: nat \ msg \ msg" -is - "DECRYPT" - -lemma [quot_respect]: - shows "(op = ===> op \ ===> op \) CRYPT CRYPT" -by (auto intro: CRYPT) - -lemma [quot_respect]: - shows "(op = ===> op \ ===> op \) DECRYPT DECRYPT" -by (auto intro: DECRYPT) - -text{*Establishing these two equations is the point of the whole exercise*} -theorem CD_eq [simp]: - shows "Crypt K (Decrypt K X) = X" - by (lifting CD) - -theorem DC_eq [simp]: - shows "Decrypt K (Crypt K X) = X" - by (lifting DC) - -subsection{*The Abstract Function to Return the Set of Nonces*} - -quotient_definition - "nonces:: msg \ nat set" -is - "freenonces" - -text{*Now prove the four equations for @{term nonces}*} - -lemma [quot_respect]: - shows "(op \ ===> op =) freenonces freenonces" - by (simp add: msgrel_imp_eq_freenonces) - -lemma [quot_respect]: - shows "(op = ===> op \) NONCE NONCE" - by (simp add: NONCE) - -lemma nonces_Nonce [simp]: - shows "nonces (Nonce N) = {N}" - by (lifting freenonces.simps(1)) - -lemma [quot_respect]: - shows " (op \ ===> op \ ===> op \) MPAIR MPAIR" - by (simp add: MPAIR) - -lemma nonces_MPair [simp]: - shows "nonces (MPair X Y) = nonces X \ nonces Y" - by (lifting freenonces.simps(2)) - -lemma nonces_Crypt [simp]: - shows "nonces (Crypt K X) = nonces X" - by (lifting freenonces.simps(3)) - -lemma nonces_Decrypt [simp]: - shows "nonces (Decrypt K X) = nonces X" - by (lifting freenonces.simps(4)) - -subsection{*The Abstract Function to Return the Left Part*} - -quotient_definition - "left:: msg \ msg" -is - "freeleft" - -lemma [quot_respect]: - shows "(op \ ===> op \) freeleft freeleft" - by (simp add: msgrel_imp_eqv_freeleft) - -lemma left_Nonce [simp]: - shows "left (Nonce N) = Nonce N" - by (lifting freeleft.simps(1)) - -lemma left_MPair [simp]: - shows "left (MPair X Y) = X" - by (lifting freeleft.simps(2)) - -lemma left_Crypt [simp]: - shows "left (Crypt K X) = left X" - by (lifting freeleft.simps(3)) - -lemma left_Decrypt [simp]: - shows "left (Decrypt K X) = left X" - by (lifting freeleft.simps(4)) - -subsection{*The Abstract Function to Return the Right Part*} - -quotient_definition - "right:: msg \ msg" -is - "freeright" - -text{*Now prove the four equations for @{term right}*} - -lemma [quot_respect]: - shows "(op \ ===> op \) freeright 
freeright" - by (simp add: msgrel_imp_eqv_freeright) - -lemma right_Nonce [simp]: - shows "right (Nonce N) = Nonce N" - by (lifting freeright.simps(1)) - -lemma right_MPair [simp]: - shows "right (MPair X Y) = Y" - by (lifting freeright.simps(2)) - -lemma right_Crypt [simp]: - shows "right (Crypt K X) = right X" - by (lifting freeright.simps(3)) - -lemma right_Decrypt [simp]: - shows "right (Decrypt K X) = right X" - by (lifting freeright.simps(4)) - -subsection{*Injectivity Properties of Some Constructors*} - -lemma NONCE_imp_eq: - shows "NONCE m \ NONCE n \ m = n" - by (drule msgrel_imp_eq_freenonces, simp) - -text{*Can also be proved using the function @{term nonces}*} -lemma Nonce_Nonce_eq [iff]: - shows "(Nonce m = Nonce n) = (m = n)" -proof - assume "Nonce m = Nonce n" - then show "m = n" by (lifting NONCE_imp_eq) -next - assume "m = n" - then show "Nonce m = Nonce n" by simp -qed - -lemma MPAIR_imp_eqv_left: - shows "MPAIR X Y \ MPAIR X' Y' \ X \ X'" - by (drule msgrel_imp_eqv_freeleft) (simp) - -lemma MPair_imp_eq_left: - assumes eq: "MPair X Y = MPair X' Y'" - shows "X = X'" - using eq by (lifting MPAIR_imp_eqv_left) - -lemma MPAIR_imp_eqv_right: - shows "MPAIR X Y \ MPAIR X' Y' \ Y \ Y'" - by (drule msgrel_imp_eqv_freeright) (simp) - -lemma MPair_imp_eq_right: - shows "MPair X Y = MPair X' Y' \ Y = Y'" - by (lifting MPAIR_imp_eqv_right) - -theorem MPair_MPair_eq [iff]: - shows "(MPair X Y = MPair X' Y') = (X=X' & Y=Y')" - by (blast dest: MPair_imp_eq_left MPair_imp_eq_right) - -lemma NONCE_neqv_MPAIR: - shows "\(NONCE m \ MPAIR X Y)" - by (auto dest: msgrel_imp_eq_freediscrim) - -theorem Nonce_neq_MPair [iff]: - shows "Nonce N \ MPair X Y" - by (lifting NONCE_neqv_MPAIR) - -text{*Example suggested by a referee*} - -lemma CRYPT_NONCE_neq_NONCE: - shows "\(CRYPT K (NONCE M) \ NONCE N)" - by (auto dest: msgrel_imp_eq_freediscrim) - -theorem Crypt_Nonce_neq_Nonce: - shows "Crypt K (Nonce M) \ Nonce N" - by (lifting CRYPT_NONCE_neq_NONCE) - -text{*...and many similar results*} -lemma CRYPT2_NONCE_neq_NONCE: - shows "\(CRYPT K (CRYPT K' (NONCE M)) \ NONCE N)" - by (auto dest: msgrel_imp_eq_freediscrim) - -theorem Crypt2_Nonce_neq_Nonce: - shows "Crypt K (Crypt K' (Nonce M)) \ Nonce N" - by (lifting CRYPT2_NONCE_neq_NONCE) - -theorem Crypt_Crypt_eq [iff]: - shows "(Crypt K X = Crypt K X') = (X=X')" -proof - assume "Crypt K X = Crypt K X'" - hence "Decrypt K (Crypt K X) = Decrypt K (Crypt K X')" by simp - thus "X = X'" by simp -next - assume "X = X'" - thus "Crypt K X = Crypt K X'" by simp -qed - -theorem Decrypt_Decrypt_eq [iff]: - shows "(Decrypt K X = Decrypt K X') = (X=X')" -proof - assume "Decrypt K X = Decrypt K X'" - hence "Crypt K (Decrypt K X) = Crypt K (Decrypt K X')" by simp - thus "X = X'" by simp -next - assume "X = X'" - thus "Decrypt K X = Decrypt K X'" by simp -qed - -lemma msg_induct_aux: - shows "\\N. P (Nonce N); - \X Y. \P X; P Y\ \ P (MPair X Y); - \K X. P X \ P (Crypt K X); - \K X. P X \ P (Decrypt K X)\ \ P msg" - by (lifting freemsg.induct) - -lemma msg_induct [case_names Nonce MPair Crypt Decrypt, cases type: msg]: - assumes N: "\N. P (Nonce N)" - and M: "\X Y. \P X; P Y\ \ P (MPair X Y)" - and C: "\K X. P X \ P (Crypt K X)" - and D: "\K X. 
P X \ P (Decrypt K X)" - shows "P msg" - using N M C D by (rule msg_induct_aux) - -subsection{*The Abstract Discriminator*} - -text{*However, as @{text Crypt_Nonce_neq_Nonce} above illustrates, we don't -need this function in order to prove discrimination theorems.*} - -quotient_definition - "discrim:: msg \ int" -is - "freediscrim" - -text{*Now prove the four equations for @{term discrim}*} - -lemma [quot_respect]: - shows "(op \ ===> op =) freediscrim freediscrim" - by (auto simp add: msgrel_imp_eq_freediscrim) - -lemma discrim_Nonce [simp]: - shows "discrim (Nonce N) = 0" - by (lifting freediscrim.simps(1)) - -lemma discrim_MPair [simp]: - shows "discrim (MPair X Y) = 1" - by (lifting freediscrim.simps(2)) - -lemma discrim_Crypt [simp]: - shows "discrim (Crypt K X) = discrim X + 2" - by (lifting freediscrim.simps(3)) - -lemma discrim_Decrypt [simp]: - shows "discrim (Decrypt K X) = discrim X - 2" - by (lifting freediscrim.simps(4)) - -end - diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Quotient_Examples/LarryInt.thy --- a/src/HOL/Quotient_Examples/LarryInt.thy Tue May 04 19:57:55 2010 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,395 +0,0 @@ - -header{*The Integers as Equivalence Classes over Pairs of Natural Numbers*} - -theory LarryInt -imports Main Quotient_Product -begin - -fun - intrel :: "(nat \ nat) \ (nat \ nat) \ bool" -where - "intrel (x, y) (u, v) = (x + v = u + y)" - -quotient_type int = "nat \ nat" / intrel - by (auto simp add: equivp_def expand_fun_eq) - -instantiation int :: "{zero, one, plus, uminus, minus, times, ord}" -begin - -quotient_definition - Zero_int_def: "0::int" is "(0::nat, 0::nat)" - -quotient_definition - One_int_def: "1::int" is "(1::nat, 0::nat)" - -definition - "add_raw \ \(x, y) (u, v). (x + (u::nat), y + (v::nat))" - -quotient_definition - "(op +) :: int \ int \ int" -is - "add_raw" - -definition - "uminus_raw \ \(x::nat, y::nat). (y, x)" - -quotient_definition - "uminus :: int \ int" -is - "uminus_raw" - -fun - mult_raw::"nat \ nat \ nat \ nat \ nat \ nat" -where - "mult_raw (x, y) (u, v) = (x*u + y*v, x*v + y*u)" - -quotient_definition - "(op *) :: int \ int \ int" -is - "mult_raw" - -definition - "le_raw \ \(x, y) (u, v). (x+v \ u+(y::nat))" - -quotient_definition - le_int_def: "(op \) :: int \ int \ bool" -is - "le_raw" - -definition - less_int_def: "z < (w::int) \ (z \ w & z \ w)" - -definition - diff_int_def: "z - (w::int) \ z + (-w)" - -instance .. 
- -end - -subsection{*Construction of the Integers*} - -lemma zminus_zminus_raw: - "uminus_raw (uminus_raw z) = z" - by (cases z) (simp add: uminus_raw_def) - -lemma [quot_respect]: - shows "(intrel ===> intrel) uminus_raw uminus_raw" - by (simp add: uminus_raw_def) - -lemma zminus_zminus: - fixes z::"int" - shows "- (- z) = z" - by(lifting zminus_zminus_raw) - -lemma zminus_0_raw: - shows "uminus_raw (0, 0) = (0, 0::nat)" - by (simp add: uminus_raw_def) - -lemma zminus_0: - shows "- 0 = (0::int)" - by (lifting zminus_0_raw) - -subsection{*Integer Addition*} - -lemma zminus_zadd_distrib_raw: - shows "uminus_raw (add_raw z w) = add_raw (uminus_raw z) (uminus_raw w)" -by (cases z, cases w) - (auto simp add: add_raw_def uminus_raw_def) - -lemma [quot_respect]: - shows "(intrel ===> intrel ===> intrel) add_raw add_raw" -by (simp add: add_raw_def) - -lemma zminus_zadd_distrib: - fixes z w::"int" - shows "- (z + w) = (- z) + (- w)" - by(lifting zminus_zadd_distrib_raw) - -lemma zadd_commute_raw: - shows "add_raw z w = add_raw w z" -by (cases z, cases w) - (simp add: add_raw_def) - -lemma zadd_commute: - fixes z w::"int" - shows "(z::int) + w = w + z" - by (lifting zadd_commute_raw) - -lemma zadd_assoc_raw: - shows "add_raw (add_raw z1 z2) z3 = add_raw z1 (add_raw z2 z3)" - by (cases z1, cases z2, cases z3) (simp add: add_raw_def) - -lemma zadd_assoc: - fixes z1 z2 z3::"int" - shows "(z1 + z2) + z3 = z1 + (z2 + z3)" - by (lifting zadd_assoc_raw) - -lemma zadd_0_raw: - shows "add_raw (0, 0) z = z" - by (simp add: add_raw_def) - - -text {*also for the instance declaration int :: plus_ac0*} -lemma zadd_0: - fixes z::"int" - shows "0 + z = z" - by (lifting zadd_0_raw) - -lemma zadd_zminus_inverse_raw: - shows "intrel (add_raw (uminus_raw z) z) (0, 0)" - by (cases z) (simp add: add_raw_def uminus_raw_def) - -lemma zadd_zminus_inverse2: - fixes z::"int" - shows "(- z) + z = 0" - by (lifting zadd_zminus_inverse_raw) - -subsection{*Integer Multiplication*} - -lemma zmult_zminus_raw: - shows "mult_raw (uminus_raw z) w = uminus_raw (mult_raw z w)" -apply(cases z, cases w) -apply(simp add: uminus_raw_def) -done - -lemma mult_raw_fst: - assumes a: "intrel x z" - shows "intrel (mult_raw x y) (mult_raw z y)" -using a -apply(cases x, cases y, cases z) -apply(auto simp add: mult_raw.simps intrel.simps) -apply(rename_tac u v w x y z) -apply(subgoal_tac "u*w + z*w = y*w + v*w & u*x + z*x = y*x + v*x") -apply(simp add: mult_ac) -apply(simp add: add_mult_distrib [symmetric]) -done - -lemma mult_raw_snd: - assumes a: "intrel x z" - shows "intrel (mult_raw y x) (mult_raw y z)" -using a -apply(cases x, cases y, cases z) -apply(auto simp add: mult_raw.simps intrel.simps) -apply(rename_tac u v w x y z) -apply(subgoal_tac "u*w + z*w = y*w + v*w & u*x + z*x = y*x + v*x") -apply(simp add: mult_ac) -apply(simp add: add_mult_distrib [symmetric]) -done - -lemma [quot_respect]: - shows "(intrel ===> intrel ===> intrel) mult_raw mult_raw" -apply(simp only: fun_rel_def) -apply(rule allI | rule impI)+ -apply(rule equivp_transp[OF int_equivp]) -apply(rule mult_raw_fst) -apply(assumption) -apply(rule mult_raw_snd) -apply(assumption) -done - -lemma zmult_zminus: - fixes z w::"int" - shows "(- z) * w = - (z * w)" - by (lifting zmult_zminus_raw) - -lemma zmult_commute_raw: - shows "mult_raw z w = mult_raw w z" -apply(cases z, cases w) -apply(simp add: add_ac mult_ac) -done - -lemma zmult_commute: - fixes z w::"int" - shows "z * w = w * z" - by (lifting zmult_commute_raw) - -lemma zmult_assoc_raw: - shows "mult_raw (mult_raw z1 z2) z3 = 
mult_raw z1 (mult_raw z2 z3)" -apply(cases z1, cases z2, cases z3) -apply(simp add: add_mult_distrib2 mult_ac) -done - -lemma zmult_assoc: - fixes z1 z2 z3::"int" - shows "(z1 * z2) * z3 = z1 * (z2 * z3)" - by (lifting zmult_assoc_raw) - -lemma zadd_mult_distrib_raw: - shows "mult_raw (add_raw z1 z2) w = add_raw (mult_raw z1 w) (mult_raw z2 w)" -apply(cases z1, cases z2, cases w) -apply(simp add: add_mult_distrib2 mult_ac add_raw_def) -done - -lemma zadd_zmult_distrib: - fixes z1 z2 w::"int" - shows "(z1 + z2) * w = (z1 * w) + (z2 * w)" - by(lifting zadd_mult_distrib_raw) - -lemma zadd_zmult_distrib2: - fixes w z1 z2::"int" - shows "w * (z1 + z2) = (w * z1) + (w * z2)" - by (simp add: zmult_commute [of w] zadd_zmult_distrib) - -lemma zdiff_zmult_distrib: - fixes w z1 z2::"int" - shows "(z1 - z2) * w = (z1 * w) - (z2 * w)" - by (simp add: diff_int_def zadd_zmult_distrib zmult_zminus) - -lemma zdiff_zmult_distrib2: - fixes w z1 z2::"int" - shows "w * (z1 - z2) = (w * z1) - (w * z2)" - by (simp add: zmult_commute [of w] zdiff_zmult_distrib) - -lemmas int_distrib = - zadd_zmult_distrib zadd_zmult_distrib2 - zdiff_zmult_distrib zdiff_zmult_distrib2 - -lemma zmult_1_raw: - shows "mult_raw (1, 0) z = z" - by (cases z) (auto) - -lemma zmult_1: - fixes z::"int" - shows "1 * z = z" - by (lifting zmult_1_raw) - -lemma zmult_1_right: - fixes z::"int" - shows "z * 1 = z" - by (rule trans [OF zmult_commute zmult_1]) - -lemma zero_not_one: - shows "\(intrel (0, 0) (1::nat, 0::nat))" - by auto - -text{*The Integers Form A Ring*} -instance int :: comm_ring_1 -proof - fix i j k :: int - show "(i + j) + k = i + (j + k)" by (simp add: zadd_assoc) - show "i + j = j + i" by (simp add: zadd_commute) - show "0 + i = i" by (rule zadd_0) - show "- i + i = 0" by (rule zadd_zminus_inverse2) - show "i - j = i + (-j)" by (simp add: diff_int_def) - show "(i * j) * k = i * (j * k)" by (rule zmult_assoc) - show "i * j = j * i" by (rule zmult_commute) - show "1 * i = i" by (rule zmult_1) - show "(i + j) * k = i * k + j * k" by (simp add: int_distrib) - show "0 \ (1::int)" by (lifting zero_not_one) -qed - - -subsection{*The @{text "\"} Ordering*} - -lemma zle_refl_raw: - shows "le_raw w w" - by (cases w) (simp add: le_raw_def) - -lemma [quot_respect]: - shows "(intrel ===> intrel ===> op =) le_raw le_raw" - by (auto) (simp_all add: le_raw_def) - -lemma zle_refl: - fixes w::"int" - shows "w \ w" - by (lifting zle_refl_raw) - - -lemma zle_trans_raw: - shows "\le_raw i j; le_raw j k\ \ le_raw i k" -apply(cases i, cases j, cases k) -apply(auto simp add: le_raw_def) -done - -lemma zle_trans: - fixes i j k::"int" - shows "\i \ j; j \ k\ \ i \ k" - by (lifting zle_trans_raw) - -lemma zle_anti_sym_raw: - shows "\le_raw z w; le_raw w z\ \ intrel z w" -apply(cases z, cases w) -apply(auto iff: le_raw_def) -done - -lemma zle_anti_sym: - fixes z w::"int" - shows "\z \ w; w \ z\ \ z = w" - by (lifting zle_anti_sym_raw) - - -(* Axiom 'order_less_le' of class 'order': *) -lemma zless_le: - fixes w z::"int" - shows "(w < z) = (w \ z & w \ z)" - by (simp add: less_int_def) - -instance int :: order -apply (default) -apply(auto simp add: zless_le zle_anti_sym)[1] -apply(rule zle_refl) -apply(erule zle_trans, assumption) -apply(erule zle_anti_sym, assumption) -done - -(* Axiom 'linorder_linear' of class 'linorder': *) - -lemma zle_linear_raw: - shows "le_raw z w \ le_raw w z" -apply(cases w, cases z) -apply(auto iff: le_raw_def) -done - -lemma zle_linear: - fixes z w::"int" - shows "z \ w \ w \ z" - by (lifting zle_linear_raw) - -instance int 
:: linorder -apply(default) -apply(rule zle_linear) -done - -lemma zadd_left_mono_raw: - shows "le_raw i j \ le_raw (add_raw k i) (add_raw k j)" -apply(cases k) -apply(auto simp add: add_raw_def le_raw_def) -done - -lemma zadd_left_mono: - fixes i j::"int" - shows "i \ j \ k + i \ k + j" - by (lifting zadd_left_mono_raw) - - -subsection{*Magnitide of an Integer, as a Natural Number: @{term nat}*} - -definition - "nat_raw \ \(x, y).x - (y::nat)" - -quotient_definition - "nat2::int \ nat" -is - "nat_raw" - -abbreviation - "less_raw x y \ (le_raw x y \ \(intrel x y))" - -lemma nat_le_eq_zle_raw: - shows "less_raw (0, 0) w \ le_raw (0, 0) z \ (nat_raw w \ nat_raw z) = (le_raw w z)" - apply (cases w) - apply (cases z) - apply (simp add: nat_raw_def le_raw_def) - by auto - -lemma [quot_respect]: - shows "(intrel ===> op =) nat_raw nat_raw" - by (auto iff: nat_raw_def) - -lemma nat_le_eq_zle: - fixes w z::"int" - shows "0 < w \ 0 \ z \ (nat2 w \ nat2 z) = (w\z)" - unfolding less_int_def - by (lifting nat_le_eq_zle_raw) - -end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Quotient_Examples/Quotient_Int.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Quotient_Examples/Quotient_Int.thy Tue May 04 20:30:22 2010 +0200 @@ -0,0 +1,380 @@ +(* Title: HOL/Quotient_Examples/Quotient_Int.thy + Author: Cezary Kaliszyk + Author: Christian Urban + +Integers based on Quotients, based on an older version by Larry Paulson. +*) +theory Quotient_Int +imports Quotient_Product Nat +begin + +fun + intrel :: "(nat \ nat) \ (nat \ nat) \ bool" (infix "\" 50) +where + "intrel (x, y) (u, v) = (x + v = u + y)" + +quotient_type int = "nat \ nat" / intrel + by (auto simp add: equivp_def expand_fun_eq) + +instantiation int :: "{zero, one, plus, uminus, minus, times, ord, abs, sgn}" +begin + +quotient_definition + "0 \ int" is "(0\nat, 0\nat)" + +quotient_definition + "1 \ int" is "(1\nat, 0\nat)" + +fun + plus_int_raw :: "(nat \ nat) \ (nat \ nat) \ (nat \ nat)" +where + "plus_int_raw (x, y) (u, v) = (x + u, y + v)" + +quotient_definition + "(op +) \ (int \ int \ int)" is "plus_int_raw" + +fun + uminus_int_raw :: "(nat \ nat) \ (nat \ nat)" +where + "uminus_int_raw (x, y) = (y, x)" + +quotient_definition + "(uminus \ (int \ int))" is "uminus_int_raw" + +definition + minus_int_def [code del]: "z - w = z + (-w\int)" + +fun + times_int_raw :: "(nat \ nat) \ (nat \ nat) \ (nat \ nat)" +where + "times_int_raw (x, y) (u, v) = (x*u + y*v, x*v + y*u)" + +quotient_definition + "(op *) :: (int \ int \ int)" is "times_int_raw" + +fun + le_int_raw :: "(nat \ nat) \ (nat \ nat) \ bool" +where + "le_int_raw (x, y) (u, v) = (x+v \ u+y)" + +quotient_definition + le_int_def: "(op \) :: int \ int \ bool" is "le_int_raw" + +definition + less_int_def [code del]: "(z\int) < w = (z \ w \ z \ w)" + +definition + zabs_def: "\i\int\ = (if i < 0 then - i else i)" + +definition + zsgn_def: "sgn (i\int) = (if i = 0 then 0 else if 0 < i then 1 else - 1)" + +instance .. 
+ +end + +lemma [quot_respect]: + shows "(op \ ===> op \ ===> op \) plus_int_raw plus_int_raw" + and "(op \ ===> op \) uminus_int_raw uminus_int_raw" + and "(op \ ===> op \ ===> op =) le_int_raw le_int_raw" + by auto + +lemma times_int_raw_fst: + assumes a: "x \ z" + shows "times_int_raw x y \ times_int_raw z y" + using a + apply(cases x, cases y, cases z) + apply(auto simp add: times_int_raw.simps intrel.simps) + apply(rename_tac u v w x y z) + apply(subgoal_tac "u*w + z*w = y*w + v*w & u*x + z*x = y*x + v*x") + apply(simp add: mult_ac) + apply(simp add: add_mult_distrib [symmetric]) + done + +lemma times_int_raw_snd: + assumes a: "x \ z" + shows "times_int_raw y x \ times_int_raw y z" + using a + apply(cases x, cases y, cases z) + apply(auto simp add: times_int_raw.simps intrel.simps) + apply(rename_tac u v w x y z) + apply(subgoal_tac "u*w + z*w = y*w + v*w & u*x + z*x = y*x + v*x") + apply(simp add: mult_ac) + apply(simp add: add_mult_distrib [symmetric]) + done + +lemma [quot_respect]: + shows "(op \ ===> op \ ===> op \) times_int_raw times_int_raw" + apply(simp only: fun_rel_def) + apply(rule allI | rule impI)+ + apply(rule equivp_transp[OF int_equivp]) + apply(rule times_int_raw_fst) + apply(assumption) + apply(rule times_int_raw_snd) + apply(assumption) + done + +lemma plus_assoc_raw: + shows "plus_int_raw (plus_int_raw i j) k \ plus_int_raw i (plus_int_raw j k)" + by (cases i, cases j, cases k) (simp) + +lemma plus_sym_raw: + shows "plus_int_raw i j \ plus_int_raw j i" + by (cases i, cases j) (simp) + +lemma plus_zero_raw: + shows "plus_int_raw (0, 0) i \ i" + by (cases i) (simp) + +lemma plus_minus_zero_raw: + shows "plus_int_raw (uminus_int_raw i) i \ (0, 0)" + by (cases i) (simp) + +lemma times_assoc_raw: + shows "times_int_raw (times_int_raw i j) k \ times_int_raw i (times_int_raw j k)" + by (cases i, cases j, cases k) + (simp add: algebra_simps) + +lemma times_sym_raw: + shows "times_int_raw i j \ times_int_raw j i" + by (cases i, cases j) (simp add: algebra_simps) + +lemma times_one_raw: + shows "times_int_raw (1, 0) i \ i" + by (cases i) (simp) + +lemma times_plus_comm_raw: + shows "times_int_raw (plus_int_raw i j) k \ plus_int_raw (times_int_raw i k) (times_int_raw j k)" +by (cases i, cases j, cases k) + (simp add: algebra_simps) + +lemma one_zero_distinct: + shows "\ (0, 0) \ ((1::nat), (0::nat))" + by simp + +text{* The integers form a @{text comm_ring_1}*} + +instance int :: comm_ring_1 +proof + fix i j k :: int + show "(i + j) + k = i + (j + k)" + by (lifting plus_assoc_raw) + show "i + j = j + i" + by (lifting plus_sym_raw) + show "0 + i = (i::int)" + by (lifting plus_zero_raw) + show "- i + i = 0" + by (lifting plus_minus_zero_raw) + show "i - j = i + - j" + by (simp add: minus_int_def) + show "(i * j) * k = i * (j * k)" + by (lifting times_assoc_raw) + show "i * j = j * i" + by (lifting times_sym_raw) + show "1 * i = i" + by (lifting times_one_raw) + show "(i + j) * k = i * k + j * k" + by (lifting times_plus_comm_raw) + show "0 \ (1::int)" + by (lifting one_zero_distinct) +qed + +lemma plus_int_raw_rsp_aux: + assumes a: "a \ b" "c \ d" + shows "plus_int_raw a c \ plus_int_raw b d" + using a + by (cases a, cases b, cases c, cases d) + (simp) + +lemma add_abs_int: + "(abs_int (x,y)) + (abs_int (u,v)) = + (abs_int (x + u, y + v))" + apply(simp add: plus_int_def id_simps) + apply(fold plus_int_raw.simps) + apply(rule Quotient_rel_abs[OF Quotient_int]) + apply(rule plus_int_raw_rsp_aux) + apply(simp_all add: rep_abs_rsp_left[OF Quotient_int]) + done + +definition 
int_of_nat_raw: + "int_of_nat_raw m = (m :: nat, 0 :: nat)" + +quotient_definition + "int_of_nat :: nat \ int" is "int_of_nat_raw" + +lemma[quot_respect]: + shows "(op = ===> op \) int_of_nat_raw int_of_nat_raw" + by (simp add: equivp_reflp[OF int_equivp]) + +lemma int_of_nat: + shows "of_nat m = int_of_nat m" + by (induct m) + (simp_all add: zero_int_def one_int_def int_of_nat_def int_of_nat_raw add_abs_int) + +lemma le_antisym_raw: + shows "le_int_raw i j \ le_int_raw j i \ i \ j" + by (cases i, cases j) (simp) + +lemma le_refl_raw: + shows "le_int_raw i i" + by (cases i) (simp) + +lemma le_trans_raw: + shows "le_int_raw i j \ le_int_raw j k \ le_int_raw i k" + by (cases i, cases j, cases k) (simp) + +lemma le_cases_raw: + shows "le_int_raw i j \ le_int_raw j i" + by (cases i, cases j) + (simp add: linorder_linear) + +instance int :: linorder +proof + fix i j k :: int + show antisym: "i \ j \ j \ i \ i = j" + by (lifting le_antisym_raw) + show "(i < j) = (i \ j \ \ j \ i)" + by (auto simp add: less_int_def dest: antisym) + show "i \ i" + by (lifting le_refl_raw) + show "i \ j \ j \ k \ i \ k" + by (lifting le_trans_raw) + show "i \ j \ j \ i" + by (lifting le_cases_raw) +qed + +instantiation int :: distrib_lattice +begin + +definition + "(inf \ int \ int \ int) = min" + +definition + "(sup \ int \ int \ int) = max" + +instance + by default + (auto simp add: inf_int_def sup_int_def min_max.sup_inf_distrib1) + +end + +lemma le_plus_int_raw: + shows "le_int_raw i j \ le_int_raw (plus_int_raw k i) (plus_int_raw k j)" + by (cases i, cases j, cases k) (simp) + +instance int :: ordered_cancel_ab_semigroup_add +proof + fix i j k :: int + show "i \ j \ k + i \ k + j" + by (lifting le_plus_int_raw) +qed + +abbreviation + "less_int_raw i j \ le_int_raw i j \ \(i \ j)" + +lemma zmult_zless_mono2_lemma: + fixes i j::int + and k::nat + shows "i < j \ 0 < k \ of_nat k * i < of_nat k * j" + apply(induct "k") + apply(simp) + apply(case_tac "k = 0") + apply(simp_all add: left_distrib add_strict_mono) + done + +lemma zero_le_imp_eq_int_raw: + fixes k::"(nat \ nat)" + shows "less_int_raw (0, 0) k \ (\n > 0. k \ int_of_nat_raw n)" + apply(cases k) + apply(simp add:int_of_nat_raw) + apply(auto) + apply(rule_tac i="b" and j="a" in less_Suc_induct) + apply(auto) + done + +lemma zero_le_imp_eq_int: + fixes k::int + shows "0 < k \ \n > 0. k = of_nat n" + unfolding less_int_def int_of_nat + by (lifting zero_le_imp_eq_int_raw) + +lemma zmult_zless_mono2: + fixes i j k::int + assumes a: "i < j" "0 < k" + shows "k * i < k * j" + using a + by (drule_tac zero_le_imp_eq_int) (auto simp add: zmult_zless_mono2_lemma) + +text{*The integers form an ordered integral domain*} + +instance int :: linordered_idom +proof + fix i j k :: int + show "i < j \ 0 < k \ k * i < k * j" + by (rule zmult_zless_mono2) + show "\i\ = (if i < 0 then -i else i)" + by (simp only: zabs_def) + show "sgn (i\int) = (if i=0 then 0 else if 0i. P i \ P (plus_int_raw i (1, 0))" + and c: "\i. P i \ P (plus_int_raw i (uminus_int_raw (1, 0)))" + shows "P x" +proof (cases x, clarify) + fix a b + show "P (a, b)" + proof (induct a arbitrary: b rule: Nat.induct) + case zero + show "P (0, b)" using assms by (induct b) auto + next + case (Suc n) + then show ?case using assms by auto + qed +qed + +lemma int_induct: + fixes x :: int + assumes a: "P 0" + and b: "\i. P i \ P (i + 1)" + and c: "\i. 
P i \ P (i - 1)" + shows "P x" + using a b c unfolding minus_int_def + by (lifting int_induct_raw) + +text {* Magnitide of an Integer, as a Natural Number: @{term nat} *} + +definition + "int_to_nat_raw \ \(x, y).x - (y::nat)" + +quotient_definition + "int_to_nat::int \ nat" +is + "int_to_nat_raw" + +lemma [quot_respect]: + shows "(intrel ===> op =) int_to_nat_raw int_to_nat_raw" + by (auto iff: int_to_nat_raw_def) + +lemma nat_le_eq_zle_raw: + assumes a: "less_int_raw (0, 0) w \ le_int_raw (0, 0) z" + shows "(int_to_nat_raw w \ int_to_nat_raw z) = (le_int_raw w z)" + using a + by (cases w, cases z) (auto simp add: int_to_nat_raw_def) + +lemma nat_le_eq_zle: + fixes w z::"int" + shows "0 < w \ 0 \ z \ (int_to_nat w \ int_to_nat z) = (w \ z)" + unfolding less_int_def + by (lifting nat_le_eq_zle_raw) + +end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Quotient_Examples/Quotient_Message.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Quotient_Examples/Quotient_Message.thy Tue May 04 20:30:22 2010 +0200 @@ -0,0 +1,399 @@ +(* Title: HOL/Quotient_Examples/Quotient_Message.thy + Author: Christian Urban + +Message datatype, based on an older version by Larry Paulson. +*) +theory Quotient_Message +imports Main Quotient_Syntax +begin + +subsection{*Defining the Free Algebra*} + +datatype + freemsg = NONCE nat + | MPAIR freemsg freemsg + | CRYPT nat freemsg + | DECRYPT nat freemsg + +inductive + msgrel::"freemsg \ freemsg \ bool" (infixl "\" 50) +where + CD: "CRYPT K (DECRYPT K X) \ X" +| DC: "DECRYPT K (CRYPT K X) \ X" +| NONCE: "NONCE N \ NONCE N" +| MPAIR: "\X \ X'; Y \ Y'\ \ MPAIR X Y \ MPAIR X' Y'" +| CRYPT: "X \ X' \ CRYPT K X \ CRYPT K X'" +| DECRYPT: "X \ X' \ DECRYPT K X \ DECRYPT K X'" +| SYM: "X \ Y \ Y \ X" +| TRANS: "\X \ Y; Y \ Z\ \ X \ Z" + +lemmas msgrel.intros[intro] + +text{*Proving that it is an equivalence relation*} + +lemma msgrel_refl: "X \ X" +by (induct X, (blast intro: msgrel.intros)+) + +theorem equiv_msgrel: "equivp msgrel" +proof (rule equivpI) + show "reflp msgrel" by (simp add: reflp_def msgrel_refl) + show "symp msgrel" by (simp add: symp_def, blast intro: msgrel.SYM) + show "transp msgrel" by (simp add: transp_def, blast intro: msgrel.TRANS) +qed + +subsection{*Some Functions on the Free Algebra*} + +subsubsection{*The Set of Nonces*} + +fun + freenonces :: "freemsg \ nat set" +where + "freenonces (NONCE N) = {N}" +| "freenonces (MPAIR X Y) = freenonces X \ freenonces Y" +| "freenonces (CRYPT K X) = freenonces X" +| "freenonces (DECRYPT K X) = freenonces X" + +theorem msgrel_imp_eq_freenonces: + assumes a: "U \ V" + shows "freenonces U = freenonces V" + using a by (induct) (auto) + +subsubsection{*The Left Projection*} + +text{*A function to return the left part of the top pair in a message. It will +be lifted to the initial algrebra, to serve as an example of that process.*} +fun + freeleft :: "freemsg \ freemsg" +where + "freeleft (NONCE N) = NONCE N" +| "freeleft (MPAIR X Y) = X" +| "freeleft (CRYPT K X) = freeleft X" +| "freeleft (DECRYPT K X) = freeleft X" + +text{*This theorem lets us prove that the left function respects the +equivalence relation. 
It also helps us prove that MPair + (the abstract constructor) is injective*} +lemma msgrel_imp_eqv_freeleft_aux: + shows "freeleft U \ freeleft U" + by (induct rule: freeleft.induct) (auto) + +theorem msgrel_imp_eqv_freeleft: + assumes a: "U \ V" + shows "freeleft U \ freeleft V" + using a + by (induct) (auto intro: msgrel_imp_eqv_freeleft_aux) + +subsubsection{*The Right Projection*} + +text{*A function to return the right part of the top pair in a message.*} +fun + freeright :: "freemsg \ freemsg" +where + "freeright (NONCE N) = NONCE N" +| "freeright (MPAIR X Y) = Y" +| "freeright (CRYPT K X) = freeright X" +| "freeright (DECRYPT K X) = freeright X" + +text{*This theorem lets us prove that the right function respects the +equivalence relation. It also helps us prove that MPair + (the abstract constructor) is injective*} +lemma msgrel_imp_eqv_freeright_aux: + shows "freeright U \ freeright U" + by (induct rule: freeright.induct) (auto) + +theorem msgrel_imp_eqv_freeright: + assumes a: "U \ V" + shows "freeright U \ freeright V" + using a + by (induct) (auto intro: msgrel_imp_eqv_freeright_aux) + +subsubsection{*The Discriminator for Constructors*} + +text{*A function to distinguish nonces, mpairs and encryptions*} +fun + freediscrim :: "freemsg \ int" +where + "freediscrim (NONCE N) = 0" + | "freediscrim (MPAIR X Y) = 1" + | "freediscrim (CRYPT K X) = freediscrim X + 2" + | "freediscrim (DECRYPT K X) = freediscrim X - 2" + +text{*This theorem helps us prove @{term "Nonce N \ MPair X Y"}*} +theorem msgrel_imp_eq_freediscrim: + assumes a: "U \ V" + shows "freediscrim U = freediscrim V" + using a by (induct) (auto) + +subsection{*The Initial Algebra: A Quotiented Message Type*} + +quotient_type msg = freemsg / msgrel + by (rule equiv_msgrel) + +text{*The abstract message constructors*} + +quotient_definition + "Nonce :: nat \ msg" +is + "NONCE" + +quotient_definition + "MPair :: msg \ msg \ msg" +is + "MPAIR" + +quotient_definition + "Crypt :: nat \ msg \ msg" +is + "CRYPT" + +quotient_definition + "Decrypt :: nat \ msg \ msg" +is + "DECRYPT" + +lemma [quot_respect]: + shows "(op = ===> op \ ===> op \) CRYPT CRYPT" +by (auto intro: CRYPT) + +lemma [quot_respect]: + shows "(op = ===> op \ ===> op \) DECRYPT DECRYPT" +by (auto intro: DECRYPT) + +text{*Establishing these two equations is the point of the whole exercise*} +theorem CD_eq [simp]: + shows "Crypt K (Decrypt K X) = X" + by (lifting CD) + +theorem DC_eq [simp]: + shows "Decrypt K (Crypt K X) = X" + by (lifting DC) + +subsection{*The Abstract Function to Return the Set of Nonces*} + +quotient_definition + "nonces:: msg \ nat set" +is + "freenonces" + +text{*Now prove the four equations for @{term nonces}*} + +lemma [quot_respect]: + shows "(op \ ===> op =) freenonces freenonces" + by (simp add: msgrel_imp_eq_freenonces) + +lemma [quot_respect]: + shows "(op = ===> op \) NONCE NONCE" + by (simp add: NONCE) + +lemma nonces_Nonce [simp]: + shows "nonces (Nonce N) = {N}" + by (lifting freenonces.simps(1)) + +lemma [quot_respect]: + shows " (op \ ===> op \ ===> op \) MPAIR MPAIR" + by (simp add: MPAIR) + +lemma nonces_MPair [simp]: + shows "nonces (MPair X Y) = nonces X \ nonces Y" + by (lifting freenonces.simps(2)) + +lemma nonces_Crypt [simp]: + shows "nonces (Crypt K X) = nonces X" + by (lifting freenonces.simps(3)) + +lemma nonces_Decrypt [simp]: + shows "nonces (Decrypt K X) = nonces X" + by (lifting freenonces.simps(4)) + +subsection{*The Abstract Function to Return the Left Part*} + +quotient_definition + "left:: msg \ msg" +is + 
"freeleft" + +lemma [quot_respect]: + shows "(op \ ===> op \) freeleft freeleft" + by (simp add: msgrel_imp_eqv_freeleft) + +lemma left_Nonce [simp]: + shows "left (Nonce N) = Nonce N" + by (lifting freeleft.simps(1)) + +lemma left_MPair [simp]: + shows "left (MPair X Y) = X" + by (lifting freeleft.simps(2)) + +lemma left_Crypt [simp]: + shows "left (Crypt K X) = left X" + by (lifting freeleft.simps(3)) + +lemma left_Decrypt [simp]: + shows "left (Decrypt K X) = left X" + by (lifting freeleft.simps(4)) + +subsection{*The Abstract Function to Return the Right Part*} + +quotient_definition + "right:: msg \ msg" +is + "freeright" + +text{*Now prove the four equations for @{term right}*} + +lemma [quot_respect]: + shows "(op \ ===> op \) freeright freeright" + by (simp add: msgrel_imp_eqv_freeright) + +lemma right_Nonce [simp]: + shows "right (Nonce N) = Nonce N" + by (lifting freeright.simps(1)) + +lemma right_MPair [simp]: + shows "right (MPair X Y) = Y" + by (lifting freeright.simps(2)) + +lemma right_Crypt [simp]: + shows "right (Crypt K X) = right X" + by (lifting freeright.simps(3)) + +lemma right_Decrypt [simp]: + shows "right (Decrypt K X) = right X" + by (lifting freeright.simps(4)) + +subsection{*Injectivity Properties of Some Constructors*} + +lemma NONCE_imp_eq: + shows "NONCE m \ NONCE n \ m = n" + by (drule msgrel_imp_eq_freenonces, simp) + +text{*Can also be proved using the function @{term nonces}*} +lemma Nonce_Nonce_eq [iff]: + shows "(Nonce m = Nonce n) = (m = n)" +proof + assume "Nonce m = Nonce n" + then show "m = n" by (lifting NONCE_imp_eq) +next + assume "m = n" + then show "Nonce m = Nonce n" by simp +qed + +lemma MPAIR_imp_eqv_left: + shows "MPAIR X Y \ MPAIR X' Y' \ X \ X'" + by (drule msgrel_imp_eqv_freeleft) (simp) + +lemma MPair_imp_eq_left: + assumes eq: "MPair X Y = MPair X' Y'" + shows "X = X'" + using eq by (lifting MPAIR_imp_eqv_left) + +lemma MPAIR_imp_eqv_right: + shows "MPAIR X Y \ MPAIR X' Y' \ Y \ Y'" + by (drule msgrel_imp_eqv_freeright) (simp) + +lemma MPair_imp_eq_right: + shows "MPair X Y = MPair X' Y' \ Y = Y'" + by (lifting MPAIR_imp_eqv_right) + +theorem MPair_MPair_eq [iff]: + shows "(MPair X Y = MPair X' Y') = (X=X' & Y=Y')" + by (blast dest: MPair_imp_eq_left MPair_imp_eq_right) + +lemma NONCE_neqv_MPAIR: + shows "\(NONCE m \ MPAIR X Y)" + by (auto dest: msgrel_imp_eq_freediscrim) + +theorem Nonce_neq_MPair [iff]: + shows "Nonce N \ MPair X Y" + by (lifting NONCE_neqv_MPAIR) + +text{*Example suggested by a referee*} + +lemma CRYPT_NONCE_neq_NONCE: + shows "\(CRYPT K (NONCE M) \ NONCE N)" + by (auto dest: msgrel_imp_eq_freediscrim) + +theorem Crypt_Nonce_neq_Nonce: + shows "Crypt K (Nonce M) \ Nonce N" + by (lifting CRYPT_NONCE_neq_NONCE) + +text{*...and many similar results*} +lemma CRYPT2_NONCE_neq_NONCE: + shows "\(CRYPT K (CRYPT K' (NONCE M)) \ NONCE N)" + by (auto dest: msgrel_imp_eq_freediscrim) + +theorem Crypt2_Nonce_neq_Nonce: + shows "Crypt K (Crypt K' (Nonce M)) \ Nonce N" + by (lifting CRYPT2_NONCE_neq_NONCE) + +theorem Crypt_Crypt_eq [iff]: + shows "(Crypt K X = Crypt K X') = (X=X')" +proof + assume "Crypt K X = Crypt K X'" + hence "Decrypt K (Crypt K X) = Decrypt K (Crypt K X')" by simp + thus "X = X'" by simp +next + assume "X = X'" + thus "Crypt K X = Crypt K X'" by simp +qed + +theorem Decrypt_Decrypt_eq [iff]: + shows "(Decrypt K X = Decrypt K X') = (X=X')" +proof + assume "Decrypt K X = Decrypt K X'" + hence "Crypt K (Decrypt K X) = Crypt K (Decrypt K X')" by simp + thus "X = X'" by simp +next + assume "X = X'" + thus "Decrypt 
K X = Decrypt K X'" by simp +qed + +lemma msg_induct_aux: + shows "\\N. P (Nonce N); + \X Y. \P X; P Y\ \ P (MPair X Y); + \K X. P X \ P (Crypt K X); + \K X. P X \ P (Decrypt K X)\ \ P msg" + by (lifting freemsg.induct) + +lemma msg_induct [case_names Nonce MPair Crypt Decrypt, cases type: msg]: + assumes N: "\N. P (Nonce N)" + and M: "\X Y. \P X; P Y\ \ P (MPair X Y)" + and C: "\K X. P X \ P (Crypt K X)" + and D: "\K X. P X \ P (Decrypt K X)" + shows "P msg" + using N M C D by (rule msg_induct_aux) + +subsection{*The Abstract Discriminator*} + +text{*However, as @{text Crypt_Nonce_neq_Nonce} above illustrates, we don't +need this function in order to prove discrimination theorems.*} + +quotient_definition + "discrim:: msg \ int" +is + "freediscrim" + +text{*Now prove the four equations for @{term discrim}*} + +lemma [quot_respect]: + shows "(op \ ===> op =) freediscrim freediscrim" + by (auto simp add: msgrel_imp_eq_freediscrim) + +lemma discrim_Nonce [simp]: + shows "discrim (Nonce N) = 0" + by (lifting freediscrim.simps(1)) + +lemma discrim_MPair [simp]: + shows "discrim (MPair X Y) = 1" + by (lifting freediscrim.simps(2)) + +lemma discrim_Crypt [simp]: + shows "discrim (Crypt K X) = discrim X + 2" + by (lifting freediscrim.simps(3)) + +lemma discrim_Decrypt [simp]: + shows "discrim (Decrypt K X) = discrim X - 2" + by (lifting freediscrim.simps(4)) + +end + diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Quotient_Examples/ROOT.ML --- a/src/HOL/Quotient_Examples/ROOT.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Quotient_Examples/ROOT.ML Tue May 04 20:30:22 2010 +0200 @@ -4,5 +4,5 @@ Testing the quotient package. *) -use_thys ["FSet", "LarryInt", "LarryDatatype"]; +use_thys ["FSet", "Quotient_Int", "Quotient_Message"]; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Random.thy --- a/src/HOL/Random.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Random.thy Tue May 04 20:30:22 2010 +0200 @@ -138,10 +138,15 @@ subsection {* @{text ML} interface *} +code_reflect Random_Engine + functions range select select_weight + ML {* structure Random_Engine = struct +open Random_Engine; + type seed = int * int; local diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Rat.thy --- a/src/HOL/Rat.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Rat.thy Tue May 04 20:30:22 2010 +0200 @@ -411,7 +411,7 @@ subsubsection {* The field of rational numbers *} -instantiation rat :: field +instantiation rat :: field_inverse_zero begin definition @@ -440,13 +440,12 @@ next fix q r :: rat show "q / r = q * inverse r" by (simp add: divide_rat_def) +next + show "inverse 0 = (0::rat)" by (simp add: rat_number_expand, simp add: rat_number_collapse) qed end -instance rat :: division_by_zero proof -qed (simp add: rat_number_expand, simp add: rat_number_collapse) - subsubsection {* Various *} @@ -624,7 +623,7 @@ end -instance rat :: linordered_field +instance rat :: linordered_field_inverse_zero proof fix q r s :: rat show "q \ r ==> s + q \ s + r" @@ -724,8 +723,7 @@ by (cases "b = 0", simp, simp add: of_int_rat) moreover have "0 \ Fract (a mod b) b \ Fract (a mod b) b < 1" unfolding Fract_of_int_quotient - by (rule linorder_cases [of b 0]) - (simp add: divide_nonpos_neg, simp, simp add: divide_nonneg_pos) + by (rule linorder_cases [of b 0]) (simp add: divide_nonpos_neg, simp, simp add: divide_nonneg_pos) ultimately show ?thesis by simp qed @@ -818,7 +816,7 @@ done lemma of_rat_inverse: - "(of_rat (inverse a)::'a::{field_char_0,division_by_zero}) = + "(of_rat (inverse a)::'a::{field_char_0, field_inverse_zero}) = inverse (of_rat a)" 
by (cases "a = 0", simp_all add: nonzero_of_rat_inverse) @@ -827,7 +825,7 @@ by (simp add: divide_inverse of_rat_mult nonzero_of_rat_inverse) lemma of_rat_divide: - "(of_rat (a / b)::'a::{field_char_0,division_by_zero}) + "(of_rat (a / b)::'a::{field_char_0, field_inverse_zero}) = of_rat a / of_rat b" by (cases "b = 0") (simp_all add: nonzero_of_rat_divide) @@ -968,7 +966,7 @@ done lemma Rats_inverse [simp]: - fixes a :: "'a::{field_char_0,division_by_zero}" + fixes a :: "'a::{field_char_0, field_inverse_zero}" shows "a \ Rats \ inverse a \ Rats" apply (auto simp add: Rats_def) apply (rule range_eqI) @@ -984,7 +982,7 @@ done lemma Rats_divide [simp]: - fixes a b :: "'a::{field_char_0,division_by_zero}" + fixes a b :: "'a::{field_char_0, field_inverse_zero}" shows "\a \ Rats; b \ Rats\ \ a / b \ Rats" apply (auto simp add: Rats_def) apply (rule range_eqI) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/RealDef.thy --- a/src/HOL/RealDef.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/RealDef.thy Tue May 04 20:30:22 2010 +0200 @@ -266,23 +266,16 @@ subsection{*The Real Numbers form a Field*} -instance real :: field +instance real :: field_inverse_zero proof fix x y z :: real show "x \ 0 ==> inverse x * x = 1" by (rule real_mult_inverse_left) show "x / y = x * inverse y" by (simp add: real_divide_def) + show "inverse 0 = (0::real)" by (simp add: real_inverse_def) qed - -text{*Inverse of zero! Useful to simplify certain equations*} - lemma INVERSE_ZERO: "inverse 0 = (0::real)" -by (simp add: real_inverse_def) - -instance real :: division_by_zero -proof - show "inverse 0 = (0::real)" by (rule INVERSE_ZERO) -qed + by (fact inverse_zero) subsection{*The @{text "\"} Ordering*} @@ -416,7 +409,7 @@ subsection{*The Reals Form an Ordered Field*} -instance real :: linordered_field +instance real :: linordered_field_inverse_zero proof fix x y z :: real show "x \ y ==> z + x \ z + y" by (rule real_add_left_mono) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/RealVector.thy --- a/src/HOL/RealVector.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/RealVector.thy Tue May 04 20:30:22 2010 +0200 @@ -207,7 +207,7 @@ by (rule inverse_unique, simp) lemma inverse_scaleR_distrib: - fixes x :: "'a::{real_div_algebra,division_by_zero}" + fixes x :: "'a::{real_div_algebra, division_ring_inverse_zero}" shows "inverse (scaleR a x) = scaleR (inverse a) (inverse x)" apply (case_tac "a = 0", simp) apply (case_tac "x = 0", simp) @@ -250,7 +250,7 @@ lemma of_real_inverse [simp]: "of_real (inverse x) = - inverse (of_real x :: 'a::{real_div_algebra,division_by_zero})" + inverse (of_real x :: 'a::{real_div_algebra, division_ring_inverse_zero})" by (simp add: of_real_def inverse_scaleR_distrib) lemma nonzero_of_real_divide: @@ -260,7 +260,7 @@ lemma of_real_divide [simp]: "of_real (x / y) = - (of_real x / of_real y :: 'a::{real_field,division_by_zero})" + (of_real x / of_real y :: 'a::{real_field, field_inverse_zero})" by (simp add: divide_inverse) lemma of_real_power [simp]: @@ -370,7 +370,7 @@ done lemma Reals_inverse [simp]: - fixes a :: "'a::{real_div_algebra,division_by_zero}" + fixes a :: "'a::{real_div_algebra, division_ring_inverse_zero}" shows "a \ Reals \ inverse a \ Reals" apply (auto simp add: Reals_def) apply (rule range_eqI) @@ -386,7 +386,7 @@ done lemma Reals_divide [simp]: - fixes a b :: "'a::{real_field,division_by_zero}" + fixes a b :: "'a::{real_field, field_inverse_zero}" shows "\a \ Reals; b \ Reals\ \ a / b \ Reals" apply (auto simp add: Reals_def) apply (rule range_eqI) @@ -726,7 +726,7 @@ done lemma 
norm_inverse: - fixes a :: "'a::{real_normed_div_algebra,division_by_zero}" + fixes a :: "'a::{real_normed_div_algebra, division_ring_inverse_zero}" shows "norm (inverse a) = inverse (norm a)" apply (case_tac "a = 0", simp) apply (erule nonzero_norm_inverse) @@ -738,7 +738,7 @@ by (simp add: divide_inverse norm_mult nonzero_norm_inverse) lemma norm_divide: - fixes a b :: "'a::{real_normed_field,division_by_zero}" + fixes a b :: "'a::{real_normed_field, field_inverse_zero}" shows "norm (a / b) = norm a / norm b" by (simp add: divide_inverse norm_mult norm_inverse) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Rings.thy --- a/src/HOL/Rings.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Rings.thy Tue May 04 20:30:22 2010 +0200 @@ -14,8 +14,8 @@ begin class semiring = ab_semigroup_add + semigroup_mult + - assumes left_distrib[algebra_simps]: "(a + b) * c = a * c + b * c" - assumes right_distrib[algebra_simps]: "a * (b + c) = a * b + a * c" + assumes left_distrib[algebra_simps, field_simps]: "(a + b) * c = a * c + b * c" + assumes right_distrib[algebra_simps, field_simps]: "a * (b + c) = a * b + a * c" begin text{*For the @{text combine_numerals} simproc*} @@ -230,18 +230,15 @@ lemma minus_mult_commute: "- a * b = a * - b" by simp -lemma right_diff_distrib[algebra_simps]: "a * (b - c) = a * b - a * c" +lemma right_diff_distrib[algebra_simps, field_simps]: "a * (b - c) = a * b - a * c" by (simp add: right_distrib diff_minus) -lemma left_diff_distrib[algebra_simps]: "(a - b) * c = a * c - b * c" +lemma left_diff_distrib[algebra_simps, field_simps]: "(a - b) * c = a * c - b * c" by (simp add: left_distrib diff_minus) lemmas ring_distribs[no_atp] = right_distrib left_distrib left_diff_distrib right_diff_distrib -text{*Legacy - use @{text algebra_simps} *} -lemmas ring_simps[no_atp] = algebra_simps - lemma eq_add_iff1: "a * e + c = b * e + d \ (a - b) * e + c = d" by (simp add: algebra_simps) @@ -536,7 +533,7 @@ lemma diff_divide_distrib: "(a - b) / c = a / c - b / c" by (simp add: diff_minus add_divide_distrib) -lemma nonzero_eq_divide_eq: "c \ 0 \ a = b / c \ a * c = b" +lemma nonzero_eq_divide_eq [field_simps]: "c \ 0 \ a = b / c \ a * c = b" proof - assume [simp]: "c \ 0" have "a = b / c \ a * c = (b / c) * c" by simp @@ -544,7 +541,7 @@ finally show ?thesis . qed -lemma nonzero_divide_eq_eq: "c \ 0 \ b / c = a \ b = a * c" +lemma nonzero_divide_eq_eq [field_simps]: "c \ 0 \ b / c = a \ b = a * c" proof - assume [simp]: "c \ 0" have "b / c = a \ (b / c) * c = a * c" by simp @@ -560,7 +557,7 @@ end -class division_by_zero = division_ring + +class division_ring_inverse_zero = division_ring + assumes inverse_zero [simp]: "inverse 0 = 0" begin @@ -687,6 +684,18 @@ end class linordered_semiring_1 = linordered_semiring + semiring_1 +begin + +lemma convex_bound_le: + assumes "x \ a" "y \ a" "0 \ u" "0 \ v" "u + v = 1" + shows "u * x + v * y \ a" +proof- + from assms have "u * x + v * y \ u * a + v * a" + by (simp add: add_mono mult_left_mono) + thus ?thesis using assms unfolding left_distrib[symmetric] by simp +qed + +end class linordered_semiring_strict = semiring + comm_monoid_add + linordered_cancel_ab_semigroup_add + assumes mult_strict_left_mono: "a < b \ 0 < c \ c * a < c * b" @@ -806,6 +815,21 @@ end class linordered_semiring_1_strict = linordered_semiring_strict + semiring_1 +begin + +subclass linordered_semiring_1 .. 
+ +lemma convex_bound_lt: + assumes "x < a" "y < a" "0 \ u" "0 \ v" "u + v = 1" + shows "u * x + v * y < a" +proof - + from assms have "u * x + v * y < u * a + v * a" + by (cases "u = 0") + (auto intro!: add_less_le_mono mult_strict_left_mono mult_left_mono) + thus ?thesis using assms unfolding left_distrib[symmetric] by simp +qed + +end class mult_mono1 = times + zero + ord + assumes mult_mono1: "a \ b \ 0 \ c \ c * a \ c * b" @@ -861,9 +885,6 @@ subclass ordered_ab_group_add .. -text{*Legacy - use @{text algebra_simps} *} -lemmas ring_simps[no_atp] = algebra_simps - lemma less_add_iff1: "a * e + c < b * e + d \ (a - b) * e + c < d" by (simp add: algebra_simps) @@ -1068,9 +1089,6 @@ end -text{*Legacy - use @{text algebra_simps} *} -lemmas ring_simps[no_atp] = algebra_simps - lemmas mult_sign_intros = mult_nonneg_nonneg mult_nonneg_nonpos mult_nonpos_nonneg mult_nonpos_nonpos @@ -1117,6 +1135,7 @@ (*previously linordered_ring*) begin +subclass linordered_semiring_1_strict .. subclass linordered_ring_strict .. subclass ordered_comm_ring .. subclass idom .. diff -r aace7a969410 -r 8629ac3efb19 src/HOL/SEQ.thy --- a/src/HOL/SEQ.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/SEQ.thy Tue May 04 20:30:22 2010 +0200 @@ -532,6 +532,35 @@ lemma convergent_LIMSEQ_iff: "convergent X = (X ----> lim X)" by (auto intro: theI LIMSEQ_unique simp add: convergent_def lim_def) +lemma convergent_const: "convergent (\n. c)" +by (rule convergentI, rule LIMSEQ_const) + +lemma convergent_add: + fixes X Y :: "nat \ 'a::real_normed_vector" + assumes "convergent (\n. X n)" + assumes "convergent (\n. Y n)" + shows "convergent (\n. X n + Y n)" +using assms unfolding convergent_def by (fast intro: LIMSEQ_add) + +lemma convergent_setsum: + fixes X :: "'a \ nat \ 'b::real_normed_vector" + assumes "\i. i \ A \ convergent (\n. X i n)" + shows "convergent (\n. \i\A. X i n)" +proof (cases "finite A") + case True from this and assms show ?thesis + by (induct A set: finite) (simp_all add: convergent_const convergent_add) +qed (simp add: convergent_const) + +lemma (in bounded_linear) convergent: + assumes "convergent (\n. X n)" + shows "convergent (\n. f (X n))" +using assms unfolding convergent_def by (fast intro: LIMSEQ) + +lemma (in bounded_bilinear) convergent: + assumes "convergent (\n. X n)" and "convergent (\n. Y n)" + shows "convergent (\n. X n ** Y n)" +using assms unfolding convergent_def by (fast intro: LIMSEQ) + lemma convergent_minus_iff: fixes X :: "nat \ 'a::real_normed_vector" shows "convergent X \ convergent (\n. 
- X n)" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/SMT/Tools/z3_proof_rules.ML --- a/src/HOL/SMT/Tools/z3_proof_rules.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/SMT/Tools/z3_proof_rules.ML Tue May 04 20:30:22 2010 +0200 @@ -1137,7 +1137,8 @@ handle THM _ => NONE in val z3_simpset = HOL_ss addsimps @{thms array_rules} - addsimps @{thms ring_distribs} addsimps @{thms field_eq_simps} + addsimps @{thms ring_distribs} addsimps @{thms field_simps} + addsimps [@{thm times_divide_eq_right}, @{thm times_divide_eq_left}] addsimps @{thms arith_special} addsimps @{thms less_bin_simps} addsimps @{thms le_bin_simps} addsimps @{thms eq_bin_simps} addsimps @{thms add_bin_simps} addsimps @{thms succ_bin_simps} diff -r aace7a969410 -r 8629ac3efb19 src/HOL/SMT/Z3.thy --- a/src/HOL/SMT/Z3.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/SMT/Z3.thy Tue May 04 20:30:22 2010 +0200 @@ -19,7 +19,7 @@ lemmas [z3_rewrite] = refl eq_commute conj_commute disj_commute simp_thms nnf_simps - ring_distribs field_eq_simps if_True if_False + ring_distribs field_simps times_divide_eq_right times_divide_eq_left if_True if_False lemma [z3_rewrite]: "(P \ Q) = (Q = (\P))" by fast diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Series.thy --- a/src/HOL/Series.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Series.thy Tue May 04 20:30:22 2010 +0200 @@ -381,7 +381,7 @@ shows "norm x < 1 \ summable (\n. x ^ n)" by (rule geometric_sums [THEN sums_summable]) -lemma half: "0 < 1 / (2::'a::{number_ring,division_by_zero,linordered_field})" +lemma half: "0 < 1 / (2::'a::{number_ring,linordered_field_inverse_zero})" by arith lemma power_half_series: "(\n. (1/2::real)^Suc n) sums 1" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/SetInterval.thy --- a/src/HOL/SetInterval.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/SetInterval.thy Tue May 04 20:30:22 2010 +0200 @@ -54,22 +54,22 @@ @{term"{m.. 
'a => 'b set => 'b set" ("(3UN _<=_./ _)" 10) - "_UNION_less" :: "'a => 'a => 'b set => 'b set" ("(3UN _<_./ _)" 10) - "_INTER_le" :: "'a => 'a => 'b set => 'b set" ("(3INT _<=_./ _)" 10) - "_INTER_less" :: "'a => 'a => 'b set => 'b set" ("(3INT _<_./ _)" 10) + "_UNION_le" :: "'a => 'a => 'b set => 'b set" ("(3UN _<=_./ _)" [0, 0, 10] 10) + "_UNION_less" :: "'a => 'a => 'b set => 'b set" ("(3UN _<_./ _)" [0, 0, 10] 10) + "_INTER_le" :: "'a => 'a => 'b set => 'b set" ("(3INT _<=_./ _)" [0, 0, 10] 10) + "_INTER_less" :: "'a => 'a => 'b set => 'b set" ("(3INT _<_./ _)" [0, 0, 10] 10) syntax (xsymbols) - "_UNION_le" :: "'a => 'a => 'b set => 'b set" ("(3\ _\_./ _)" 10) - "_UNION_less" :: "'a => 'a => 'b set => 'b set" ("(3\ _<_./ _)" 10) - "_INTER_le" :: "'a => 'a => 'b set => 'b set" ("(3\ _\_./ _)" 10) - "_INTER_less" :: "'a => 'a => 'b set => 'b set" ("(3\ _<_./ _)" 10) + "_UNION_le" :: "'a => 'a => 'b set => 'b set" ("(3\ _\_./ _)" [0, 0, 10] 10) + "_UNION_less" :: "'a => 'a => 'b set => 'b set" ("(3\ _<_./ _)" [0, 0, 10] 10) + "_INTER_le" :: "'a => 'a => 'b set => 'b set" ("(3\ _\_./ _)" [0, 0, 10] 10) + "_INTER_less" :: "'a => 'a => 'b set => 'b set" ("(3\ _<_./ _)" [0, 0, 10] 10) syntax (latex output) - "_UNION_le" :: "'a \ 'a => 'b set => 'b set" ("(3\(00_ \ _)/ _)" 10) - "_UNION_less" :: "'a \ 'a => 'b set => 'b set" ("(3\(00_ < _)/ _)" 10) - "_INTER_le" :: "'a \ 'a => 'b set => 'b set" ("(3\(00_ \ _)/ _)" 10) - "_INTER_less" :: "'a \ 'a => 'b set => 'b set" ("(3\(00_ < _)/ _)" 10) + "_UNION_le" :: "'a \ 'a => 'b set => 'b set" ("(3\(00_ \ _)/ _)" [0, 0, 10] 10) + "_UNION_less" :: "'a \ 'a => 'b set => 'b set" ("(3\(00_ < _)/ _)" [0, 0, 10] 10) + "_INTER_le" :: "'a \ 'a => 'b set => 'b set" ("(3\(00_ \ _)/ _)" [0, 0, 10] 10) + "_INTER_less" :: "'a \ 'a => 'b set => 'b set" ("(3\(00_ < _)/ _)" [0, 0, 10] 10) translations "UN i<=n. A" == "UN i:{..n}. 
A" @@ -1095,7 +1095,7 @@ next case (Suc n) moreover with `y \ 0` have "(1 + y) ^ n = (y * inverse y) * (1 + y) ^ n" by simp - ultimately show ?case by (simp add: field_eq_simps divide_inverse) + ultimately show ?case by (simp add: field_simps divide_inverse) qed ultimately show ?thesis by simp qed diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Sledgehammer.thy --- a/src/HOL/Sledgehammer.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Sledgehammer.thy Tue May 04 20:30:22 2010 +0200 @@ -18,35 +18,35 @@ ("Tools/Sledgehammer/sledgehammer_proof_reconstruct.ML") ("Tools/Sledgehammer/sledgehammer_fact_filter.ML") ("Tools/ATP_Manager/atp_manager.ML") - ("Tools/ATP_Manager/atp_wrapper.ML") - ("Tools/ATP_Manager/atp_minimal.ML") + ("Tools/ATP_Manager/atp_systems.ML") + ("Tools/Sledgehammer/sledgehammer_fact_minimizer.ML") ("Tools/Sledgehammer/sledgehammer_isar.ML") ("Tools/Sledgehammer/meson_tactic.ML") ("Tools/Sledgehammer/metis_tactics.ML") begin -definition COMBI :: "'a \ 'a" - where "COMBI P \ P" +definition COMBI :: "'a \ 'a" where +[no_atp]: "COMBI P \ P" -definition COMBK :: "'a \ 'b \ 'a" - where "COMBK P Q \ P" +definition COMBK :: "'a \ 'b \ 'a" where +[no_atp]: "COMBK P Q \ P" -definition COMBB :: "('b => 'c) \ ('a => 'b) \ 'a \ 'c" - where "COMBB P Q R \ P (Q R)" +definition COMBB :: "('b => 'c) \ ('a => 'b) \ 'a \ 'c" where [no_atp]: +"COMBB P Q R \ P (Q R)" -definition COMBC :: "('a \ 'b \ 'c) \ 'b \ 'a \ 'c" - where "COMBC P Q R \ P R Q" +definition COMBC :: "('a \ 'b \ 'c) \ 'b \ 'a \ 'c" where +[no_atp]: "COMBC P Q R \ P R Q" -definition COMBS :: "('a \ 'b \ 'c) \ ('a \ 'b) \ 'a \ 'c" - where "COMBS P Q R \ P R (Q R)" +definition COMBS :: "('a \ 'b \ 'c) \ ('a \ 'b) \ 'a \ 'c" where +[no_atp]: "COMBS P Q R \ P R (Q R)" -definition fequal :: "'a \ 'a \ bool" - where "fequal X Y \ (X = Y)" +definition fequal :: "'a \ 'a \ bool" where [no_atp]: +"fequal X Y \ (X = Y)" -lemma fequal_imp_equal: "fequal X Y \ X = Y" +lemma fequal_imp_equal [no_atp]: "fequal X Y \ X = Y" by (simp add: fequal_def) -lemma equal_imp_fequal: "X = Y \ fequal X Y" +lemma equal_imp_fequal [no_atp]: "X = Y \ fequal X Y" by (simp add: fequal_def) text{*These two represent the equivalence between Boolean equality and iff. @@ -61,31 +61,31 @@ text{*Theorems for translation to combinators*} -lemma abs_S: "\x. (f x) (g x) \ COMBS f g" +lemma abs_S [no_atp]: "\x. (f x) (g x) \ COMBS f g" apply (rule eq_reflection) apply (rule ext) apply (simp add: COMBS_def) done -lemma abs_I: "\x. x \ COMBI" +lemma abs_I [no_atp]: "\x. x \ COMBI" apply (rule eq_reflection) apply (rule ext) apply (simp add: COMBI_def) done -lemma abs_K: "\x. y \ COMBK y" +lemma abs_K [no_atp]: "\x. y \ COMBK y" apply (rule eq_reflection) apply (rule ext) apply (simp add: COMBK_def) done -lemma abs_B: "\x. a (g x) \ COMBB a g" +lemma abs_B [no_atp]: "\x. a (g x) \ COMBB a g" apply (rule eq_reflection) apply (rule ext) apply (simp add: COMBB_def) done -lemma abs_C: "\x. (f x) b \ COMBC f b" +lemma abs_C [no_atp]: "\x. 
(f x) b \ COMBC f b" apply (rule eq_reflection) apply (rule ext) apply (simp add: COMBC_def) @@ -101,10 +101,11 @@ use "Tools/Sledgehammer/sledgehammer_proof_reconstruct.ML" use "Tools/Sledgehammer/sledgehammer_fact_filter.ML" use "Tools/ATP_Manager/atp_manager.ML" -use "Tools/ATP_Manager/atp_wrapper.ML" -setup ATP_Wrapper.setup -use "Tools/ATP_Manager/atp_minimal.ML" +use "Tools/ATP_Manager/atp_systems.ML" +setup ATP_Systems.setup +use "Tools/Sledgehammer/sledgehammer_fact_minimizer.ML" use "Tools/Sledgehammer/sledgehammer_isar.ML" +setup Sledgehammer_Isar.setup subsection {* The MESON prover *} diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Statespace/state_space.ML --- a/src/HOL/Statespace/state_space.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Statespace/state_space.ML Tue May 04 20:30:22 2010 +0200 @@ -198,7 +198,7 @@ if Variable.is_fixed ctxt name orelse Variable.is_declared ctxt name then let val n' = lookupI (op =) (Variable.fixes_of ctxt) name - in SOME (Free (n',ProofContext.infer_type ctxt n')) end + in SOME (Free (n',ProofContext.infer_type ctxt (n', dummyT))) end else NONE @@ -430,7 +430,7 @@ let fun upd (n,v) = let - val nT = ProofContext.infer_type (Local_Theory.target_of lthy) n + val nT = ProofContext.infer_type (Local_Theory.target_of lthy) (n, dummyT) in Context.proof_map (update_declinfo (Morphism.term phi (Free (n,nT)),v)) end; @@ -440,7 +440,7 @@ fun string_of_typ T = setmp_CRITICAL show_sorts true - (PrintMode.setmp [] (Syntax.string_of_typ (ProofContext.init thy))) T; + (PrintMode.setmp [] (Syntax.string_of_typ (ProofContext.init_global thy))) T; val fixestate = (case state_type of NONE => [] | SOME s => @@ -502,7 +502,7 @@ *) val _ = writeln ("Defining statespace " ^ quote name ^ " ..."); - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; fun add_parent (Ts,pname,rs) env = let diff -r aace7a969410 -r 8629ac3efb19 src/HOL/TLA/Memory/MemoryParameters.thy --- a/src/HOL/TLA/Memory/MemoryParameters.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/TLA/Memory/MemoryParameters.thy Tue May 04 20:30:22 2010 +0200 @@ -12,7 +12,7 @@ begin (* the memory operations *) -datatype memOp = read Locs | write Locs Vals +datatype memOp = read Locs | "write" Locs Vals consts (* memory locations and contents *) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/ATP_Manager/SystemOnTPTP --- a/src/HOL/Tools/ATP_Manager/SystemOnTPTP Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/ATP_Manager/SystemOnTPTP Tue May 04 20:30:22 2010 +0200 @@ -136,7 +136,7 @@ print $Response->content; exit(0); }else { - print "Remote-script could not extract proof:\n".$Response->content; + print "Remote script could not extract proof:\n".$Response->content; exit(-1); } diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/ATP_Manager/atp_manager.ML --- a/src/HOL/Tools/ATP_Manager/atp_manager.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/ATP_Manager/atp_manager.ML Tue May 04 20:30:22 2010 +0200 @@ -8,6 +8,7 @@ signature ATP_MANAGER = sig + type name_pool = Sledgehammer_HOL_Clause.name_pool type relevance_override = Sledgehammer_Fact_Filter.relevance_override type minimize_command = Sledgehammer_Proof_Reconstruct.minimize_command type params = @@ -21,11 +22,9 @@ relevance_threshold: real, convergence: real, theory_relevant: bool option, - higher_order: bool option, follow_defs: bool, isar_proof: bool, - modulus: int, - sorts: bool, + shrink_factor: int, timeout: Time.time, minimize_timeout: Time.time} type problem = @@ -34,29 +33,32 @@ relevance_override: relevance_override, 
axiom_clauses: (thm * (string * int)) list option, filtered_clauses: (thm * (string * int)) list option} + datatype failure = + Unprovable | TimedOut | OutOfResources | OldSpass | MalformedOutput | + UnknownError type prover_result = - {success: bool, + {outcome: failure option, message: string, + pool: name_pool option, relevant_thm_names: string list, atp_run_time_in_msecs: int, + output: string, proof: string, internal_thm_names: string Vector.vector, + conjecture_shape: int list list, filtered_clauses: (thm * (string * int)) list} type prover = params -> minimize_command -> Time.time -> problem -> prover_result - val atps: string Unsynchronized.ref - val timeout: int Unsynchronized.ref - val full_types: bool Unsynchronized.ref val kill_atps: unit -> unit val running_atps: unit -> unit val messages: int option -> unit val add_prover: string * prover -> theory -> theory - val get_prover: theory -> string -> prover option + val get_prover: theory -> string -> prover val available_atps: theory -> unit - val sledgehammer: - params -> int -> relevance_override -> (string -> minimize_command) - -> Proof.state -> unit + val start_prover_thread: + params -> Time.time -> Time.time -> int -> int -> relevance_override + -> (string -> minimize_command) -> Proof.state -> string -> unit end; structure ATP_Manager : ATP_MANAGER = @@ -79,11 +81,9 @@ relevance_threshold: real, convergence: real, theory_relevant: bool option, - higher_order: bool option, follow_defs: bool, isar_proof: bool, - modulus: int, - sorts: bool, + shrink_factor: int, timeout: Time.time, minimize_timeout: Time.time} @@ -94,13 +94,20 @@ axiom_clauses: (thm * (string * int)) list option, filtered_clauses: (thm * (string * int)) list option}; +datatype failure = + Unprovable | TimedOut | OutOfResources | OldSpass | MalformedOutput | + UnknownError + type prover_result = - {success: bool, + {outcome: failure option, message: string, + pool: name_pool option, relevant_thm_names: string list, atp_run_time_in_msecs: int, + output: string, proof: string, internal_thm_names: string Vector.vector, + conjecture_shape: int list list, filtered_clauses: (thm * (string * int)) list}; type prover = @@ -112,26 +119,6 @@ val message_store_limit = 20; val message_display_limit = 5; -val atps = Unsynchronized.ref "e spass remote_vampire"; (* set in "ATP_Wrapper" *) -val timeout = Unsynchronized.ref 60; -val full_types = Unsynchronized.ref false; - -val _ = - ProofGeneralPgip.add_preference Preferences.category_proof - (Preferences.string_pref atps - "ATP: provers" "Default automatic provers (separated by whitespace)"); - -val _ = - ProofGeneralPgip.add_preference Preferences.category_proof - (Preferences.int_pref timeout - "ATP: timeout" "ATPs will be interrupted after this time (in seconds)"); - -val _ = - ProofGeneralPgip.add_preference Preferences.category_proof - (Preferences.bool_pref full_types - "ATP: full types" "ATPs will use full type information"); - - (** thread management **) @@ -172,13 +159,13 @@ Synchronized.change global_state (fn state as {manager, timeout_heap, active, cancelling, messages, store} => (case lookup_thread active thread of - SOME (birth_time, _, description) => + SOME (birth_time, _, desc) => let val active' = delete_thread thread active; val now = Time.now () - val cancelling' = (thread, (now, description)) :: cancelling; + val cancelling' = (thread, (now, desc)) :: cancelling; val message' = - description ^ "\n" ^ message ^ + desc ^ "\n" ^ message ^ (if verbose then "Total time: " ^ Int.toString (Time.toMilliseconds 
(Time.- (now, birth_time))) ^ " ms.\n" @@ -246,7 +233,7 @@ do (Synchronized.timed_access global_state (SOME o time_limit o #timeout_heap) action |> these - |> List.app (unregister params "Timed out."); + |> List.app (unregister params "Timed out.\n"); print_new_messages (); (*give threads some time to respond to interrupt*) OS.Process.sleep min_wait_time) @@ -277,6 +264,8 @@ let val killing = map (fn (th, (_, _, desc)) => (th, (Time.now (), desc))) active; val state' = make_state manager timeout_heap [] (killing @ cancelling) messages store; + val _ = if null active then () + else priority "Killed active Sledgehammer threads." in state' end); @@ -322,7 +311,7 @@ fun err_dup_prover name = error ("Duplicate prover: " ^ quote name ^ "."); -structure Provers = Theory_Data +structure Data = Theory_Data ( type T = (prover * stamp) Symtab.table; val empty = Symtab.empty; @@ -332,60 +321,42 @@ ); fun add_prover (name, prover) thy = - Provers.map (Symtab.update_new (name, (prover, stamp ()))) thy - handle Symtab.DUP name => err_dup_prover name; + Data.map (Symtab.update_new (name, (prover, stamp ()))) thy + handle Symtab.DUP name => err_dup_prover name; fun get_prover thy name = - Option.map #1 (Symtab.lookup (Provers.get thy) name); + case Symtab.lookup (Data.get thy) name of + SOME (prover, _) => prover + | NONE => error ("Unknown ATP: " ^ name) fun available_atps thy = priority ("Available ATPs: " ^ - commas (sort_strings (Symtab.keys (Provers.get thy))) ^ ".") + commas (sort_strings (Symtab.keys (Data.get thy))) ^ ".") (* start prover thread *) -fun start_prover (params as {timeout, ...}) birth_time death_time i - relevance_override minimize_command proof_state name = - (case get_prover (Proof.theory_of proof_state) name of - NONE => warning ("Unknown ATP: " ^ quote name ^ ".") - | SOME prover => +fun start_prover_thread (params as {timeout, ...}) birth_time death_time i n + relevance_override minimize_command proof_state name = + let + val prover = get_prover (Proof.theory_of proof_state) name + val {context = ctxt, facts, goal} = Proof.goal proof_state; + val desc = + "ATP " ^ quote name ^ " for subgoal " ^ string_of_int i ^ ":\n" ^ + Syntax.string_of_term ctxt (Thm.term_of (Thm.cprem_of goal i)); + val _ = Toplevel.thread true (fn () => let - val {context = ctxt, facts, goal} = Proof.goal proof_state; - val n = Logic.count_prems (prop_of goal) - val desc = - "ATP " ^ quote name ^ " for subgoal " ^ string_of_int i ^ ":\n" ^ - Syntax.string_of_term ctxt (Thm.term_of (Thm.cprem_of goal i)); - - val _ = Toplevel.thread true (fn () => - let - val _ = register params birth_time death_time (Thread.self (), desc) - val problem = - {subgoal = i, goal = (ctxt, (facts, goal)), - relevance_override = relevance_override, axiom_clauses = NONE, - filtered_clauses = NONE} - val message = - #message (prover params (minimize_command name) timeout problem) - handle Sledgehammer_HOL_Clause.TRIVIAL => - metis_line i n [] - | ERROR msg => "Error: " ^ msg ^ ".\n"; - val _ = unregister params message (Thread.self ()); - in () end); - in () end); - - -(* Sledgehammer the given subgoal *) - -fun sledgehammer (params as {atps, timeout, ...}) i relevance_override - minimize_command proof_state = - let - val birth_time = Time.now () - val death_time = Time.+ (birth_time, timeout) - val _ = kill_atps () (* RACE w.r.t. other invocations of Sledgehammer *) - val _ = priority "Sledgehammering..." 
- val _ = List.app (start_prover params birth_time death_time i - relevance_override minimize_command - proof_state) atps + val _ = register params birth_time death_time (Thread.self (), desc) + val problem = + {subgoal = i, goal = (ctxt, (facts, goal)), + relevance_override = relevance_override, axiom_clauses = NONE, + filtered_clauses = NONE} + val message = + #message (prover params (minimize_command name) timeout problem) + handle Sledgehammer_HOL_Clause.TRIVIAL => metis_line i n [] + | ERROR message => "Error: " ^ message ^ "\n" + val _ = unregister params message (Thread.self ()); + in () end) in () end end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/ATP_Manager/atp_minimal.ML --- a/src/HOL/Tools/ATP_Manager/atp_minimal.ML Tue May 04 19:57:55 2010 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,144 +0,0 @@ -(* Title: HOL/Tools/ATP_Manager/atp_minimal.ML - Author: Philipp Meyer, TU Muenchen - -Minimization of theorem list for Metis using automatic theorem provers. -*) - -signature ATP_MINIMAL = -sig - type params = ATP_Manager.params - type prover = ATP_Manager.prover - type prover_result = ATP_Manager.prover_result - - val minimize_theorems : - params -> prover -> string -> int -> Proof.state -> (string * thm list) list - -> (string * thm list) list option * string -end; - -structure ATP_Minimal : ATP_MINIMAL = -struct - -open Sledgehammer_Util -open Sledgehammer_Fact_Preprocessor -open Sledgehammer_Proof_Reconstruct -open ATP_Manager - -(* Linear minimization algorithm *) - -fun linear_minimize test s = - let - fun aux [] p = p - | aux (x :: xs) (needed, result) = - case test (xs @ needed) of - SOME result => aux xs (needed, result) - | NONE => aux xs (x :: needed, result) - in aux s end - - -(* failure check and producing answer *) - -datatype outcome = Success | Failure | Timeout | Error - -val string_of_outcome = - fn Success => "Success" - | Failure => "Failure" - | Timeout => "Timeout" - | Error => "Error" - -val failure_strings = - [("SPASS beiseite: Ran out of time.", Timeout), - ("Timeout", Timeout), - ("time limit exceeded", Timeout), - ("# Cannot determine problem status within resource limit", Timeout), - ("Error", Error)] - -fun outcome_of_result (result as {success, proof, ...} : prover_result) = - if success then - Success - else case get_first (fn (s, outcome) => - if String.isSubstring s proof then SOME outcome - else NONE) failure_strings of - SOME outcome => outcome - | NONE => Failure - -(* wrapper for calling external prover *) - -fun sledgehammer_test_theorems (params as {full_types, ...} : params) prover - timeout subgoal state filtered_clauses name_thms_pairs = - let - val num_theorems = length name_thms_pairs - val _ = priority ("Testing " ^ string_of_int num_theorems ^ - " theorem" ^ plural_s num_theorems ^ "...") - val name_thm_pairs = maps (fn (n, ths) => map (pair n) ths) name_thms_pairs - val axclauses = cnf_rules_pairs (Proof.theory_of state) name_thm_pairs - val {context = ctxt, facts, goal} = Proof.goal state - val problem = - {subgoal = subgoal, goal = (ctxt, (facts, goal)), - relevance_override = {add = [], del = [], only = false}, - axiom_clauses = SOME axclauses, - filtered_clauses = SOME (the_default axclauses filtered_clauses)} - in - `outcome_of_result (prover params (K "") timeout problem) - |>> tap (priority o string_of_outcome) - end - -(* minimalization of thms *) - -fun minimize_theorems (params as {debug, minimize_timeout, isar_proof, modulus, - sorts, ...}) - prover atp_name i state name_thms_pairs = - let - val msecs = 
Time.toMilliseconds minimize_timeout - val n = length name_thms_pairs - val _ = - priority ("Sledgehammer minimizer: ATP " ^ quote atp_name ^ - " with a time limit of " ^ string_of_int msecs ^ " ms.") - val test_thms_fun = - sledgehammer_test_theorems params prover minimize_timeout i state - fun test_thms filtered thms = - case test_thms_fun filtered thms of - (Success, result) => SOME result - | _ => NONE - - val {context = ctxt, facts, goal} = Proof.goal state; - val n = Logic.count_prems (prop_of goal) - in - (* try prove first to check result and get used theorems *) - (case test_thms_fun NONE name_thms_pairs of - (Success, result as {internal_thm_names, filtered_clauses, ...}) => - let - val used = internal_thm_names |> Vector.foldr (op ::) [] - |> sort_distinct string_ord - val to_use = - if length used < length name_thms_pairs then - filter (fn (name1, _) => List.exists (curry (op =) name1) used) - name_thms_pairs - else name_thms_pairs - val (min_thms, {proof, internal_thm_names, ...}) = - linear_minimize (test_thms (SOME filtered_clauses)) to_use - ([], result) - val n = length min_thms - val _ = priority (cat_lines - ["Minimized: " ^ string_of_int n ^ " theorem" ^ plural_s n] ^ ".") - in - (SOME min_thms, - proof_text isar_proof debug modulus sorts ctxt - (K "", proof, internal_thm_names, goal, i) |> fst) - end - | (Timeout, _) => - (NONE, "Timeout: You can increase the time limit using the \"timeout\" \ - \option (e.g., \"timeout = " ^ - string_of_int (10 + msecs div 1000) ^ " s\").") - | (Error, {message, ...}) => (NONE, "ATP error: " ^ message) - | (Failure, _) => - (* Failure sometimes mean timeout, unfortunately. *) - (NONE, "Failure: No proof was found with the current time limit. You \ - \can increase the time limit using the \"timeout\" \ - \option (e.g., \"timeout = " ^ - string_of_int (10 + msecs div 1000) ^ " s\").")) - handle Sledgehammer_HOL_Clause.TRIVIAL => - (SOME [], metis_line i n []) - | ERROR msg => (NONE, "Error: " ^ msg) - end - -end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/ATP_Manager/atp_systems.ML --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Tools/ATP_Manager/atp_systems.ML Tue May 04 20:30:22 2010 +0200 @@ -0,0 +1,426 @@ +(* Title: HOL/Tools/ATP_Manager/atp_systems.ML + Author: Fabian Immler, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +Setup for supported ATPs. 
+*) + +signature ATP_SYSTEMS = +sig + type prover = ATP_Manager.prover + + (* hooks for problem files *) + val dest_dir : string Config.T + val problem_prefix : string Config.T + val measure_runtime : bool Config.T + + val refresh_systems_on_tptp : unit -> unit + val default_atps_param_value : unit -> string + val setup : theory -> theory +end; + +structure ATP_Systems : ATP_SYSTEMS = +struct + +open Sledgehammer_Util +open Sledgehammer_Fact_Preprocessor +open Sledgehammer_HOL_Clause +open Sledgehammer_Fact_Filter +open Sledgehammer_Proof_Reconstruct +open ATP_Manager + +(** generic ATP **) + +(* external problem files *) + +val (dest_dir, dest_dir_setup) = Attrib.config_string "atp_dest_dir" (K ""); + (*Empty string means create files in Isabelle's temporary files directory.*) + +val (problem_prefix, problem_prefix_setup) = + Attrib.config_string "atp_problem_prefix" (K "prob"); + +val (measure_runtime, measure_runtime_setup) = + Attrib.config_bool "atp_measure_runtime" (K false); + + +(* prover configuration *) + +type prover_config = + {home: string, + executable: string, + arguments: Time.time -> string, + proof_delims: (string * string) list, + known_failures: (failure * string) list, + max_axiom_clauses: int, + prefers_theory_relevant: bool}; + + +(* basic template *) + +val remotify = prefix "remote_" + +fun with_path cleanup after f path = + Exn.capture f path + |> tap (fn _ => cleanup path) + |> Exn.release + |> tap (after path) + +(* Splits by the first possible of a list of delimiters. *) +fun extract_proof delims output = + case pairself (find_first (fn s => String.isSubstring s output)) + (ListPair.unzip delims) of + (SOME begin_delim, SOME end_delim) => + (output |> first_field begin_delim |> the |> snd + |> first_field end_delim |> the |> fst + |> first_field "\n" |> the |> snd + handle Option.Option => "") + | _ => "" + +fun extract_proof_and_outcome res_code proof_delims known_failures output = + case map_filter (fn (failure, pattern) => + if String.isSubstring pattern output then SOME failure + else NONE) known_failures of + [] => (case extract_proof proof_delims output of + "" => ("", SOME UnknownError) + | proof => if res_code = 0 then (proof, NONE) + else ("", SOME UnknownError)) + | (failure :: _) => ("", SOME failure) + +fun string_for_failure Unprovable = "The ATP problem is unprovable." + | string_for_failure TimedOut = "Timed out." + | string_for_failure OutOfResources = "The ATP ran out of resources." + | string_for_failure OldSpass = + (* FIXME: Change the error message below to point to the Isabelle download + page once the package is there (around the Isabelle2010 release). *) + "Warning: Sledgehammer requires a more recent version of SPASS with \ + \support for the TPTP syntax. To install it, download and untar the \ + \package \"http://isabelle.in.tum.de/~blanchet/spass-3.7.tgz\" and add the \ + \\"spass-3.7\" directory's full path to \"" ^ + Path.implode (Path.expand (Path.appends + (Path.variable "ISABELLE_HOME_USER" :: + map Path.basic ["etc", "components"]))) ^ + "\" on a line of its own." + | string_for_failure MalformedOutput = "Error: The ATP output is malformed." + | string_for_failure UnknownError = "Error: An unknown ATP error occurred." 
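+(* Numbers the literals of the given clauses consecutively, starting at "j"; the result mirrors the nested list structure of its input (used below to compute "conjecture_shape"). *)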
+ +fun shape_of_clauses _ [] = [] + | shape_of_clauses j ([] :: clauses) = [] :: shape_of_clauses j clauses + | shape_of_clauses j ((lit :: lits) :: clauses) = + let val shape = shape_of_clauses (j + 1) (lits :: clauses) in + (j :: hd shape) :: tl shape + end + +fun generic_prover overlord get_facts prepare write_file home executable args + proof_delims known_failures name + ({debug, full_types, explicit_apply, isar_proof, shrink_factor, ...} + : params) minimize_command + ({subgoal, goal, relevance_override, axiom_clauses, filtered_clauses} + : problem) = + let + (* get clauses and prepare them for writing *) + val (ctxt, (chain_ths, th)) = goal; + val thy = ProofContext.theory_of ctxt; + val chain_ths = map (Thm.put_name_hint chained_hint) chain_ths; + val goal_clss = #1 (neg_conjecture_clauses ctxt th subgoal) + val goal_cls = List.concat goal_clss + val the_filtered_clauses = + (case filtered_clauses of + NONE => get_facts relevance_override goal goal_cls + | SOME fcls => fcls); + val the_axiom_clauses = + (case axiom_clauses of + NONE => the_filtered_clauses + | SOME axcls => axcls); + val (internal_thm_names, clauses) = + prepare goal_cls chain_ths the_axiom_clauses the_filtered_clauses thy; + + (* path to unique problem file *) + val the_dest_dir = if overlord then getenv "ISABELLE_HOME_USER" + else Config.get ctxt dest_dir; + val the_problem_prefix = Config.get ctxt problem_prefix; + fun prob_pathname nr = + let + val probfile = + Path.basic ((if overlord then "prob_" ^ name + else the_problem_prefix ^ serial_string ()) + ^ "_" ^ string_of_int nr) + in + if the_dest_dir = "" then File.tmp_path probfile + else if File.exists (Path.explode the_dest_dir) + then Path.append (Path.explode the_dest_dir) probfile + else error ("No such directory: " ^ the_dest_dir ^ ".") + end; + + val command = Path.explode (home ^ "/" ^ executable) + (* write out problem file and call prover *) + fun command_line probfile = + (if Config.get ctxt measure_runtime then + "TIMEFORMAT='%3U'; { time " ^ + space_implode " " [File.shell_path command, args, + File.shell_path probfile] ^ " ; } 2>&1" + else + space_implode " " ["exec", File.shell_path command, args, + File.shell_path probfile, "2>&1"]) ^ + (if overlord then + " | sed 's/,/, /g' \ + \| sed 's/\\([^!=<]\\)\\([=|]\\)\\([^=>]\\)/\\1 \\2 \\3/g' \ + \| sed 's/ / /g' | sed 's/| |/||/g' \ + \| sed 's/ = = =/===/g' \ + \| sed 's/= = /== /g'" + else + "") + fun split_time s = + let + val split = String.tokens (fn c => str c = "\n"); + val (output, t) = s |> split |> split_last |> apfst cat_lines; + fun as_num f = f >> (fst o read_int); + val num = as_num (Scan.many1 Symbol.is_ascii_digit); + val digit = Scan.one Symbol.is_ascii_digit; + val num3 = as_num (digit ::: digit ::: (digit >> single)); + val time = num --| Scan.$$ "." -- num3 >> (fn (a, b) => a * 1000 + b); + val as_time = the_default 0 o Scan.read Symbol.stopper time o explode; + in (output, as_time t) end; + fun split_time' s = + if Config.get ctxt measure_runtime then split_time s else (s, 0) + fun run_on probfile = + if File.exists command then + write_file full_types explicit_apply probfile clauses + |> pair (apfst split_time' (bash_output (command_line probfile))) + else + error ("Bad executable: " ^ Path.implode command ^ "."); + + (* If the problem file has not been exported, remove it; otherwise, export + the proof file too. 
*) + fun cleanup probfile = + if the_dest_dir = "" then try File.rm probfile else NONE + fun export probfile (((output, _), _), _) = + if the_dest_dir = "" then + () + else + File.write (Path.explode (Path.implode probfile ^ "_proof")) + ((if overlord then + "% " ^ command_line probfile ^ "\n% " ^ timestamp () ^ + "\n" + else + "") ^ output) + + val (((output, atp_run_time_in_msecs), res_code), + (pool, conjecture_offset)) = + with_path cleanup export run_on (prob_pathname subgoal); + val conjecture_shape = shape_of_clauses (conjecture_offset + 1) goal_clss + (* Check for success and print out some information on failure. *) + val (proof, outcome) = + extract_proof_and_outcome res_code proof_delims known_failures output + val (message, relevant_thm_names) = + case outcome of + NONE => + proof_text isar_proof + (pool, debug, shrink_factor, ctxt, conjecture_shape) + (minimize_command, proof, internal_thm_names, th, subgoal) + | SOME failure => (string_for_failure failure ^ "\n", []) + in + {outcome = outcome, message = message, pool = pool, + relevant_thm_names = relevant_thm_names, + atp_run_time_in_msecs = atp_run_time_in_msecs, output = output, + proof = proof, internal_thm_names = internal_thm_names, + conjecture_shape = conjecture_shape, + filtered_clauses = the_filtered_clauses} + end; + + +(* generic TPTP-based provers *) + +fun generic_tptp_prover + (name, {home, executable, arguments, proof_delims, known_failures, + max_axiom_clauses, prefers_theory_relevant}) + (params as {debug, overlord, respect_no_atp, relevance_threshold, + convergence, theory_relevant, follow_defs, isar_proof, ...}) + minimize_command timeout = + generic_prover overlord + (get_relevant_facts respect_no_atp relevance_threshold convergence + follow_defs max_axiom_clauses + (the_default prefers_theory_relevant theory_relevant)) + (prepare_clauses false) + (write_tptp_file (debug andalso overlord)) home + executable (arguments timeout) proof_delims known_failures name params + minimize_command + +fun tptp_prover name p = (name, generic_tptp_prover (name, p)); + + +(** common provers **) + +fun to_generous_secs time = (Time.toMilliseconds time + 999) div 1000 + +(* Vampire *) + +(* Vampire requires an explicit time limit. 
*) + +val vampire_config : prover_config = + {home = getenv "VAMPIRE_HOME", + executable = "vampire", + arguments = fn timeout => + "--output_syntax tptp --mode casc -t " ^ + string_of_int (to_generous_secs timeout), + proof_delims = + [("=========== Refutation ==========", + "======= End of refutation ======="), + ("% SZS output start Refutation", "% SZS output end Refutation")], + known_failures = + [(Unprovable, "Satisfiability detected"), + (OutOfResources, "CANNOT PROVE"), + (OutOfResources, "Refutation not found")], + max_axiom_clauses = 60, + prefers_theory_relevant = false} +val vampire = tptp_prover "vampire" vampire_config + + +(* E prover *) + +val tstp_proof_delims = + ("# SZS output start CNFRefutation.", "# SZS output end CNFRefutation") + +val e_config : prover_config = + {home = getenv "E_HOME", + executable = "eproof", + arguments = fn timeout => + "--tstp-in --tstp-out -l5 -xAutoDev -tAutoDev --silent --cpu-limit=" ^ + string_of_int (to_generous_secs timeout), + proof_delims = [tstp_proof_delims], + known_failures = + [(Unprovable, "SZS status: Satisfiable"), + (Unprovable, "SZS status Satisfiable"), + (TimedOut, "Failure: Resource limit exceeded (time)"), + (TimedOut, "time limit exceeded"), + (OutOfResources, + "# Cannot determine problem status within resource limit"), + (OutOfResources, "SZS status: ResourceOut"), + (OutOfResources, "SZS status ResourceOut")], + max_axiom_clauses = 100, + prefers_theory_relevant = false} +val e = tptp_prover "e" e_config + + +(* SPASS *) + +fun generic_dfg_prover + (name, {home, executable, arguments, proof_delims, known_failures, + max_axiom_clauses, prefers_theory_relevant}) + (params as {overlord, respect_no_atp, relevance_threshold, convergence, + theory_relevant, follow_defs, ...}) + minimize_command timeout = + generic_prover overlord + (get_relevant_facts respect_no_atp relevance_threshold convergence + follow_defs max_axiom_clauses + (the_default prefers_theory_relevant theory_relevant)) + (prepare_clauses true) write_dfg_file home executable + (arguments timeout) proof_delims known_failures name params + minimize_command + +fun dfg_prover name p = (name, generic_dfg_prover (name, p)) + +(* The "-VarWeight=3" option helps the higher-order problems, probably by + counteracting the presence of "hAPP". *) +val spass_config : prover_config = + {home = getenv "SPASS_HOME", + executable = "SPASS", + arguments = fn timeout => + "-Auto -SOS=1 -PGiven=0 -PProblem=0 -Splits=0 -FullRed=0 -DocProof \ + \-VarWeight=3 -TimeLimit=" ^ string_of_int (to_generous_secs timeout), + proof_delims = [("Here is a proof", "Formulae used in the proof")], + known_failures = + [(Unprovable, "SPASS beiseite: Completion found"), + (TimedOut, "SPASS beiseite: Ran out of time"), + (OutOfResources, "SPASS beiseite: Maximal number of loops exceeded")], + max_axiom_clauses = 40, + prefers_theory_relevant = true} +val spass = dfg_prover "spass" spass_config + +(* SPASS 3.7 supports both the DFG and the TPTP syntax, whereas SPASS 3.0 + supports only the DFG syntax. As soon as all Isabelle repository/snapshot + users have upgraded to 3.7, we can kill "spass" (and all DFG support in + Sledgehammer) and rename "spass_tptp" "spass". 
*) + +val spass_tptp_config = + {home = #home spass_config, + executable = #executable spass_config, + arguments = prefix "-TPTP " o #arguments spass_config, + proof_delims = #proof_delims spass_config, + known_failures = + #known_failures spass_config @ + [(OldSpass, "unrecognized option `-TPTP'"), + (OldSpass, "Unrecognized option TPTP")], + max_axiom_clauses = #max_axiom_clauses spass_config, + prefers_theory_relevant = #prefers_theory_relevant spass_config} +val spass_tptp = tptp_prover "spass_tptp" spass_tptp_config + +(* remote prover invocation via SystemOnTPTP *) + +val systems = Synchronized.var "atp_systems" ([]: string list); + +fun get_systems () = + case bash_output "\"$ISABELLE_ATP_MANAGER/SystemOnTPTP\" -w" of + (answer, 0) => split_lines answer + | (answer, _) => + error ("Failed to get available systems at SystemOnTPTP:\n" ^ answer) + +fun refresh_systems_on_tptp () = + Synchronized.change systems (fn _ => get_systems ()); + +fun get_system prefix = Synchronized.change_result systems (fn systems => + (if null systems then get_systems () else systems) + |> `(find_first (String.isPrefix prefix))); + +fun the_system prefix = + (case get_system prefix of + NONE => error ("System " ^ quote prefix ^ + " not available at SystemOnTPTP.") + | SOME sys => sys); + +val remote_known_failures = + [(TimedOut, "says Timeout"), + (MalformedOutput, "Remote script could not extract proof")] + +fun remote_prover_config atp_prefix args + ({proof_delims, known_failures, max_axiom_clauses, + prefers_theory_relevant, ...} : prover_config) : prover_config = + {home = getenv "ISABELLE_ATP_MANAGER", + executable = "SystemOnTPTP", + arguments = fn timeout => + args ^ " -t " ^ string_of_int (to_generous_secs timeout) ^ " -s " ^ + the_system atp_prefix, + proof_delims = insert (op =) tstp_proof_delims proof_delims, + known_failures = remote_known_failures @ known_failures, + max_axiom_clauses = max_axiom_clauses, + prefers_theory_relevant = prefers_theory_relevant} + +val remote_vampire = + tptp_prover (remotify (fst vampire)) + (remote_prover_config "Vampire---9" "" vampire_config) + +val remote_e = + tptp_prover (remotify (fst e)) + (remote_prover_config "EP---" "" e_config) + +val remote_spass = + tptp_prover (remotify (fst spass)) + (remote_prover_config "SPASS---" "-x" spass_config) + +fun maybe_remote (name, _) ({home, ...} : prover_config) = + name |> home = "" ? remotify + +fun default_atps_param_value () = + space_implode " " [maybe_remote e e_config, maybe_remote spass spass_config, + remotify (fst vampire)] + +val provers = + [spass, spass_tptp, vampire, e, remote_vampire, remote_spass, remote_e] +val prover_setup = fold add_prover provers + +val setup = + dest_dir_setup + #> problem_prefix_setup + #> measure_runtime_setup + #> prover_setup + +end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/ATP_Manager/atp_wrapper.ML --- a/src/HOL/Tools/ATP_Manager/atp_wrapper.ML Tue May 04 19:57:55 2010 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,384 +0,0 @@ -(* Title: HOL/Tools/ATP_Manager/atp_wrapper.ML - Author: Fabian Immler, TU Muenchen - -Wrapper functions for external ATPs. 
-*) - -signature ATP_WRAPPER = -sig - type prover = ATP_Manager.prover - - (* hooks for problem files *) - val destdir : string Config.T - val problem_prefix : string Config.T - val measure_runtime : bool Config.T - - val refresh_systems_on_tptp : unit -> unit - val setup : theory -> theory -end; - -structure ATP_Wrapper : ATP_WRAPPER = -struct - -open Sledgehammer_Util -open Sledgehammer_Fact_Preprocessor -open Sledgehammer_HOL_Clause -open Sledgehammer_Fact_Filter -open Sledgehammer_Proof_Reconstruct -open ATP_Manager - -(** generic ATP wrapper **) - -(* external problem files *) - -val (destdir, destdir_setup) = Attrib.config_string "atp_destdir" (K ""); - (*Empty string means create files in Isabelle's temporary files directory.*) - -val (problem_prefix, problem_prefix_setup) = - Attrib.config_string "atp_problem_prefix" (K "prob"); - -val (measure_runtime, measure_runtime_setup) = - Attrib.config_bool "atp_measure_runtime" (K false); - - -(* prover configuration *) - -val remotify = prefix "remote_" - -type prover_config = - {home: string, - executable: string, - arguments: Time.time -> string, - known_failures: (string list * string) list, - max_new_clauses: int, - prefers_theory_relevant: bool}; - - -(* basic template *) - -fun with_path cleanup after f path = - Exn.capture f path - |> tap (fn _ => cleanup path) - |> Exn.release - |> tap (after path); - -fun find_known_failure known_failures proof = - case map_filter (fn (patterns, message) => - if exists (fn pattern => String.isSubstring pattern proof) - patterns then - SOME message - else - NONE) known_failures of - [] => if is_proof_well_formed proof then "" - else "Error: The ATP output is ill-formed." - | (message :: _) => message - -fun generic_prover overlord get_facts prepare write_file home executable args - known_failures name - ({debug, full_types, explicit_apply, isar_proof, modulus, sorts, ...} - : params) minimize_command - ({subgoal, goal, relevance_override, axiom_clauses, filtered_clauses} - : problem) = - let - (* get clauses and prepare them for writing *) - val (ctxt, (chain_ths, th)) = goal; - val thy = ProofContext.theory_of ctxt; - val chain_ths = map (Thm.put_name_hint chained_hint) chain_ths; - val goal_cls = #1 (neg_conjecture_clauses ctxt th subgoal); - val the_filtered_clauses = - (case filtered_clauses of - NONE => get_facts relevance_override goal goal_cls - | SOME fcls => fcls); - val the_axiom_clauses = - (case axiom_clauses of - NONE => the_filtered_clauses - | SOME axcls => axcls); - val (internal_thm_names, clauses) = - prepare goal_cls chain_ths the_axiom_clauses the_filtered_clauses thy; - - (* path to unique problem file *) - val destdir' = if overlord then getenv "ISABELLE_HOME_USER" - else Config.get ctxt destdir; - val problem_prefix' = Config.get ctxt problem_prefix; - fun prob_pathname nr = - let - val probfile = - Path.basic (problem_prefix' ^ - (if overlord then "_" ^ name else serial_string ()) - ^ "_" ^ string_of_int nr) - in - if destdir' = "" then File.tmp_path probfile - else if File.exists (Path.explode destdir') - then Path.append (Path.explode destdir') probfile - else error ("No such directory: " ^ destdir' ^ ".") - end; - - val command = Path.explode (home ^ "/" ^ executable) - (* write out problem file and call prover *) - fun command_line probfile = - (if Config.get ctxt measure_runtime then - "TIMEFORMAT='%3U'; { time " ^ - space_implode " " [File.shell_path command, args, - File.shell_path probfile] ^ " ; } 2>&1" - else - space_implode " " ["exec", File.shell_path command, 
args, - File.shell_path probfile, "2>&1"]) ^ - (if overlord then - " | sed 's/,/, /g' \ - \| sed 's/\\([^!=]\\)\\([=|]\\)\\([^=]\\)/\\1 \\2 \\3/g' \ - \| sed 's/! =/ !=/g' \ - \| sed 's/ / /g' | sed 's/| |/||/g' \ - \| sed 's/ = = =/===/g' \ - \| sed 's/= = /== /g'" - else - "") - fun split_time s = - let - val split = String.tokens (fn c => str c = "\n"); - val (proof, t) = s |> split |> split_last |> apfst cat_lines; - fun as_num f = f >> (fst o read_int); - val num = as_num (Scan.many1 Symbol.is_ascii_digit); - val digit = Scan.one Symbol.is_ascii_digit; - val num3 = as_num (digit ::: digit ::: (digit >> single)); - val time = num --| Scan.$$ "." -- num3 >> (fn (a, b) => a * 1000 + b); - val as_time = the_default 0 o Scan.read Symbol.stopper time o explode; - in (proof, as_time t) end; - fun split_time' s = - if Config.get ctxt measure_runtime then split_time s else (s, 0) - fun run_on probfile = - if File.exists command then - write_file full_types explicit_apply probfile clauses - |> pair (apfst split_time' (bash_output (command_line probfile))) - else error ("Bad executable: " ^ Path.implode command ^ "."); - - (* If the problem file has not been exported, remove it; otherwise, export - the proof file too. *) - fun cleanup probfile = if destdir' = "" then try File.rm probfile else NONE; - fun export probfile (((proof, _), _), _) = - if destdir' = "" then - () - else - File.write (Path.explode (Path.implode probfile ^ "_proof")) - ((if overlord then - "% " ^ command_line probfile ^ "\n% " ^ timestamp () ^ - "\n" - else - "") ^ proof) - - val (((proof, atp_run_time_in_msecs), rc), _) = - with_path cleanup export run_on (prob_pathname subgoal); - - (* Check for success and print out some information on failure. *) - val failure = find_known_failure known_failures proof; - val success = rc = 0 andalso failure = ""; - val (message, relevant_thm_names) = - if success then - proof_text isar_proof debug modulus sorts ctxt - (minimize_command, proof, internal_thm_names, th, subgoal) - else if failure <> "" then - (failure ^ "\n", []) - else - ("Unknown ATP error: " ^ proof ^ ".\n", []) - in - {success = success, message = message, - relevant_thm_names = relevant_thm_names, - atp_run_time_in_msecs = atp_run_time_in_msecs, proof = proof, - internal_thm_names = internal_thm_names, - filtered_clauses = the_filtered_clauses} - end; - - -(* generic TPTP-based provers *) - -fun generic_tptp_prover - (name, {home, executable, arguments, known_failures, max_new_clauses, - prefers_theory_relevant}) - (params as {debug, overlord, respect_no_atp, relevance_threshold, - convergence, theory_relevant, higher_order, follow_defs, - isar_proof, ...}) - minimize_command timeout = - generic_prover overlord - (get_relevant_facts respect_no_atp relevance_threshold convergence - higher_order follow_defs max_new_clauses - (the_default prefers_theory_relevant theory_relevant)) - (prepare_clauses higher_order false) - (write_tptp_file (debug andalso overlord andalso not isar_proof)) home - executable (arguments timeout) known_failures name params minimize_command - -fun tptp_prover name p = (name, generic_tptp_prover (name, p)); - - -(** common provers **) - -fun generous_to_secs time = (Time.toMilliseconds time + 999) div 1000 - -(* Vampire *) - -(* Vampire requires an explicit time limit. 
*) - -val vampire_config : prover_config = - {home = getenv "VAMPIRE_HOME", - executable = "vampire", - arguments = (fn timeout => "--output_syntax tptp --mode casc -t " ^ - string_of_int (generous_to_secs timeout)), - known_failures = - [(["Satisfiability detected", "CANNOT PROVE"], - "The ATP problem is unprovable."), - (["Refutation not found"], - "The ATP failed to determine the problem's status.")], - max_new_clauses = 60, - prefers_theory_relevant = false} -val vampire = tptp_prover "vampire" vampire_config - - -(* E prover *) - -val e_config : prover_config = - {home = getenv "E_HOME", - executable = "eproof", - arguments = (fn timeout => "--tstp-in --tstp-out -l5 -xAutoDev \ - \-tAutoDev --silent --cpu-limit=" ^ - string_of_int (generous_to_secs timeout)), - known_failures = - [(["SZS status: Satisfiable", "SZS status Satisfiable"], - "The ATP problem is unprovable."), - (["SZS status: ResourceOut", "SZS status ResourceOut"], - "The ATP ran out of resources."), - (["# Cannot determine problem status"], - "The ATP failed to determine the problem's status.")], - max_new_clauses = 100, - prefers_theory_relevant = false} -val e = tptp_prover "e" e_config - - -(* SPASS *) - -fun generic_dfg_prover - (name, {home, executable, arguments, known_failures, max_new_clauses, - prefers_theory_relevant}) - (params as {overlord, respect_no_atp, relevance_threshold, convergence, - theory_relevant, higher_order, follow_defs, ...}) - minimize_command timeout = - generic_prover overlord - (get_relevant_facts respect_no_atp relevance_threshold convergence - higher_order follow_defs max_new_clauses - (the_default prefers_theory_relevant theory_relevant)) - (prepare_clauses higher_order true) write_dfg_file home executable - (arguments timeout) known_failures name params minimize_command - -fun dfg_prover name p = (name, generic_dfg_prover (name, p)) - -(* The "-VarWeight=3" option helps the higher-order problems, probably by - counteracting the presence of "hAPP". *) -val spass_config : prover_config = - {home = getenv "SPASS_HOME", - executable = "SPASS", - arguments = (fn timeout => "-Auto -SOS=1 -PGiven=0 -PProblem=0 -Splits=0" ^ - " -FullRed=0 -DocProof -VarWeight=3 -TimeLimit=" ^ - string_of_int (generous_to_secs timeout)), - known_failures = - [(["SPASS beiseite: Completion found."], "The ATP problem is unprovable."), - (["SPASS beiseite: Ran out of time."], "The ATP timed out."), - (["SPASS beiseite: Maximal number of loops exceeded."], - "The ATP hit its loop limit.")], - max_new_clauses = 40, - prefers_theory_relevant = true} -val spass = dfg_prover "spass" spass_config - -(* SPASS 3.7 supports both the DFG and the TPTP syntax, whereas SPASS 3.0 - supports only the DFG syntax. As soon as all Isabelle repository/snapshot - users have upgraded to 3.7, we can kill "spass" (and all DFG support in - Sledgehammer) and rename "spass_tptp" "spass". *) - -(* FIXME: Change the error message below to point to the Isabelle download - page once the package is there (around the Isabelle2010 release). *) - -val spass_tptp_config = - {home = #home spass_config, - executable = #executable spass_config, - arguments = prefix "-TPTP " o #arguments spass_config, - known_failures = - #known_failures spass_config @ - [(["unrecognized option `-TPTP'", "Unrecognized option TPTP"], - "Warning: Sledgehammer requires a more recent version of SPASS with \ - \support for the TPTP syntax. 
To install it, download and untar the \ - \package \"http://isabelle.in.tum.de/~blanchet/spass-3.7.tgz\" and add \ - \the \"spass-3.7\" directory's full path to \"" ^ - Path.implode (Path.expand (Path.appends - (Path.variable "ISABELLE_HOME_USER" :: - map Path.basic ["etc", "components"]))) ^ - "\" on a line of its own.")], - max_new_clauses = #max_new_clauses spass_config, - prefers_theory_relevant = #prefers_theory_relevant spass_config} -val spass_tptp = tptp_prover "spass_tptp" spass_tptp_config - -(* remote prover invocation via SystemOnTPTP *) - -val systems = Synchronized.var "atp_wrapper_systems" ([]: string list); - -fun get_systems () = - let - val (answer, rc) = bash_output "\"$ISABELLE_ATP_MANAGER/SystemOnTPTP\" -w" - in - if rc <> 0 then - error ("Failed to get available systems at SystemOnTPTP:\n" ^ answer) - else - split_lines answer - end; - -fun refresh_systems_on_tptp () = - Synchronized.change systems (fn _ => get_systems ()); - -fun get_system prefix = Synchronized.change_result systems (fn systems => - (if null systems then get_systems () else systems) - |> `(find_first (String.isPrefix prefix))); - -fun the_system prefix = - (case get_system prefix of - NONE => error ("System " ^ quote prefix ^ " not available at SystemOnTPTP") - | SOME sys => sys); - -val remote_known_failures = - [(["Remote-script could not extract proof"], - "Error: The remote ATP proof is ill-formed.")] - -fun remote_prover_config prover_prefix args - ({known_failures, max_new_clauses, prefers_theory_relevant, ...} - : prover_config) : prover_config = - {home = getenv "ISABELLE_ATP_MANAGER", - executable = "SystemOnTPTP", - arguments = (fn timeout => - args ^ " -t " ^ string_of_int (generous_to_secs timeout) ^ " -s " ^ - the_system prover_prefix), - known_failures = remote_known_failures @ known_failures, - max_new_clauses = max_new_clauses, - prefers_theory_relevant = prefers_theory_relevant} - -val remote_vampire = - tptp_prover (remotify (fst vampire)) - (remote_prover_config "Vampire---9" "" vampire_config) - -val remote_e = - tptp_prover (remotify (fst e)) - (remote_prover_config "EP---" "" e_config) - -val remote_spass = - tptp_prover (remotify (fst spass)) - (remote_prover_config "SPASS---" "-x" spass_config) - -val provers = - [spass, spass_tptp, vampire, e, remote_vampire, remote_spass, remote_e] -val prover_setup = fold add_prover provers - -val setup = - destdir_setup - #> problem_prefix_setup - #> measure_runtime_setup - #> prover_setup; - -fun maybe_remote (name, _) ({home, ...} : prover_config) = - name |> home = "" ? 
remotify - -val _ = atps := ([maybe_remote e e_config, maybe_remote spass spass_config, - remotify (fst vampire)] |> space_implode " ") -end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Datatype/datatype_codegen.ML --- a/src/HOL/Tools/Datatype/datatype_codegen.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Datatype/datatype_codegen.ML Tue May 04 20:30:22 2010 +0200 @@ -100,7 +100,7 @@ val def' = Syntax.check_term lthy def; val ((_, (_, thm)), lthy') = Specification.definition (NONE, (Attrib.empty_binding, def')) lthy; - val ctxt_thy = ProofContext.init (ProofContext.theory_of lthy); + val ctxt_thy = ProofContext.init_global (ProofContext.theory_of lthy); val thm' = singleton (ProofContext.export lthy' ctxt_thy) thm; in (thm', lthy') end; fun tac thms = Class.intro_classes_tac [] diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Datatype/datatype_data.ML --- a/src/HOL/Tools/Datatype/datatype_data.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Datatype/datatype_data.ML Tue May 04 20:30:22 2010 +0200 @@ -165,7 +165,7 @@ fun read_typ thy str sorts = let - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy |> fold (Variable.declare_typ o TFree) sorts; val T = Syntax.read_typ ctxt str; in (T, Term.add_tfreesT T sorts) end; @@ -316,7 +316,7 @@ val (splits, thy9) = Datatype_Abs_Proofs.prove_split_thms config new_type_names descr sorts inject distinct exhaust case_rewrites thy8; - val inducts = Project_Rule.projections (ProofContext.init thy2) induct; + val inducts = Project_Rule.projections (ProofContext.init_global thy2) induct; val dt_infos = map_index (make_dt_info alt_names flat_descr sorts induct inducts rec_names rec_rewrites) (hd descr ~~ inject ~~ distinct ~~ exhaust ~~ nchotomys ~~ @@ -342,8 +342,8 @@ ((Binding.empty, map (fn th => th RS notE) (flat distinct)), [Classical.safe_elim NONE]), ((Binding.empty, weak_case_congs), [Simplifier.attrib (op addcongs)]), - ((Binding.empty, flat (distinct @ inject)), [Induct.add_simp_rule])] - @ named_rules @ unnamed_rules) + ((Binding.empty, flat (distinct @ inject)), [Induct.induct_simp_add])] @ + named_rules @ unnamed_rules) |> snd |> add_case_tr' case_names |> register dt_infos @@ -435,8 +435,8 @@ end; in thy - |> ProofContext.init - |> Proof.theorem_i NONE after_qed' ((map o map) (rpair []) (flat rules)) + |> ProofContext.init_global + |> Proof.theorem NONE after_qed' ((map o map) (rpair []) (flat rules)) end; val rep_datatype = gen_rep_datatype Sign.cert_term; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Function/fun.ML --- a/src/HOL/Tools/Function/fun.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Function/fun.ML Tue May 04 20:30:22 2010 +0200 @@ -7,12 +7,12 @@ signature FUNCTION_FUN = sig - val add_fun : Function_Common.function_config -> - (binding * typ option * mixfix) list -> (Attrib.binding * term) list -> - bool -> local_theory -> Proof.context - val add_fun_cmd : Function_Common.function_config -> - (binding * string option * mixfix) list -> (Attrib.binding * string) list -> - bool -> local_theory -> Proof.context + val add_fun : (binding * typ option * mixfix) list -> + (Attrib.binding * term) list -> Function_Common.function_config -> + local_theory -> Proof.context + val add_fun_cmd : (binding * string option * mixfix) list -> + (Attrib.binding * string) list -> Function_Common.function_config -> + local_theory -> Proof.context val setup : theory -> theory end @@ -56,15 +56,6 @@ () end -val by_pat_completeness_auto = - Proof.global_future_terminal_proof - (Method.Basic 
Pat_Completeness.pat_completeness, - SOME (Method.Source_i (Args.src (("HOL.auto", []), Position.none)))) - -fun termination_by method int = - Function.termination_proof NONE - #> Proof.global_future_terminal_proof (Method.Basic method, NONE) int - fun mk_catchall fixes arity_of = let fun mk_eqn ((fname, fT), _) = @@ -148,24 +139,32 @@ val fun_config = FunctionConfig { sequential=true, default="%x. undefined" (*FIXME dynamic scoping*), domintros=false, partials=false, tailrec=false } -fun gen_fun add config fixes statements int lthy = - lthy - |> add fixes statements config - |> by_pat_completeness_auto int - |> Local_Theory.restore - |> termination_by (Function_Common.get_termination_prover lthy) int +fun gen_add_fun add fixes statements config lthy = + let + fun pat_completeness_auto ctxt = + Pat_Completeness.pat_completeness_tac ctxt 1 + THEN auto_tac (clasimpset_of ctxt) + fun prove_termination lthy = + Function.prove_termination NONE + (Function_Common.get_termination_prover lthy lthy) lthy + in + lthy + |> add fixes statements config pat_completeness_auto |> snd + |> Local_Theory.restore + |> prove_termination |> snd + end -val add_fun = gen_fun Function.add_function -val add_fun_cmd = gen_fun Function.add_function_cmd +val add_fun = gen_add_fun Function.add_function +val add_fun_cmd = gen_add_fun Function.add_function_cmd local structure P = OuterParse and K = OuterKeyword in val _ = - OuterSyntax.local_theory' "fun" "define general recursive functions (short version)" K.thy_decl + OuterSyntax.local_theory "fun" "define general recursive functions (short version)" K.thy_decl (function_parser fun_config - >> (fn ((config, fixes), statements) => add_fun_cmd config fixes statements)); + >> (fn ((config, fixes), statements) => add_fun_cmd fixes statements config)); end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Function/function.ML --- a/src/HOL/Tools/Function/function.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Function/function.ML Tue May 04 20:30:22 2010 +0200 @@ -11,14 +11,27 @@ val add_function: (binding * typ option * mixfix) list -> (Attrib.binding * term) list -> Function_Common.function_config -> - local_theory -> Proof.state + (Proof.context -> tactic) -> local_theory -> info * local_theory val add_function_cmd: (binding * string option * mixfix) list -> (Attrib.binding * string) list -> Function_Common.function_config -> + (Proof.context -> tactic) -> local_theory -> info * local_theory + + val function: (binding * typ option * mixfix) list -> + (Attrib.binding * term) list -> Function_Common.function_config -> local_theory -> Proof.state - val termination_proof : term option -> local_theory -> Proof.state - val termination_proof_cmd : string option -> local_theory -> Proof.state + val function_cmd: (binding * string option * mixfix) list -> + (Attrib.binding * string) list -> Function_Common.function_config -> + local_theory -> Proof.state + + val prove_termination: term option -> tactic -> local_theory -> + info * local_theory + val prove_termination_cmd: string option -> tactic -> local_theory -> + info * local_theory + + val termination : term option -> local_theory -> Proof.state + val termination_cmd : string option -> local_theory -> Proof.state val setup : theory -> theory val get_congs : Proof.context -> thm list @@ -65,7 +78,7 @@ (saved_simps, fold2 add_for_f fnames simps_by_f lthy) end -fun gen_add_function is_external prep default_constraint fixspec eqns config lthy = +fun prepare_function is_external prep default_constraint fixspec eqns config 
lthy = let val constrn_fxs = map (fn (b, T, mx) => (b, SOME (the_default default_constraint T), mx)) val ((fixes0, spec0), ctxt') = prep (constrn_fxs fixspec) eqns lthy @@ -76,7 +89,7 @@ val defname = mk_defname fixes val FunctionConfig {partials, ...} = config - val ((goalstate, cont), lthy) = + val ((goal_state, cont), lthy') = Function_Mutual.prepare_function_mutual config defname fixes eqs lthy fun afterqed [[proof]] lthy = @@ -115,20 +128,45 @@ if not is_external then () else Specification.print_consts lthy (K false) (map fst fixes) in - lthy - |> Local_Theory.declaration false (add_function_data o morph_function_data info) + (info, + lthy |> Local_Theory.declaration false (add_function_data o morph_function_data info)) end in - lthy - |> Proof.theorem_i NONE afterqed [[(Logic.unprotect (concl_of goalstate), [])]] - |> Proof.refine (Method.primitive_text (fn _ => goalstate)) |> Seq.hd + ((goal_state, afterqed), lthy') + end + +fun gen_add_function is_external prep default_constraint fixspec eqns config tac lthy = + let + val ((goal_state, afterqed), lthy') = + prepare_function is_external prep default_constraint fixspec eqns config lthy + val pattern_thm = + case SINGLE (tac lthy') goal_state of + NONE => error "pattern completeness and compatibility proof failed" + | SOME st => Goal.finish lthy' st + in + lthy' + |> afterqed [[pattern_thm]] end val add_function = gen_add_function false Specification.check_spec (TypeInfer.anyT HOLogic.typeS) val add_function_cmd = gen_add_function true Specification.read_spec "_::type" - -fun gen_termination_proof prep_term raw_term_opt lthy = + +fun gen_function is_external prep default_constraint fixspec eqns config lthy = + let + val ((goal_state, afterqed), lthy') = + prepare_function is_external prep default_constraint fixspec eqns config lthy + in + lthy' + |> Proof.theorem NONE (snd oo afterqed) [[(Logic.unprotect (concl_of goal_state), [])]] + |> Proof.refine (Method.primitive_text (K goal_state)) |> Seq.hd + end + +val function = + gen_function false Specification.check_spec (TypeInfer.anyT HOLogic.typeS) +val function_cmd = gen_function true Specification.read_spec "_::type" + +fun prepare_termination_proof prep_term raw_term_opt lthy = let val term_opt = Option.map (prep_term lthy) raw_term_opt val info = the (case term_opt of @@ -159,16 +197,38 @@ ((qualify "induct", [Attrib.internal (K (Rule_Cases.case_names case_names))]), tinduct) - |-> (fn (simps, (_, inducts)) => + |-> (fn (simps, (_, inducts)) => fn lthy => let val info' = { is_partial=false, defname=defname, add_simps=add_simps, case_names=case_names, fs=fs, R=R, psimps=psimps, pinducts=pinducts, simps=SOME simps, inducts=SOME inducts, termination=termination } in - Local_Theory.declaration false (add_function_data o morph_function_data info') - #> Spec_Rules.add Spec_Rules.Equational (fs, tsimps) + (info', + lthy + |> Local_Theory.declaration false (add_function_data o morph_function_data info') + |> Spec_Rules.add Spec_Rules.Equational (fs, tsimps)) end) end in + (goal, afterqed, termination) + end + +fun gen_prove_termination prep_term raw_term_opt tac lthy = + let + val (goal, afterqed, termination) = + prepare_termination_proof prep_term raw_term_opt lthy + + val totality = Goal.prove lthy [] [] goal (K tac) + in + afterqed [[totality]] lthy +end + +val prove_termination = gen_prove_termination Syntax.check_term +val prove_termination_cmd = gen_prove_termination Syntax.read_term + +fun gen_termination prep_term raw_term_opt lthy = + let + val (goal, afterqed, termination) = 
prepare_termination_proof prep_term raw_term_opt lthy + in lthy |> ProofContext.note_thmss "" [((Binding.empty, [Context_Rules.rule_del]), [([allI], [])])] |> snd @@ -177,11 +237,11 @@ |> ProofContext.note_thmss "" [((Binding.name "termination", [Context_Rules.intro_bang (SOME 0)]), [([Goal.norm_result termination], [])])] |> snd - |> Proof.theorem_i NONE afterqed [[(goal, [])]] + |> Proof.theorem NONE (snd oo afterqed) [[(goal, [])]] end -val termination_proof = gen_termination_proof Syntax.check_term -val termination_proof_cmd = gen_termination_proof Syntax.read_term +val termination = gen_termination Syntax.check_term +val termination_cmd = gen_termination Syntax.read_term (* Datatype hook to declare datatype congs as "function_congs" *) @@ -221,11 +281,11 @@ val _ = OuterSyntax.local_theory_to_proof "function" "define general recursive functions" K.thy_goal (function_parser default_config - >> (fn ((config, fixes), statements) => add_function_cmd fixes statements config)) + >> (fn ((config, fixes), statements) => function_cmd fixes statements config)) val _ = OuterSyntax.local_theory_to_proof "termination" "prove termination of a recursive function" K.thy_goal - (Scan.option P.term >> termination_proof_cmd) + (Scan.option P.term >> termination_cmd) end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Function/function_common.ML --- a/src/HOL/Tools/Function/function_common.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Function/function_common.ML Tue May 04 20:30:22 2010 +0200 @@ -172,7 +172,7 @@ structure TerminationProver = Generic_Data ( - type T = Proof.context -> Proof.method + type T = Proof.context -> tactic val empty = (fn _ => error "Termination prover not configured") val extend = I fun merge (a, b) = b (* FIXME ? *) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Function/lexicographic_order.ML --- a/src/HOL/Tools/Function/lexicographic_order.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Function/lexicographic_order.ML Tue May 04 20:30:22 2010 +0200 @@ -225,6 +225,6 @@ Method.setup @{binding lexicographic_order} (Method.sections clasimp_modifiers >> (K lexicographic_order)) "termination prover for lexicographic orderings" - #> Context.theory_map (Function_Common.set_termination_prover lexicographic_order) + #> Context.theory_map (Function_Common.set_termination_prover (lexicographic_order_tac false)) end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Function/relation.ML --- a/src/HOL/Tools/Function/relation.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Function/relation.ML Tue May 04 20:30:22 2010 +0200 @@ -14,19 +14,20 @@ structure Function_Relation : FUNCTION_RELATION = struct -fun inst_thm ctxt rel st = +fun inst_state_tac ctxt rel st = let val cert = Thm.cterm_of (ProofContext.theory_of ctxt) val rel' = cert (singleton (Variable.polymorphic ctxt) rel) val st' = Thm.incr_indexes (#maxidx (Thm.rep_cterm rel') + 1) st - val Rvar = cert (Var (the_single (Term.add_vars (prop_of st') []))) - in - Drule.cterm_instantiate [(Rvar, rel')] st' + in case Term.add_vars (prop_of st') [] of + [v] => + PRIMITIVE (Drule.cterm_instantiate [(cert (Var v), rel')]) st' + | _ => Seq.empty end fun relation_tac ctxt rel i = TRY (Function_Common.apply_termination_rule ctxt i) - THEN PRIMITIVE (inst_thm ctxt rel) + THEN inst_state_tac ctxt rel val setup = Method.setup @{binding relation} diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Function/scnp_reconstruct.ML --- a/src/HOL/Tools/Function/scnp_reconstruct.ML Tue May 04 19:57:55 2010 +0200 +++ 
b/src/HOL/Tools/Function/scnp_reconstruct.ML Tue May 04 20:30:22 2010 +0200 @@ -10,7 +10,7 @@ val sizechange_tac : Proof.context -> tactic -> tactic - val decomp_scnp : ScnpSolve.label list -> Proof.context -> Proof.method + val decomp_scnp_tac : ScnpSolve.label list -> Proof.context -> tactic val setup : theory -> theory @@ -396,13 +396,12 @@ fun sizechange_tac ctxt autom_tac = gen_sizechange_tac [MAX, MS, MIN] autom_tac ctxt (K (K all_tac)) -fun decomp_scnp orders ctxt = +fun decomp_scnp_tac orders ctxt = let val extra_simps = Function_Common.Termination_Simps.get ctxt val autom_tac = auto_tac (clasimpset_of ctxt addsimps2 extra_simps) in - SIMPLE_METHOD - (gen_sizechange_tac orders autom_tac ctxt (print_error ctxt)) + gen_sizechange_tac orders autom_tac ctxt (print_error ctxt) end @@ -416,7 +415,8 @@ || Scan.succeed [MAX, MS, MIN] val setup = Method.setup @{binding size_change} - (Scan.lift orders --| Method.sections clasimp_modifiers >> decomp_scnp) + (Scan.lift orders --| Method.sections clasimp_modifiers >> + (fn orders => SIMPLE_METHOD o decomp_scnp_tac orders)) "termination prover with graph decomposition and the NP subset of size change termination" end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Function/size.ML --- a/src/HOL/Tools/Function/size.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Function/size.ML Tue May 04 20:30:22 2010 +0200 @@ -133,7 +133,7 @@ val (thm, lthy') = lthy |> Local_Theory.define ((Binding.name c, NoSyn), ((Binding.name def_name, []), rhs)) |-> (fn (t, (_, thm)) => Spec_Rules.add Spec_Rules.Equational ([t], [thm]) #> pair thm); - val ctxt_thy = ProofContext.init (ProofContext.theory_of lthy'); + val ctxt_thy = ProofContext.init_global (ProofContext.theory_of lthy'); val thm' = singleton (ProofContext.export lthy' ctxt_thy) thm; in (thm', lthy') end; @@ -152,7 +152,7 @@ ||> Class.prove_instantiation_instance (K (Class.intro_classes_tac [])) ||> Local_Theory.exit_global; - val ctxt = ProofContext.init thy'; + val ctxt = ProofContext.init_global thy'; val simpset1 = HOL_basic_ss addsimps @{thm Nat.add_0} :: @{thm Nat.add_0_right} :: size_def_thms @ size_def_thms' @ rec_rewrites @ extra_rewrites; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/HISTORY --- a/src/HOL/Tools/Nitpick/HISTORY Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/HISTORY Tue May 04 20:30:22 2010 +0200 @@ -16,7 +16,9 @@ * Fixed soundness bug related to higher-order constructors * Added cache to speed up repeated Kodkod invocations on the same problems * Renamed "MiniSatJNI", "zChaffJNI", "BerkMinAlloy", and "SAT4JLight" to - "MiniSat_JNI", "zChaff_JNI", "BerkMin_Alloy", and "SAT4J_Light" + "MiniSat_JNI", "zChaff_JNI", "BerkMin_Alloy", and "SAT4J_Light" + * Removed "skolemize", "uncurry", "sym_break", "flatten_prop", + "sharing_depth", and "show_skolems" options Version 2009-1 diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/kodkod.ML --- a/src/HOL/Tools/Nitpick/kodkod.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/kodkod.ML Tue May 04 20:30:22 2010 +0200 @@ -120,11 +120,10 @@ AssignRelReg of n_ary_index * rel_expr | AssignIntReg of int * int_expr - type 'a fold_expr_funcs = { - formula_func: formula -> 'a -> 'a, - rel_expr_func: rel_expr -> 'a -> 'a, - int_expr_func: int_expr -> 'a -> 'a - } + type 'a fold_expr_funcs = + {formula_func: formula -> 'a -> 'a, + rel_expr_func: rel_expr -> 'a -> 'a, + int_expr_func: int_expr -> 'a -> 'a} val fold_formula : 'a fold_expr_funcs -> formula -> 'a -> 'a val fold_rel_expr : 'a 
fold_expr_funcs -> rel_expr -> 'a -> 'a @@ -132,10 +131,9 @@ val fold_decl : 'a fold_expr_funcs -> decl -> 'a -> 'a val fold_expr_assign : 'a fold_expr_funcs -> expr_assign -> 'a -> 'a - type 'a fold_tuple_funcs = { - tuple_func: tuple -> 'a -> 'a, - tuple_set_func: tuple_set -> 'a -> 'a - } + type 'a fold_tuple_funcs = + {tuple_func: tuple -> 'a -> 'a, + tuple_set_func: tuple_set -> 'a -> 'a} val fold_tuple : 'a fold_tuple_funcs -> tuple -> 'a -> 'a val fold_tuple_set : 'a fold_tuple_funcs -> tuple_set -> 'a -> 'a @@ -144,15 +142,15 @@ 'a fold_expr_funcs -> 'a fold_tuple_funcs -> bound -> 'a -> 'a val fold_int_bound : 'a fold_tuple_funcs -> int_bound -> 'a -> 'a - type problem = { - comment: string, - settings: setting list, - univ_card: int, - tuple_assigns: tuple_assign list, - bounds: bound list, - int_bounds: int_bound list, - expr_assigns: expr_assign list, - formula: formula} + type problem = + {comment: string, + settings: setting list, + univ_card: int, + tuple_assigns: tuple_assign list, + bounds: bound list, + int_bounds: int_bound list, + expr_assigns: expr_assign list, + formula: formula} type raw_bound = n_ary_index * int list list @@ -291,15 +289,15 @@ AssignRelReg of n_ary_index * rel_expr | AssignIntReg of int * int_expr -type problem = { - comment: string, - settings: setting list, - univ_card: int, - tuple_assigns: tuple_assign list, - bounds: bound list, - int_bounds: int_bound list, - expr_assigns: expr_assign list, - formula: formula} +type problem = + {comment: string, + settings: setting list, + univ_card: int, + tuple_assigns: tuple_assign list, + bounds: bound list, + int_bounds: int_bound list, + expr_assigns: expr_assign list, + formula: formula} type raw_bound = n_ary_index * int list list @@ -313,15 +311,13 @@ exception SYNTAX of string * string -type 'a fold_expr_funcs = { - formula_func: formula -> 'a -> 'a, - rel_expr_func: rel_expr -> 'a -> 'a, - int_expr_func: int_expr -> 'a -> 'a -} +type 'a fold_expr_funcs = + {formula_func: formula -> 'a -> 'a, + rel_expr_func: rel_expr -> 'a -> 'a, + int_expr_func: int_expr -> 'a -> 'a} (** Auxiliary functions on ML representation of Kodkod problems **) -(* 'a fold_expr_funcs -> formula -> 'a -> 'a *) fun fold_formula (F : 'a fold_expr_funcs) formula = case formula of All (ds, f) => fold (fold_decl F) ds #> fold_formula F f @@ -354,7 +350,6 @@ | False => #formula_func F formula | True => #formula_func F formula | FormulaReg _ => #formula_func F formula -(* 'a fold_expr_funcs -> rel_expr -> 'a -> 'a *) and fold_rel_expr F rel_expr = case rel_expr of RelLet (bs, r) => fold (fold_expr_assign F) bs #> fold_rel_expr F r @@ -383,7 +378,6 @@ | Rel _ => #rel_expr_func F rel_expr | Var _ => #rel_expr_func F rel_expr | RelReg _ => #rel_expr_func F rel_expr -(* 'a fold_expr_funcs -> int_expr -> 'a -> 'a *) and fold_int_expr F int_expr = case int_expr of Sum (ds, i) => fold (fold_decl F) ds #> fold_int_expr F i @@ -409,7 +403,6 @@ | Signum i => fold_int_expr F i | Num _ => #int_expr_func F int_expr | IntReg _ => #int_expr_func F int_expr -(* 'a fold_expr_funcs -> decl -> 'a -> 'a *) and fold_decl F decl = case decl of DeclNo (x, r) => fold_rel_expr F (Var x) #> fold_rel_expr F r @@ -417,21 +410,17 @@ | DeclOne (x, r) => fold_rel_expr F (Var x) #> fold_rel_expr F r | DeclSome (x, r) => fold_rel_expr F (Var x) #> fold_rel_expr F r | DeclSet (x, r) => fold_rel_expr F (Var x) #> fold_rel_expr F r -(* 'a fold_expr_funcs -> expr_assign -> 'a -> 'a *) and fold_expr_assign F assign = case assign of AssignFormulaReg (x, f) => fold_formula 
F (FormulaReg x) #> fold_formula F f | AssignRelReg (x, r) => fold_rel_expr F (RelReg x) #> fold_rel_expr F r | AssignIntReg (x, i) => fold_int_expr F (IntReg x) #> fold_int_expr F i -type 'a fold_tuple_funcs = { - tuple_func: tuple -> 'a -> 'a, - tuple_set_func: tuple_set -> 'a -> 'a -} +type 'a fold_tuple_funcs = + {tuple_func: tuple -> 'a -> 'a, + tuple_set_func: tuple_set -> 'a -> 'a} -(* 'a fold_tuple_funcs -> tuple -> 'a -> 'a *) fun fold_tuple (F : 'a fold_tuple_funcs) = #tuple_func F -(* 'a fold_tuple_funcs -> tuple_set -> 'a -> 'a *) fun fold_tuple_set F tuple_set = case tuple_set of TupleUnion (ts1, ts2) => fold_tuple_set F ts1 #> fold_tuple_set F ts2 @@ -444,23 +433,18 @@ | TupleArea (t1, t2) => fold_tuple F t1 #> fold_tuple F t2 | TupleAtomSeq _ => #tuple_set_func F tuple_set | TupleSetReg _ => #tuple_set_func F tuple_set -(* 'a fold_tuple_funcs -> tuple_assign -> 'a -> 'a *) fun fold_tuple_assign F assign = case assign of AssignTuple (x, t) => fold_tuple F (TupleReg x) #> fold_tuple F t | AssignTupleSet (x, ts) => fold_tuple_set F (TupleSetReg x) #> fold_tuple_set F ts -(* 'a fold_expr_funcs -> 'a fold_tuple_funcs -> bound -> 'a -> 'a *) fun fold_bound expr_F tuple_F (zs, tss) = fold (fold_rel_expr expr_F) (map (Rel o fst) zs) #> fold (fold_tuple_set tuple_F) tss -(* 'a fold_tuple_funcs -> int_bound -> 'a -> 'a *) fun fold_int_bound F (_, tss) = fold (fold_tuple_set F) tss -(* int -> int *) fun max_arity univ_card = floor (Math.ln 2147483647.0 / Math.ln (Real.fromInt univ_card)) -(* rel_expr -> int *) fun arity_of_rel_expr (RelLet (_, r)) = arity_of_rel_expr r | arity_of_rel_expr (RelIf (_, r1, _)) = arity_of_rel_expr r1 | arity_of_rel_expr (Union (r1, _)) = arity_of_rel_expr r1 @@ -487,23 +471,18 @@ | arity_of_rel_expr (Rel (n, _)) = n | arity_of_rel_expr (Var (n, _)) = n | arity_of_rel_expr (RelReg (n, _)) = n -(* rel_expr -> rel_expr -> int *) and sum_arities_of_rel_exprs r1 r2 = arity_of_rel_expr r1 + arity_of_rel_expr r2 -(* decl -> int *) and arity_of_decl (DeclNo ((n, _), _)) = n | arity_of_decl (DeclLone ((n, _), _)) = n | arity_of_decl (DeclOne ((n, _), _)) = n | arity_of_decl (DeclSome ((n, _), _)) = n | arity_of_decl (DeclSet ((n, _), _)) = n -(* problem -> bool *) fun is_problem_trivially_false ({formula = False, ...} : problem) = true | is_problem_trivially_false _ = false -(* string -> string list *) val chop_solver = take 2 o space_explode "," -(* setting list * setting list -> bool *) fun settings_equivalent ([], []) = true | settings_equivalent ((key1, value1) :: settings1, (key2, value2) :: settings2) = @@ -513,7 +492,6 @@ settings_equivalent (settings1, settings2) | settings_equivalent _ = false -(* problem * problem -> bool *) fun problems_equivalent (p1 : problem, p2 : problem) = #univ_card p1 = #univ_card p2 andalso #formula p1 = #formula p2 andalso @@ -525,16 +503,13 @@ (** Serialization of problem **) -(* int -> string *) fun base_name j = if j < 0 then string_of_int (~j - 1) ^ "'" else string_of_int j -(* n_ary_index -> string -> string -> string -> string *) fun n_ary_name (1, j) prefix _ _ = prefix ^ base_name j | n_ary_name (2, j) _ prefix _ = prefix ^ base_name j | n_ary_name (n, j) _ _ prefix = prefix ^ string_of_int n ^ "_" ^ base_name j -(* int -> string *) fun atom_name j = "A" ^ base_name j fun atom_seq_name (k, 0) = "u" ^ base_name k | atom_seq_name (k, j0) = "u" ^ base_name k ^ "@" ^ base_name j0 @@ -542,14 +517,12 @@ fun rel_reg_name j = "$e" ^ base_name j fun int_reg_name j = "$i" ^ base_name j -(* n_ary_index -> string *) fun tuple_name x 
= n_ary_name x "A" "P" "T" fun rel_name x = n_ary_name x "s" "r" "m" fun var_name x = n_ary_name x "S" "R" "M" fun tuple_reg_name x = n_ary_name x "$A" "$P" "$T" fun tuple_set_reg_name x = n_ary_name x "$a" "$p" "$t" -(* string -> string *) fun inline_comment "" = "" | inline_comment comment = " /* " ^ translate_string (fn "\n" => " " | "*" => "* " | s => s) comment ^ @@ -557,10 +530,8 @@ fun block_comment "" = "" | block_comment comment = prefix_lines "// " comment ^ "\n" -(* (n_ary_index * string) -> string *) fun commented_rel_name (x, s) = rel_name x ^ inline_comment s -(* tuple -> string *) fun string_for_tuple (Tuple js) = "[" ^ commas (map atom_name js) ^ "]" | string_for_tuple (TupleIndex x) = tuple_name x | string_for_tuple (TupleReg x) = tuple_reg_name x @@ -571,7 +542,6 @@ val prec_TupleProduct = 3 val prec_TupleProject = 4 -(* tuple_set -> int *) fun precedence_ts (TupleUnion _) = prec_TupleUnion | precedence_ts (TupleDifference _) = prec_TupleUnion | precedence_ts (TupleIntersect _) = prec_TupleIntersect @@ -579,10 +549,8 @@ | precedence_ts (TupleProject _) = prec_TupleProject | precedence_ts _ = no_prec -(* tuple_set -> string *) fun string_for_tuple_set tuple_set = let - (* tuple_set -> int -> string *) fun sub tuple_set outer_prec = let val prec = precedence_ts tuple_set @@ -608,19 +576,16 @@ end in sub tuple_set 0 end -(* tuple_assign -> string *) fun string_for_tuple_assign (AssignTuple (x, t)) = tuple_reg_name x ^ " := " ^ string_for_tuple t ^ "\n" | string_for_tuple_assign (AssignTupleSet (x, ts)) = tuple_set_reg_name x ^ " := " ^ string_for_tuple_set ts ^ "\n" -(* bound -> string *) fun string_for_bound (zs, tss) = "bounds " ^ commas (map commented_rel_name zs) ^ ": " ^ (if length tss = 1 then "" else "[") ^ commas (map string_for_tuple_set tss) ^ (if length tss = 1 then "" else "]") ^ "\n" -(* int_bound -> string *) fun int_string_for_bound (opt_n, tss) = (case opt_n of SOME n => signed_string_of_int n ^ ": " @@ -645,7 +610,6 @@ val prec_Join = 18 val prec_BitNot = 19 -(* formula -> int *) fun precedence_f (All _) = prec_All | precedence_f (Exist _) = prec_All | precedence_f (FormulaLet _) = prec_All @@ -671,7 +635,6 @@ | precedence_f False = no_prec | precedence_f True = no_prec | precedence_f (FormulaReg _) = no_prec -(* rel_expr -> int *) and precedence_r (RelLet _) = prec_All | precedence_r (RelIf _) = prec_All | precedence_r (Union _) = prec_Add @@ -697,7 +660,6 @@ | precedence_r (Rel _) = no_prec | precedence_r (Var _) = no_prec | precedence_r (RelReg _) = no_prec -(* int_expr -> int *) and precedence_i (Sum _) = prec_All | precedence_i (IntLet _) = prec_All | precedence_i (IntIf _) = prec_All @@ -721,14 +683,11 @@ | precedence_i (Num _) = no_prec | precedence_i (IntReg _) = no_prec -(* (string -> unit) -> problem list -> unit *) fun write_problem_file out problems = let - (* formula -> unit *) fun out_outmost_f (And (f1, f2)) = (out_outmost_f f1; out "\n && "; out_outmost_f f2) | out_outmost_f f = out_f f prec_And - (* formula -> int -> unit *) and out_f formula outer_prec = let val prec = precedence_f formula @@ -773,7 +732,6 @@ | FormulaReg j => out (formula_reg_name j)); (if need_parens then out ")" else ()) end - (* rel_expr -> int -> unit *) and out_r rel_expr outer_prec = let val prec = precedence_r rel_expr @@ -813,7 +771,6 @@ | RelReg (_, j) => out (rel_reg_name j)); (if need_parens then out ")" else ()) end - (* int_expr -> int -> unit *) and out_i int_expr outer_prec = let val prec = precedence_i int_expr @@ -848,11 +805,9 @@ | IntReg j => out 
(int_reg_name j)); (if need_parens then out ")" else ()) end - (* decl list -> unit *) and out_decls [] = () | out_decls [d] = out_decl d | out_decls (d :: ds) = (out_decl d; out ", "; out_decls ds) - (* decl -> unit *) and out_decl (DeclNo (x, r)) = (out (var_name x); out " : no "; out_r r 0) | out_decl (DeclLone (x, r)) = @@ -863,22 +818,18 @@ (out (var_name x); out " : some "; out_r r 0) | out_decl (DeclSet (x, r)) = (out (var_name x); out " : set "; out_r r 0) - (* assign_expr list -> unit *) and out_assigns [] = () | out_assigns [b] = out_assign b | out_assigns (b :: bs) = (out_assign b; out ", "; out_assigns bs) - (* assign_expr -> unit *) and out_assign (AssignFormulaReg (j, f)) = (out (formula_reg_name j); out " := "; out_f f 0) | out_assign (AssignRelReg ((_, j), r)) = (out (rel_reg_name j); out " := "; out_r r 0) | out_assign (AssignIntReg (j, i)) = (out (int_reg_name j); out " := "; out_i i 0) - (* int_expr list -> unit *) and out_columns [] = () | out_columns [i] = out_i i 0 | out_columns (i :: is) = (out_i i 0; out ", "; out_columns is) - (* problem -> unit *) and out_problem {comment, settings, univ_card, tuple_assigns, bounds, int_bounds, expr_assigns, formula} = (out ("\n" ^ block_comment comment ^ @@ -896,19 +847,16 @@ out "solve "; out_outmost_f formula; out ";\n") in out ("// This file was generated by Isabelle (most likely Nitpick)\n" ^ - "// " ^ Date.fmt "%Y-%m-%d %H:%M:%S" - (Date.fromTimeLocal (Time.now ())) ^ "\n"); + "// " ^ Sledgehammer_Util.timestamp () ^ "\n"); map out_problem problems end (** Parsing of solution **) -(* string -> bool *) fun is_ident_char s = Symbol.is_ascii_letter s orelse Symbol.is_ascii_digit s orelse s = "_" orelse s = "'" orelse s = "$" -(* string list -> string list *) fun strip_blanks [] = [] | strip_blanks (" " :: ss) = strip_blanks ss | strip_blanks [s1, " "] = [s1] @@ -919,29 +867,20 @@ strip_blanks (s1 :: s2 :: ss) | strip_blanks (s :: ss) = s :: strip_blanks ss -(* (string list -> 'a * string list) -> string list -> 'a list * string list *) fun scan_non_empty_list scan = scan ::: Scan.repeat ($$ "," |-- scan) fun scan_list scan = scan_non_empty_list scan || Scan.succeed [] -(* string list -> int * string list *) val scan_nat = Scan.repeat1 (Scan.one Symbol.is_ascii_digit) >> (the o Int.fromString o space_implode "") -(* string list -> (int * int) * string list *) val scan_rel_name = $$ "s" |-- scan_nat >> pair 1 || $$ "r" |-- scan_nat >> pair 2 || ($$ "m" |-- scan_nat --| $$ "_") -- scan_nat -(* string list -> int * string list *) val scan_atom = $$ "A" |-- scan_nat -(* string list -> int list * string list *) val scan_tuple = $$ "[" |-- scan_list scan_atom --| $$ "]" -(* string list -> int list list * string list *) val scan_tuple_set = $$ "[" |-- scan_list scan_tuple --| $$ "]" -(* string list -> ((int * int) * int list list) * string list *) val scan_assignment = (scan_rel_name --| $$ "=") -- scan_tuple_set -(* string list -> ((int * int) * int list list) list * string list *) val scan_instance = Scan.this_string "relations:" |-- $$ "{" |-- scan_list scan_assignment --| $$ "}" -(* string -> raw_bound list *) val parse_instance = fst o Scan.finite Symbol.stopper (Scan.error (!! 
(fn _ => @@ -954,12 +893,10 @@ val outcome_marker = "---OUTCOME---\n" val instance_marker = "---INSTANCE---\n" -(* string -> substring -> string *) fun read_section_body marker = Substring.string o fst o Substring.position "\n\n" o Substring.triml (size marker) -(* substring -> raw_bound list *) fun read_next_instance s = let val s = Substring.position instance_marker s |> snd in if Substring.isEmpty s then @@ -968,8 +905,6 @@ read_section_body instance_marker s |> parse_instance end -(* int -> substring * (int * raw_bound list) list * int list - -> substring * (int * raw_bound list) list * int list *) fun read_next_outcomes j (s, ps, js) = let val (s1, s2) = Substring.position outcome_marker s in if Substring.isEmpty s2 orelse @@ -991,8 +926,6 @@ end end -(* substring * (int * raw_bound list) list * int list - -> (int * raw_bound list) list * int list *) fun read_next_problems (s, ps, js) = let val s = Substring.position problem_marker s |> snd in if Substring.isEmpty s then @@ -1008,7 +941,6 @@ handle Option.Option => raise SYNTAX ("Kodkod.read_next_problems", "expected number after \"PROBLEM\"") -(* Path.T -> bool * ((int * raw_bound list) list * int list) *) fun read_output_file path = (false, read_next_problems (Substring.full (File.read path), [], []) |>> rev ||> rev) @@ -1018,7 +950,6 @@ val created_temp_dir = Unsynchronized.ref false -(* bool -> string * string *) fun serial_string_and_temporary_dir_for_overlord overlord = if overlord then ("", getenv "ISABELLE_HOME_USER") @@ -1033,14 +964,12 @@ is partly due to the JVM and partly due to the ML "bash" function. *) val fudge_ms = 250 -(* Time.time option -> int *) fun milliseconds_until_deadline deadline = case deadline of NONE => ~1 | SOME time => Int.max (0, Time.toMilliseconds (Time.- (time, Time.now ())) - fudge_ms) -(* bool -> Time.time option -> int -> int -> problem list -> outcome *) fun uncached_solve_any_problem overlord deadline max_threads max_solutions problems = let @@ -1052,7 +981,6 @@ (0 upto length problems - 1 ~~ problems) val triv_js = filter_out (AList.defined (op =) indexed_problems) (0 upto length problems - 1) - (* int -> int *) val reindex = fst o nth indexed_problems in if null indexed_problems then @@ -1061,18 +989,15 @@ let val (serial_str, temp_dir) = serial_string_and_temporary_dir_for_overlord overlord - (* string -> Path.T *) fun path_for suf = Path.explode (temp_dir ^ "/kodkodi" ^ serial_str ^ "." 
^ suf) val in_path = path_for "kki" val in_buf = Unsynchronized.ref Buffer.empty - (* string -> unit *) fun out s = Unsynchronized.change in_buf (Buffer.add s) val out_path = path_for "out" val err_path = path_for "err" val _ = write_problem_file out (map snd indexed_problems) val _ = File.write_buffer in_path (!in_buf) - (* unit -> unit *) fun remove_temporary_files () = if overlord then () else List.app (K () o try File.rm) [in_path, out_path, err_path] @@ -1151,10 +1076,8 @@ Synchronized.var "Kodkod.cached_outcome" (NONE : ((int * problem list) * outcome) option) -(* bool -> Time.time option -> int -> int -> problem list -> outcome *) fun solve_any_problem overlord deadline max_threads max_solutions problems = let - (* unit -> outcome *) fun do_solve () = uncached_solve_any_problem overlord deadline max_threads max_solutions problems in diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/kodkod_sat.ML --- a/src/HOL/Tools/Nitpick/kodkod_sat.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/kodkod_sat.ML Tue May 04 20:30:22 2010 +0200 @@ -51,8 +51,6 @@ ("HaifaSat", ExternalV2 (ToStdout, "HAIFASAT_HOME", "HaifaSat", ["-p", "1"], "s SATISFIABLE", "v ", "s UNSATISFIABLE"))] -(* string -> sink -> string -> string -> string list -> string list - -> (string * (unit -> string list)) option *) fun dynamic_entry_for_external name dev home exec args markers = case getenv home of "" => NONE @@ -74,8 +72,6 @@ if dev = ToFile then out_file else ""] @ markers @ (if dev = ToFile then [out_file] else []) @ args end) -(* bool -> bool -> string * sat_solver_info - -> (string * (unit -> string list)) option *) fun dynamic_entry_for_info incremental (name, Internal (Java, mode, ss)) = if incremental andalso mode = Batch then NONE else SOME (name, K ss) | dynamic_entry_for_info incremental (name, Internal (JNI, mode, ss)) = @@ -98,20 +94,15 @@ (name, ExternalV2 (dev, home, exec, args, m1, m2, m3)) = dynamic_entry_for_external name dev home exec args [m1, m2, m3] | dynamic_entry_for_info true _ = NONE -(* bool -> (string * (unit -> string list)) list *) fun dynamic_list incremental = map_filter (dynamic_entry_for_info incremental) static_list -(* bool -> string list *) val configured_sat_solvers = map fst o dynamic_list -(* bool -> string *) val smart_sat_solver_name = fst o hd o dynamic_list -(* string -> string * string list *) fun sat_solver_spec name = let val dyn_list = dynamic_list false - (* (string * 'a) list -> string *) fun enum_solvers solvers = commas (distinct (op =) (map (quote o fst) solvers)) in diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/minipick.ML --- a/src/HOL/Tools/Nitpick/minipick.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/minipick.ML Tue May 04 20:30:22 2010 +0200 @@ -35,7 +35,6 @@ datatype rep = SRep | RRep -(* Proof.context -> typ -> unit *) fun check_type ctxt (Type (@{type_name fun}, Ts)) = List.app (check_type ctxt) Ts | check_type ctxt (Type (@{type_name "*"}, Ts)) = @@ -46,7 +45,6 @@ | check_type ctxt T = raise NOT_SUPPORTED ("type " ^ quote (Syntax.string_of_typ ctxt T)) -(* rep -> (typ -> int) -> typ -> int list *) fun atom_schema_of SRep card (Type (@{type_name fun}, [T1, T2])) = replicate_list (card T1) (atom_schema_of SRep card T2) | atom_schema_of RRep card (Type (@{type_name fun}, [T1, @{typ bool}])) = @@ -56,42 +54,32 @@ | atom_schema_of _ card (Type (@{type_name "*"}, Ts)) = maps (atom_schema_of SRep card) Ts | atom_schema_of _ card T = [card T] -(* rep -> (typ -> int) -> typ -> int *) val arity_of = length ooo 
atom_schema_of -(* (typ -> int) -> typ list -> int -> int *) fun index_for_bound_var _ [_] 0 = 0 | index_for_bound_var card (_ :: Ts) 0 = index_for_bound_var card Ts 0 + arity_of SRep card (hd Ts) | index_for_bound_var card Ts n = index_for_bound_var card (tl Ts) (n - 1) -(* (typ -> int) -> rep -> typ list -> int -> rel_expr list *) fun vars_for_bound_var card R Ts j = map (curry Var 1) (index_seq (index_for_bound_var card Ts j) (arity_of R card (nth Ts j))) -(* (typ -> int) -> rep -> typ list -> int -> rel_expr *) val rel_expr_for_bound_var = foldl1 Product oooo vars_for_bound_var -(* rep -> (typ -> int) -> typ list -> typ -> decl list *) fun decls_for R card Ts T = map2 (curry DeclOne o pair 1) (index_seq (index_for_bound_var card (T :: Ts) 0) (arity_of R card (nth (T :: Ts) 0))) (map (AtomSeq o rpair 0) (atom_schema_of R card T)) -(* int list -> rel_expr *) val atom_product = foldl1 Product o map Atom val false_atom = Atom 0 val true_atom = Atom 1 -(* rel_expr -> formula *) fun formula_from_atom r = RelEq (r, true_atom) -(* formula -> rel_expr *) fun atom_from_formula f = RelIf (f, true_atom, false_atom) -(* Proof.context -> (typ -> int) -> styp list -> term -> formula *) fun kodkod_formula_from_term ctxt card frees = let - (* typ -> rel_expr -> rel_expr *) fun R_rep_from_S_rep (T as Type (@{type_name fun}, [T1, @{typ bool}])) r = let val jss = atom_schema_of SRep card T1 |> map (rpair 0) @@ -117,13 +105,11 @@ |> foldl1 Union end | R_rep_from_S_rep _ r = r - (* typ list -> typ -> rel_expr -> rel_expr *) fun S_rep_from_R_rep Ts (T as Type (@{type_name fun}, _)) r = Comprehension (decls_for SRep card Ts T, RelEq (R_rep_from_S_rep T (rel_expr_for_bound_var card SRep (T :: Ts) 0), r)) | S_rep_from_R_rep _ _ r = r - (* typ list -> term -> formula *) fun to_F Ts t = (case t of @{const Not} $ t1 => Not (to_F Ts t1) @@ -154,28 +140,26 @@ | Const (s, _) => raise NOT_SUPPORTED ("constant " ^ quote s) | _ => raise TERM ("Minipick.kodkod_formula_from_term.to_F", [t])) handle SAME () => formula_from_atom (to_R_rep Ts t) - (* typ list -> term -> rel_expr *) and to_S_rep Ts t = - case t of - Const (@{const_name Pair}, _) $ t1 $ t2 => - Product (to_S_rep Ts t1, to_S_rep Ts t2) - | Const (@{const_name Pair}, _) $ _ => to_S_rep Ts (eta_expand Ts t 1) - | Const (@{const_name Pair}, _) => to_S_rep Ts (eta_expand Ts t 2) - | Const (@{const_name fst}, _) $ t1 => - let val fst_arity = arity_of SRep card (fastype_of1 (Ts, t)) in - Project (to_S_rep Ts t1, num_seq 0 fst_arity) - end - | Const (@{const_name fst}, _) => to_S_rep Ts (eta_expand Ts t 1) - | Const (@{const_name snd}, _) $ t1 => - let - val pair_arity = arity_of SRep card (fastype_of1 (Ts, t1)) - val snd_arity = arity_of SRep card (fastype_of1 (Ts, t)) - val fst_arity = pair_arity - snd_arity - in Project (to_S_rep Ts t1, num_seq fst_arity snd_arity) end - | Const (@{const_name snd}, _) => to_S_rep Ts (eta_expand Ts t 1) - | Bound j => rel_expr_for_bound_var card SRep Ts j - | _ => S_rep_from_R_rep Ts (fastype_of1 (Ts, t)) (to_R_rep Ts t) - (* term -> rel_expr *) + case t of + Const (@{const_name Pair}, _) $ t1 $ t2 => + Product (to_S_rep Ts t1, to_S_rep Ts t2) + | Const (@{const_name Pair}, _) $ _ => to_S_rep Ts (eta_expand Ts t 1) + | Const (@{const_name Pair}, _) => to_S_rep Ts (eta_expand Ts t 2) + | Const (@{const_name fst}, _) $ t1 => + let val fst_arity = arity_of SRep card (fastype_of1 (Ts, t)) in + Project (to_S_rep Ts t1, num_seq 0 fst_arity) + end + | Const (@{const_name fst}, _) => to_S_rep Ts (eta_expand Ts t 1) + | Const (@{const_name 
snd}, _) $ t1 => + let + val pair_arity = arity_of SRep card (fastype_of1 (Ts, t1)) + val snd_arity = arity_of SRep card (fastype_of1 (Ts, t)) + val fst_arity = pair_arity - snd_arity + in Project (to_S_rep Ts t1, num_seq fst_arity snd_arity) end + | Const (@{const_name snd}, _) => to_S_rep Ts (eta_expand Ts t 1) + | Bound j => rel_expr_for_bound_var card SRep Ts j + | _ => S_rep_from_R_rep Ts (fastype_of1 (Ts, t)) (to_R_rep Ts t) and to_R_rep Ts t = (case t of @{const Not} => to_R_rep Ts (eta_expand Ts t 1) @@ -282,7 +266,6 @@ handle SAME () => R_rep_from_S_rep (fastype_of1 (Ts, t)) (to_S_rep Ts t) in to_F [] end -(* (typ -> int) -> int -> styp -> bound *) fun bound_for_free card i (s, T) = let val js = atom_schema_of RRep card T in ([((length js, i), s)], @@ -290,7 +273,6 @@ |> tuple_set_from_atom_schema]) end -(* (typ -> int) -> typ list -> typ -> rel_expr -> formula *) fun declarative_axiom_for_rel_expr card Ts (Type (@{type_name fun}, [T1, T2])) r = if body_type T2 = bool_T then @@ -300,15 +282,12 @@ declarative_axiom_for_rel_expr card (T1 :: Ts) T2 (List.foldl Join r (vars_for_bound_var card SRep (T1 :: Ts) 0))) | declarative_axiom_for_rel_expr _ _ _ r = One r -(* (typ -> int) -> bool -> int -> styp -> formula *) fun declarative_axiom_for_free card i (_, T) = declarative_axiom_for_rel_expr card [] T (Rel (arity_of RRep card T, i)) -(* Proof.context -> (typ -> int) -> term -> problem *) fun kodkod_problem_from_term ctxt raw_card t = let val thy = ProofContext.theory_of ctxt - (* typ -> int *) fun card (Type (@{type_name fun}, [T1, T2])) = reasonable_power (card T2) (card T1) | card (Type (@{type_name "*"}, [T1, T2])) = card T1 * card T2 @@ -328,7 +307,6 @@ bounds = bounds, int_bounds = [], expr_assigns = [], formula = formula} end -(* theory -> problem list -> string *) fun solve_any_kodkod_problem thy problems = let val {overlord, ...} = Nitpick_Isar.default_params thy [] diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/nitpick.ML --- a/src/HOL/Tools/Nitpick/nitpick.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick.ML Tue May 04 20:30:22 2010 +0200 @@ -9,51 +9,45 @@ sig type styp = Nitpick_Util.styp type term_postprocessor = Nitpick_Model.term_postprocessor - type params = { - cards_assigns: (typ option * int list) list, - maxes_assigns: (styp option * int list) list, - iters_assigns: (styp option * int list) list, - bitss: int list, - bisim_depths: int list, - boxes: (typ option * bool option) list, - finitizes: (typ option * bool option) list, - monos: (typ option * bool option) list, - stds: (typ option * bool) list, - wfs: (styp option * bool option) list, - sat_solver: string, - blocking: bool, - falsify: bool, - debug: bool, - verbose: bool, - overlord: bool, - user_axioms: bool option, - assms: bool, - merge_type_vars: bool, - binary_ints: bool option, - destroy_constrs: bool, - specialize: bool, - skolemize: bool, - star_linear_preds: bool, - uncurry: bool, - fast_descrs: bool, - peephole_optim: bool, - timeout: Time.time option, - tac_timeout: Time.time option, - sym_break: int, - sharing_depth: int, - flatten_props: bool, - max_threads: int, - show_skolems: bool, - show_datatypes: bool, - show_consts: bool, - evals: term list, - formats: (term option * int list) list, - max_potential: int, - max_genuine: int, - check_potential: bool, - check_genuine: bool, - batch_size: int, - expect: string} + type params = + {cards_assigns: (typ option * int list) list, + maxes_assigns: (styp option * int list) list, + iters_assigns: (styp option * int list) 
list, + bitss: int list, + bisim_depths: int list, + boxes: (typ option * bool option) list, + finitizes: (typ option * bool option) list, + monos: (typ option * bool option) list, + stds: (typ option * bool) list, + wfs: (styp option * bool option) list, + sat_solver: string, + blocking: bool, + falsify: bool, + debug: bool, + verbose: bool, + overlord: bool, + user_axioms: bool option, + assms: bool, + merge_type_vars: bool, + binary_ints: bool option, + destroy_constrs: bool, + specialize: bool, + star_linear_preds: bool, + fast_descrs: bool, + peephole_optim: bool, + timeout: Time.time option, + tac_timeout: Time.time option, + max_threads: int, + show_datatypes: bool, + show_consts: bool, + evals: term list, + formats: (term option * int list) list, + max_potential: int, + max_genuine: int, + check_potential: bool, + check_genuine: bool, + batch_size: int, + expect: string} val register_frac_type : string -> (string * string) list -> theory -> theory val unregister_frac_type : string -> theory -> theory @@ -85,63 +79,56 @@ structure KK = Kodkod -type params = { - cards_assigns: (typ option * int list) list, - maxes_assigns: (styp option * int list) list, - iters_assigns: (styp option * int list) list, - bitss: int list, - bisim_depths: int list, - boxes: (typ option * bool option) list, - finitizes: (typ option * bool option) list, - monos: (typ option * bool option) list, - stds: (typ option * bool) list, - wfs: (styp option * bool option) list, - sat_solver: string, - blocking: bool, - falsify: bool, - debug: bool, - verbose: bool, - overlord: bool, - user_axioms: bool option, - assms: bool, - merge_type_vars: bool, - binary_ints: bool option, - destroy_constrs: bool, - specialize: bool, - skolemize: bool, - star_linear_preds: bool, - uncurry: bool, - fast_descrs: bool, - peephole_optim: bool, - timeout: Time.time option, - tac_timeout: Time.time option, - sym_break: int, - sharing_depth: int, - flatten_props: bool, - max_threads: int, - show_skolems: bool, - show_datatypes: bool, - show_consts: bool, - evals: term list, - formats: (term option * int list) list, - max_potential: int, - max_genuine: int, - check_potential: bool, - check_genuine: bool, - batch_size: int, - expect: string} +type params = + {cards_assigns: (typ option * int list) list, + maxes_assigns: (styp option * int list) list, + iters_assigns: (styp option * int list) list, + bitss: int list, + bisim_depths: int list, + boxes: (typ option * bool option) list, + finitizes: (typ option * bool option) list, + monos: (typ option * bool option) list, + stds: (typ option * bool) list, + wfs: (styp option * bool option) list, + sat_solver: string, + blocking: bool, + falsify: bool, + debug: bool, + verbose: bool, + overlord: bool, + user_axioms: bool option, + assms: bool, + merge_type_vars: bool, + binary_ints: bool option, + destroy_constrs: bool, + specialize: bool, + star_linear_preds: bool, + fast_descrs: bool, + peephole_optim: bool, + timeout: Time.time option, + tac_timeout: Time.time option, + max_threads: int, + show_datatypes: bool, + show_consts: bool, + evals: term list, + formats: (term option * int list) list, + max_potential: int, + max_genuine: int, + check_potential: bool, + check_genuine: bool, + batch_size: int, + expect: string} -type problem_extension = { - free_names: nut list, - sel_names: nut list, - nonsel_names: nut list, - rel_table: nut NameTable.table, - unsound: bool, - scope: scope} - +type problem_extension = + {free_names: nut list, + sel_names: nut list, + nonsel_names: nut list, + 
rel_table: nut NameTable.table, + unsound: bool, + scope: scope} + type rich_problem = KK.problem * problem_extension -(* Proof.context -> string -> term list -> Pretty.T list *) fun pretties_for_formulas _ _ [] = [] | pretties_for_formulas ctxt s ts = [Pretty.str (s ^ plural_s_for_list ts ^ ":"), @@ -152,10 +139,8 @@ Pretty.str (if j = 1 then "." else ";")]) (length ts downto 1) ts))] -(* unit -> string *) fun install_java_message () = "Nitpick requires a Java 1.5 virtual machine called \"java\"." -(* unit -> string *) fun install_kodkodi_message () = "Nitpick requires the external Java program Kodkodi. To install it, download \ \the package from Isabelle's web page and add the \"kodkodi-x.y.z\" \ @@ -167,35 +152,27 @@ val max_unsound_delay_ms = 200 val max_unsound_delay_percent = 2 -(* Time.time option -> int *) fun unsound_delay_for_timeout NONE = max_unsound_delay_ms | unsound_delay_for_timeout (SOME timeout) = Int.max (0, Int.min (max_unsound_delay_ms, Time.toMilliseconds timeout * max_unsound_delay_percent div 100)) -(* Time.time option -> bool *) fun passed_deadline NONE = false | passed_deadline (SOME time) = Time.compare (Time.now (), time) <> LESS -(* ('a * bool option) list -> bool *) fun none_true assigns = forall (not_equal (SOME true) o snd) assigns val syntactic_sorts = @{sort "{default,zero,one,plus,minus,uminus,times,inverse,abs,sgn,ord,eq}"} @ @{sort number} -(* typ -> bool *) fun has_tfree_syntactic_sort (TFree (_, S as _ :: _)) = subset (op =) (S, syntactic_sorts) | has_tfree_syntactic_sort _ = false -(* term -> bool *) val has_syntactic_sorts = exists_type (exists_subtype has_tfree_syntactic_sort) -(* (unit -> string) -> Pretty.T *) fun plazy f = Pretty.blk (0, pstrs (f ())) -(* Time.time -> Proof.state -> params -> bool -> int -> int -> int - -> (term * term) list -> term list -> term -> string * Proof.state *) fun pick_them_nits_in_term deadline state (params : params) auto i n step subst orig_assm_ts orig_t = let @@ -211,14 +188,11 @@ val {cards_assigns, maxes_assigns, iters_assigns, bitss, bisim_depths, boxes, finitizes, monos, stds, wfs, sat_solver, falsify, debug, verbose, overlord, user_axioms, assms, merge_type_vars, binary_ints, - destroy_constrs, specialize, skolemize, star_linear_preds, uncurry, - fast_descrs, peephole_optim, tac_timeout, sym_break, sharing_depth, - flatten_props, max_threads, show_skolems, show_datatypes, show_consts, + destroy_constrs, specialize, star_linear_preds, fast_descrs, + peephole_optim, tac_timeout, max_threads, show_datatypes, show_consts, evals, formats, max_potential, max_genuine, check_potential, - check_genuine, batch_size, ...} = - params + check_genuine, batch_size, ...} = params val state_ref = Unsynchronized.ref state - (* Pretty.T -> unit *) val pprint = if auto then Unsynchronized.change state_ref o Proof.goal_message o K @@ -227,22 +201,17 @@ else (fn s => (priority s; if debug then tracing s else ())) o Pretty.string_of - (* (unit -> Pretty.T) -> unit *) fun pprint_m f = () |> not auto ? pprint o f fun pprint_v f = () |> verbose ? pprint o f fun pprint_d f = () |> debug ? 
pprint o f - (* string -> unit *) val print = pprint o curry Pretty.blk 0 o pstrs val print_g = pprint o Pretty.str - (* (unit -> string) -> unit *) val print_m = pprint_m o K o plazy val print_v = pprint_v o K o plazy - (* unit -> unit *) fun check_deadline () = if debug andalso passed_deadline deadline then raise TimeLimit.TimeOut else () - (* unit -> 'a *) fun do_interrupted () = if passed_deadline deadline then raise TimeLimit.TimeOut else raise Interrupt @@ -288,8 +257,7 @@ {thy = thy, ctxt = ctxt, max_bisim_depth = max_bisim_depth, boxes = boxes, stds = stds, wfs = wfs, user_axioms = user_axioms, debug = debug, binary_ints = binary_ints, destroy_constrs = destroy_constrs, - specialize = specialize, skolemize = skolemize, - star_linear_preds = star_linear_preds, uncurry = uncurry, + specialize = specialize, star_linear_preds = star_linear_preds, fast_descrs = fast_descrs, tac_timeout = tac_timeout, evals = evals, case_names = case_names, def_table = def_table, nondef_table = nondef_table, user_nondefs = user_nondefs, @@ -307,7 +275,6 @@ val got_all_user_axioms = got_all_mono_user_axioms andalso no_poly_user_axioms - (* styp * (bool * bool) -> unit *) fun print_wf (x, (gfp, wf)) = pprint (Pretty.blk (0, pstrs ("The " ^ (if gfp then "co" else "") ^ "inductive predicate \"") @@ -344,18 +311,16 @@ *) val unique_scope = forall (curry (op =) 1 o length o snd) cards_assigns - (* typ list -> string -> string *) fun monotonicity_message Ts extra = let val ss = map (quote o string_for_type ctxt) Ts in "The type" ^ plural_s_for_list ss ^ " " ^ - space_implode " " (Sledgehammer_Util.serial_commas "and" ss) ^ " " ^ + space_implode " " (serial_commas "and" ss) ^ " " ^ (if none_true monos then "passed the monotonicity test" else (if length ss = 1 then "is" else "are") ^ " considered monotonic") ^ ". " ^ extra end - (* typ -> bool *) fun is_type_fundamentally_monotonic T = (is_datatype thy stds T andalso not (is_quot_type thy T) andalso (not (is_pure_typedef thy T) orelse is_univ_typedef thy T)) orelse @@ -416,7 +381,6 @@ () (* This detection code is an ugly hack. Fortunately, it is used only to provide a hint to the user. *) - (* string * (Rule_Cases.T * bool) -> bool *) fun is_struct_induct_step (name, (Rule_Cases.Case {fixes, assumes, ...}, _)) = not (null fixes) andalso exists (String.isSuffix ".hyps" o fst) assumes andalso @@ -439,10 +403,10 @@ val _ = List.app (print_g o string_for_type ctxt) nonmono_Ts *) - val need_incremental = Int.max (max_potential, max_genuine) >= 2 - val effective_sat_solver = + val incremental = Int.max (max_potential, max_genuine) >= 2 + val actual_sat_solver = if sat_solver <> "smart" then - if need_incremental andalso + if incremental andalso not (member (op =) (Kodkod_SAT.configured_sat_solvers true) sat_solver) then (print_m (K ("An incremental SAT solver is required: \"SAT4J\" will \ @@ -451,21 +415,19 @@ else sat_solver else - Kodkod_SAT.smart_sat_solver_name need_incremental + Kodkod_SAT.smart_sat_solver_name incremental val _ = if sat_solver = "smart" then - print_v (fn () => "Using SAT solver " ^ quote effective_sat_solver ^ - ". The following" ^ - (if need_incremental then " incremental " else " ") ^ - "solvers are configured: " ^ - commas (map quote (Kodkod_SAT.configured_sat_solvers - need_incremental)) ^ ".") + print_v (fn () => + "Using SAT solver " ^ quote actual_sat_solver ^ ". 
The following" ^ + (if incremental then " incremental " else " ") ^ + "solvers are configured: " ^ + commas_quote (Kodkod_SAT.configured_sat_solvers incremental) ^ ".") else () val too_big_scopes = Unsynchronized.ref [] - (* bool -> scope -> rich_problem option *) fun problem_for_scope unsound (scope as {card_assigns, bits, bisim_depth, datatypes, ofs, ...}) = let @@ -481,7 +443,6 @@ (Typtab.dest ofs) *) val all_exact = forall (is_exact_type datatypes true) all_Ts - (* nut list -> rep NameTable.table -> nut list * rep NameTable.table *) val repify_consts = choose_reps_for_consts scope all_exact val main_j0 = offset_of_type ofs bool_T val (nat_card, nat_j0) = spec_of_type scope nat_T @@ -495,9 +456,9 @@ val (sel_names, rep_table) = choose_reps_for_all_sels scope rep_table val (nonsel_names, rep_table) = repify_consts nonsel_names rep_table val min_highest_arity = - NameTable.fold (curry Int.max o arity_of_rep o snd) rep_table 1 + NameTable.fold (Integer.max o arity_of_rep o snd) rep_table 1 val min_univ_card = - NameTable.fold (curry Int.max o min_univ_card_of_rep o snd) rep_table + NameTable.fold (Integer.max o min_univ_card_of_rep o snd) rep_table (univ_card nat_card int_card main_j0 [] KK.True) val _ = check_arity min_univ_card min_highest_arity @@ -524,20 +485,20 @@ val comment = (if unsound then "unsound" else "sound") ^ "\n" ^ PrintMode.setmp [] multiline_string_for_scope scope val kodkod_sat_solver = - Kodkod_SAT.sat_solver_spec effective_sat_solver |> snd + Kodkod_SAT.sat_solver_spec actual_sat_solver |> snd val bit_width = if bits = 0 then 16 else bits + 1 - val delay = if unsound then - Option.map (fn time => Time.- (time, Time.now ())) - deadline - |> unsound_delay_for_timeout - else - 0 - val settings = [("solver", commas (map quote kodkod_sat_solver)), + val delay = + if unsound then + Option.map (fn time => Time.- (time, Time.now ())) deadline + |> unsound_delay_for_timeout + else + 0 + val settings = [("solver", commas_quote kodkod_sat_solver), ("skolem_depth", "-1"), ("bit_width", string_of_int bit_width), - ("symmetry_breaking", signed_string_of_int sym_break), - ("sharing", signed_string_of_int sharing_depth), - ("flatten", Bool.toString flatten_props), + ("symmetry_breaking", "20"), + ("sharing", "3"), + ("flatten", "false"), ("delay", signed_string_of_int delay)] val plain_rels = free_rels @ other_rels val plain_bounds = map (bound_for_plain_rel ctxt debug) plain_rels @@ -605,22 +566,18 @@ val checked_problems = Unsynchronized.ref (SOME []) val met_potential = Unsynchronized.ref 0 - (* rich_problem list -> int list -> unit *) fun update_checked_problems problems = List.app (Unsynchronized.change checked_problems o Option.map o cons o nth problems) - (* string -> unit *) fun show_kodkod_warning "" = () | show_kodkod_warning s = print_m (fn () => "Kodkod warning: " ^ s ^ ".") - (* bool -> KK.raw_bound list -> problem_extension -> bool * bool option *) fun print_and_check_model genuine bounds ({free_names, sel_names, nonsel_names, rel_table, scope, ...} : problem_extension) = let val (reconstructed_model, codatatypes_ok) = - reconstruct_hol_model {show_skolems = show_skolems, - show_datatypes = show_datatypes, + reconstruct_hol_model {show_datatypes = show_datatypes, show_consts = show_consts} scope formats frees free_names sel_names nonsel_names rel_table bounds @@ -686,8 +643,7 @@ options in print ("Try again with " ^ - space_implode " " - (Sledgehammer_Util.serial_commas "and" ss) ^ + space_implode " " (serial_commas "and" ss) ^ " to confirm that the " ^ das_wort_model ^ 
" is genuine.") end @@ -721,18 +677,15 @@ NONE) |> pair genuine_means_genuine end - (* bool * int * int * int -> bool -> rich_problem list - -> bool * int * int * int *) fun solve_any_problem (found_really_genuine, max_potential, max_genuine, donno) first_time problems = let val max_potential = Int.max (0, max_potential) val max_genuine = Int.max (0, max_genuine) - (* bool -> int * KK.raw_bound list -> bool * bool option *) fun print_and_check genuine (j, bounds) = print_and_check_model genuine bounds (snd (nth problems j)) val max_solutions = max_potential + max_genuine - |> not need_incremental ? curry Int.min 1 + |> not incremental ? Integer.min 1 in if max_solutions <= 0 then (found_really_genuine, 0, 0, donno) @@ -828,8 +781,6 @@ (found_really_genuine, max_potential, max_genuine, donno + 1)) end - (* int -> int -> scope list -> bool * int * int * int - -> bool * int * int * int *) fun run_batch j n scopes (found_really_genuine, max_potential, max_genuine, donno) = let @@ -857,8 +808,6 @@ (length scopes downto 1) scopes))]) else () - (* scope * bool -> rich_problem list * bool - -> rich_problem list * bool *) fun add_problem_for_scope (scope, unsound) (problems, donno) = (check_deadline (); case problem_for_scope unsound scope of @@ -904,13 +853,10 @@ donno) true (rev problems) end - (* rich_problem list -> scope -> int *) fun scope_count (problems : rich_problem list) scope = length (filter (curry scopes_equivalent scope o #scope o snd) problems) - (* string -> string *) fun excipit did_so_and_so = let - (* rich_problem list -> rich_problem list *) val do_filter = if !met_potential = max_potential then filter_out (#unsound o snd) else I @@ -932,7 +878,6 @@ "") ^ "." end - (* int -> int -> scope list -> bool * int * int * int -> KK.outcome *) fun run_batches _ _ [] (found_really_genuine, max_potential, max_genuine, donno) = if donno > 0 andalso max_genuine > 0 then @@ -961,8 +906,8 @@ end val (skipped, the_scopes) = - all_scopes hol_ctxt binarize sym_break cards_assigns maxes_assigns - iters_assigns bitss bisim_depths mono_Ts nonmono_Ts deep_dataTs + all_scopes hol_ctxt binarize cards_assigns maxes_assigns iters_assigns + bitss bisim_depths mono_Ts nonmono_Ts deep_dataTs finitizable_dataTs val _ = if skipped > 0 then print_m (fn () => "Too many scopes. Skipping " ^ @@ -998,8 +943,6 @@ else error "Nitpick was interrupted." 
-(* Proof.state -> params -> bool -> int -> int -> int -> (term * term) list - -> term list -> term -> string * Proof.state *) fun pick_nits_in_term state (params as {debug, timeout, expect, ...}) auto i n step subst orig_assm_ts orig_t = if getenv "KODKODI" = "" then @@ -1018,12 +961,10 @@ else error ("Unexpected outcome: " ^ quote outcome_code ^ ".") end -(* string list -> term -> bool *) fun is_fixed_equation fixes (Const (@{const_name "=="}, _) $ Free (s, _) $ Const _) = member (op =) fixes s | is_fixed_equation _ _ = false -(* Proof.context -> term list * term -> (term * term) list * term list * term *) fun extract_fixed_frees ctxt (assms, t) = let val fixes = Variable.fixes_of ctxt |> map snd @@ -1032,7 +973,6 @@ |>> map Logic.dest_equals in (subst, other_assms, subst_atomic subst t) end -(* Proof.state -> params -> bool -> int -> int -> string * Proof.state *) fun pick_nits_in_subgoal state params auto i step = let val ctxt = Proof.context_of state @@ -1042,8 +982,7 @@ 0 => (priority "No subgoal!"; ("none", state)) | n => let - val (t, frees) = Logic.goal_params t i - val t = subst_bounds (frees, t) + val t = Logic.goal_params t i |> fst val assms = map term_of (Assumption.all_assms_of ctxt) val (subst, assms, t) = extract_fixed_frees ctxt (assms, t) in pick_nits_in_term state params auto i n step subst assms t end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/nitpick_hol.ML --- a/src/HOL/Tools/Nitpick/nitpick_hol.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick_hol.ML Tue May 04 20:30:22 2010 +0200 @@ -13,39 +13,37 @@ type unrolled = styp * styp type wf_cache = (styp * (bool * bool)) list - type hol_context = { - thy: theory, - ctxt: Proof.context, - max_bisim_depth: int, - boxes: (typ option * bool option) list, - stds: (typ option * bool) list, - wfs: (styp option * bool option) list, - user_axioms: bool option, - debug: bool, - binary_ints: bool option, - destroy_constrs: bool, - specialize: bool, - skolemize: bool, - star_linear_preds: bool, - uncurry: bool, - fast_descrs: bool, - tac_timeout: Time.time option, - evals: term list, - case_names: (string * int) list, - def_table: const_table, - nondef_table: const_table, - user_nondefs: term list, - simp_table: const_table Unsynchronized.ref, - psimp_table: const_table, - choice_spec_table: const_table, - intro_table: const_table, - ground_thm_table: term list Inttab.table, - ersatz_table: (string * string) list, - skolems: (string * string list) list Unsynchronized.ref, - special_funs: special_fun list Unsynchronized.ref, - unrolled_preds: unrolled list Unsynchronized.ref, - wf_cache: wf_cache Unsynchronized.ref, - constr_cache: (typ * styp list) list Unsynchronized.ref} + type hol_context = + {thy: theory, + ctxt: Proof.context, + max_bisim_depth: int, + boxes: (typ option * bool option) list, + stds: (typ option * bool) list, + wfs: (styp option * bool option) list, + user_axioms: bool option, + debug: bool, + binary_ints: bool option, + destroy_constrs: bool, + specialize: bool, + star_linear_preds: bool, + fast_descrs: bool, + tac_timeout: Time.time option, + evals: term list, + case_names: (string * int) list, + def_table: const_table, + nondef_table: const_table, + user_nondefs: term list, + simp_table: const_table Unsynchronized.ref, + psimp_table: const_table, + choice_spec_table: const_table, + intro_table: const_table, + ground_thm_table: term list Inttab.table, + ersatz_table: (string * string) list, + skolems: (string * string list) list Unsynchronized.ref, + special_funs: 
special_fun list Unsynchronized.ref, + unrolled_preds: unrolled list Unsynchronized.ref, + wf_cache: wf_cache Unsynchronized.ref, + constr_cache: (typ * styp list) list Unsynchronized.ref} datatype fixpoint_kind = Lfp | Gfp | NoFp datatype boxability = @@ -217,7 +215,6 @@ structure Nitpick_HOL : NITPICK_HOL = struct -open Sledgehammer_Util open Nitpick_Util type const_table = term list Symtab.table @@ -225,39 +222,37 @@ type unrolled = styp * styp type wf_cache = (styp * (bool * bool)) list -type hol_context = { - thy: theory, - ctxt: Proof.context, - max_bisim_depth: int, - boxes: (typ option * bool option) list, - stds: (typ option * bool) list, - wfs: (styp option * bool option) list, - user_axioms: bool option, - debug: bool, - binary_ints: bool option, - destroy_constrs: bool, - specialize: bool, - skolemize: bool, - star_linear_preds: bool, - uncurry: bool, - fast_descrs: bool, - tac_timeout: Time.time option, - evals: term list, - case_names: (string * int) list, - def_table: const_table, - nondef_table: const_table, - user_nondefs: term list, - simp_table: const_table Unsynchronized.ref, - psimp_table: const_table, - choice_spec_table: const_table, - intro_table: const_table, - ground_thm_table: term list Inttab.table, - ersatz_table: (string * string) list, - skolems: (string * string list) list Unsynchronized.ref, - special_funs: special_fun list Unsynchronized.ref, - unrolled_preds: unrolled list Unsynchronized.ref, - wf_cache: wf_cache Unsynchronized.ref, - constr_cache: (typ * styp list) list Unsynchronized.ref} +type hol_context = + {thy: theory, + ctxt: Proof.context, + max_bisim_depth: int, + boxes: (typ option * bool option) list, + stds: (typ option * bool) list, + wfs: (styp option * bool option) list, + user_axioms: bool option, + debug: bool, + binary_ints: bool option, + destroy_constrs: bool, + specialize: bool, + star_linear_preds: bool, + fast_descrs: bool, + tac_timeout: Time.time option, + evals: term list, + case_names: (string * int) list, + def_table: const_table, + nondef_table: const_table, + user_nondefs: term list, + simp_table: const_table Unsynchronized.ref, + psimp_table: const_table, + choice_spec_table: const_table, + intro_table: const_table, + ground_thm_table: term list Inttab.table, + ersatz_table: (string * string) list, + skolems: (string * string list) list Unsynchronized.ref, + special_funs: special_fun list Unsynchronized.ref, + unrolled_preds: unrolled list Unsynchronized.ref, + wf_cache: wf_cache Unsynchronized.ref, + constr_cache: (typ * styp list) list Unsynchronized.ref} datatype fixpoint_kind = Lfp | Gfp | NoFp datatype boxability = @@ -269,7 +264,7 @@ val empty = {frac_types = [], codatatypes = []} val extend = I fun merge ({frac_types = fs1, codatatypes = cs1}, - {frac_types = fs2, codatatypes = cs2}) : T = + {frac_types = fs2, codatatypes = cs2}) : T = {frac_types = AList.merge (op =) (K true) (fs1, fs2), codatatypes = AList.merge (op =) (K true) (cs1, cs2)}) @@ -294,31 +289,24 @@ (** Constant/type information and term/type manipulation **) -(* int -> string *) fun sel_prefix_for j = sel_prefix ^ string_of_int j ^ name_sep -(* Proof.context -> typ -> string *) fun quot_normal_name_for_type ctxt T = quot_normal_prefix ^ unyxml (Syntax.string_of_typ ctxt T) -(* string -> string * string *) val strip_first_name_sep = Substring.full #> Substring.position name_sep ##> Substring.triml 1 #> pairself Substring.string -(* string -> string *) fun original_name s = if String.isPrefix nitpick_prefix s then case strip_first_name_sep s of (s1, "") 
=> s1 | (_, s2) => original_name s2 else s -(* term * term -> term *) fun s_betapply (Const (@{const_name If}, _) $ @{const True} $ t, _) = t | s_betapply (Const (@{const_name If}, _) $ @{const False} $ _, t) = t | s_betapply p = betapply p -(* term * term list -> term *) val s_betapplys = Library.foldl s_betapply -(* term * term -> term *) fun s_conj (t1, @{const True}) = t1 | s_conj (@{const True}, t2) = t2 | s_conj (t1, t2) = @@ -330,18 +318,15 @@ if t1 = @{const True} orelse t2 = @{const True} then @{const True} else HOLogic.mk_disj (t1, t2) -(* term -> term -> term list *) fun strip_connective conn_t (t as (t0 $ t1 $ t2)) = if t0 = conn_t then strip_connective t0 t2 @ strip_connective t0 t1 else [t] | strip_connective _ t = [t] -(* term -> term list * term *) fun strip_any_connective (t as (t0 $ _ $ _)) = if t0 = @{const "op &"} orelse t0 = @{const "op |"} then (strip_connective t0 t, t0) else ([t], @{const Not}) | strip_any_connective t = ([t], @{const Not}) -(* term -> term list *) val conjuncts_of = strip_connective @{const "op &"} val disjuncts_of = strip_connective @{const "op |"} @@ -416,7 +401,6 @@ (@{const_name minus_class.minus}, 2), (@{const_name ord_class.less_eq}, 2)] -(* typ -> typ *) fun unarize_type @{typ "unsigned_bit word"} = nat_T | unarize_type @{typ "signed_bit word"} = int_T | unarize_type (Type (s, Ts as _ :: _)) = Type (s, map unarize_type Ts) @@ -437,44 +421,33 @@ | uniterize_type T = T val uniterize_unarize_unbox_etc_type = uniterize_type o unarize_unbox_etc_type -(* Proof.context -> typ -> string *) fun string_for_type ctxt = Syntax.string_of_typ ctxt o unarize_unbox_etc_type -(* string -> string -> string *) val prefix_name = Long_Name.qualify o Long_Name.base_name -(* string -> string *) fun shortest_name s = List.last (space_explode "." s) handle List.Empty => "" -(* string -> term -> term *) val prefix_abs_vars = Term.map_abs_vars o prefix_name -(* string -> string *) fun short_name s = case space_explode name_sep s of [_] => s |> String.isPrefix nitpick_prefix s ? 
unprefix nitpick_prefix | ss => map shortest_name ss |> space_implode "_" -(* typ -> typ *) fun shorten_names_in_type (Type (s, Ts)) = Type (short_name s, map shorten_names_in_type Ts) | shorten_names_in_type T = T -(* term -> term *) val shorten_names_in_term = map_aterms (fn Const (s, T) => Const (short_name s, T) | t => t) #> map_types shorten_names_in_type -(* theory -> typ * typ -> bool *) fun strict_type_match thy (T1, T2) = (Sign.typ_match thy (T2, T1) Vartab.empty; true) handle Type.TYPE_MATCH => false fun type_match thy = strict_type_match thy o pairself unarize_unbox_etc_type -(* theory -> styp * styp -> bool *) fun const_match thy ((s1, T1), (s2, T2)) = s1 = s2 andalso type_match thy (T1, T2) -(* theory -> term * term -> bool *) fun term_match thy (Const x1, Const x2) = const_match thy (x1, x2) | term_match thy (Free (s1, T1), Free (s2, T2)) = const_match thy ((shortest_name s1, T1), (shortest_name s2, T2)) | term_match _ (t1, t2) = t1 aconv t2 -(* typ -> term -> term -> term *) fun frac_from_term_pair T t1 t2 = case snd (HOLogic.dest_number t1) of 0 => HOLogic.mk_number T 0 @@ -483,7 +456,6 @@ | n2 => Const (@{const_name divide}, T --> T --> T) $ HOLogic.mk_number T n1 $ HOLogic.mk_number T n2 -(* typ -> bool *) fun is_TFree (TFree _) = true | is_TFree _ = false fun is_higher_order_type (Type (@{type_name fun}, _)) = true @@ -509,50 +481,41 @@ | is_word_type _ = false val is_integer_like_type = is_iterator_type orf is_integer_type orf is_word_type val is_record_type = not o null o Record.dest_recTs -(* theory -> typ -> bool *) fun is_frac_type thy (Type (s, [])) = not (null (these (AList.lookup (op =) (#frac_types (Data.get thy)) s))) | is_frac_type _ _ = false fun is_number_type thy = is_integer_like_type orf is_frac_type thy -(* bool -> styp -> typ *) fun iterator_type_for_const gfp (s, T) = Type ((if gfp then gfp_iterator_prefix else lfp_iterator_prefix) ^ s, binder_types T) -(* typ -> styp *) fun const_for_iterator_type (Type (s, Ts)) = (strip_first_name_sep s |> snd, Ts ---> bool_T) | const_for_iterator_type T = raise TYPE ("Nitpick_HOL.const_for_iterator_type", [T], []) -(* int -> typ -> typ list * typ *) fun strip_n_binders 0 T = ([], T) | strip_n_binders n (Type (@{type_name fun}, [T1, T2])) = strip_n_binders (n - 1) T2 |>> cons T1 | strip_n_binders n (Type (@{type_name fun_box}, Ts)) = strip_n_binders n (Type (@{type_name fun}, Ts)) | strip_n_binders _ T = raise TYPE ("Nitpick_HOL.strip_n_binders", [T], []) -(* typ -> typ *) val nth_range_type = snd oo strip_n_binders -(* typ -> int *) fun num_factors_in_type (Type (@{type_name "*"}, [T1, T2])) = fold (Integer.add o num_factors_in_type) [T1, T2] 0 | num_factors_in_type _ = 1 fun num_binder_types (Type (@{type_name fun}, [_, T2])) = 1 + num_binder_types T2 | num_binder_types _ = 0 -(* typ -> typ list *) val curried_binder_types = maps HOLogic.flatten_tupleT o binder_types fun maybe_curried_binder_types T = (if is_pair_type (body_type T) then binder_types else curried_binder_types) T -(* typ -> term list -> term *) fun mk_flat_tuple _ [t] = t | mk_flat_tuple (Type (@{type_name "*"}, [T1, T2])) (t :: ts) = HOLogic.pair_const T1 T2 $ t $ (mk_flat_tuple T2 ts) | mk_flat_tuple T ts = raise TYPE ("Nitpick_HOL.mk_flat_tuple", [T], ts) -(* int -> term -> term list *) fun dest_n_tuple 1 t = [t] | dest_n_tuple n t = HOLogic.dest_prod t ||> dest_n_tuple (n - 1) |> op :: @@ -561,7 +524,6 @@ set_def: thm option, prop_of_Rep: thm, set_name: string, Abs_inverse: thm option, Rep_inverse: thm option} -(* theory -> string -> typedef_info 
*) fun typedef_info thy s = if is_frac_type thy (Type (s, [])) then SOME {abs_type = Type (s, []), rep_type = @{typ "int * int"}, @@ -579,21 +541,17 @@ Rep_inverse = SOME Rep_inverse} | _ => NONE -(* theory -> string -> bool *) val is_typedef = is_some oo typedef_info val is_real_datatype = is_some oo Datatype.get_info -(* theory -> (typ option * bool) list -> typ -> bool *) fun is_standard_datatype thy = the oo triple_lookup (type_match thy) (* FIXME: Use antiquotation for "code_numeral" below or detect "rep_datatype", e.g., by adding a field to "Datatype_Aux.info". *) -(* theory -> (typ option * bool) list -> string -> bool *) fun is_basic_datatype thy stds s = member (op =) [@{type_name "*"}, @{type_name bool}, @{type_name unit}, @{type_name int}, "Code_Numeral.code_numeral"] s orelse (s = @{type_name nat} andalso is_standard_datatype thy stds nat_T) -(* theory -> typ -> typ -> typ -> typ *) fun instantiate_type thy T1 T1' T2 = Same.commit (Envir.subst_type_same (Sign.typ_match thy (T1, T1') Vartab.empty)) T2 @@ -602,20 +560,16 @@ fun varify_and_instantiate_type thy T1 T1' T2 = instantiate_type thy (Logic.varifyT_global T1) T1' (Logic.varifyT_global T2) -(* theory -> typ -> typ -> styp *) fun repair_constr_type thy body_T' T = varify_and_instantiate_type thy (body_type T) body_T' T -(* string -> (string * string) list -> theory -> theory *) fun register_frac_type frac_s ersaetze thy = let val {frac_types, codatatypes} = Data.get thy val frac_types = AList.update (op =) (frac_s, ersaetze) frac_types in Data.put {frac_types = frac_types, codatatypes = codatatypes} thy end -(* string -> theory -> theory *) fun unregister_frac_type frac_s = register_frac_type frac_s [] -(* typ -> string -> styp list -> theory -> theory *) fun register_codatatype co_T case_name constr_xs thy = let val {frac_types, codatatypes} = Data.get thy @@ -631,10 +585,8 @@ val codatatypes = AList.update (op =) (co_s, (case_name, constr_xs)) codatatypes in Data.put {frac_types = frac_types, codatatypes = codatatypes} thy end -(* typ -> theory -> theory *) fun unregister_codatatype co_T = register_codatatype co_T "" [] -(* theory -> typ -> bool *) fun is_quot_type thy (Type (s, _)) = is_some (Quotient_Info.quotdata_lookup_raw thy s) | is_quot_type _ _ = false @@ -671,32 +623,26 @@ end | NONE => false) | is_univ_typedef _ _ = false -(* theory -> (typ option * bool) list -> typ -> bool *) fun is_datatype thy stds (T as Type (s, _)) = (is_typedef thy s orelse is_codatatype thy T orelse T = @{typ ind} orelse is_quot_type thy T) andalso not (is_basic_datatype thy stds s) | is_datatype _ _ _ = false -(* theory -> typ -> (string * typ) list * (string * typ) *) fun all_record_fields thy T = let val (recs, more) = Record.get_extT_fields thy T in recs @ more :: all_record_fields thy (snd more) end handle TYPE _ => [] -(* styp -> bool *) fun is_record_constr (s, T) = String.isSuffix Record.extN s andalso let val dataT = body_type T in is_record_type dataT andalso s = unsuffix Record.ext_typeN (fst (dest_Type dataT)) ^ Record.extN end -(* theory -> typ -> int *) val num_record_fields = Integer.add 1 o length o fst oo Record.get_extT_fields -(* theory -> string -> typ -> int *) fun no_of_record_field thy s T1 = find_index (curry (op =) s o fst) (Record.get_extT_fields thy T1 ||> single |> op @) -(* theory -> styp -> bool *) fun is_record_get thy (s, Type (@{type_name fun}, [T1, _])) = exists (curry (op =) s o fst) (all_record_fields thy T1) | is_record_get _ _ = false @@ -715,7 +661,6 @@ SOME {Rep_name, ...} => s = Rep_name | NONE => 
false) | is_rep_fun _ _ = false -(* Proof.context -> styp -> bool *) fun is_quot_abs_fun ctxt (x as (_, Type (@{type_name fun}, [_, Type (s', _)]))) = (try (Quotient_Term.absrep_const_chk Quotient_Term.AbsF ctxt) s' @@ -727,19 +672,16 @@ = SOME (Const x)) | is_quot_rep_fun _ _ = false -(* theory -> styp -> styp *) fun mate_of_rep_fun thy (x as (_, Type (@{type_name fun}, [T1 as Type (s', _), T2]))) = (case typedef_info thy s' of SOME {Abs_name, ...} => (Abs_name, Type (@{type_name fun}, [T2, T1])) | NONE => raise TERM ("Nitpick_HOL.mate_of_rep_fun", [Const x])) | mate_of_rep_fun _ x = raise TERM ("Nitpick_HOL.mate_of_rep_fun", [Const x]) -(* theory -> typ -> typ *) fun rep_type_for_quot_type thy (T as Type (s, _)) = let val {qtyp, rtyp, ...} = Quotient_Info.quotdata_lookup thy s in instantiate_type thy qtyp T rtyp end -(* theory -> typ -> term *) fun equiv_relation_for_quot_type thy (Type (s, Ts)) = let val {qtyp, equiv_rel, ...} = Quotient_Info.quotdata_lookup thy s @@ -748,7 +690,6 @@ | equiv_relation_for_quot_type _ T = raise TYPE ("Nitpick_HOL.equiv_relation_for_quot_type", [T], []) -(* theory -> styp -> bool *) fun is_coconstr thy (s, T) = let val {codatatypes, ...} = Data.get thy @@ -771,19 +712,16 @@ fun is_stale_constr thy (x as (_, T)) = is_codatatype thy (body_type T) andalso is_constr_like thy x andalso not (is_coconstr thy x) -(* theory -> (typ option * bool) list -> styp -> bool *) fun is_constr thy stds (x as (_, T)) = is_constr_like thy x andalso not (is_basic_datatype thy stds (fst (dest_Type (unarize_type (body_type T))))) andalso not (is_stale_constr thy x) -(* string -> bool *) val is_sel = String.isPrefix discr_prefix orf String.isPrefix sel_prefix val is_sel_like_and_no_discr = String.isPrefix sel_prefix orf (member (op =) [@{const_name fst}, @{const_name snd}]) -(* boxability -> boxability *) fun in_fun_lhs_for InConstr = InSel | in_fun_lhs_for _ = InFunLHS fun in_fun_rhs_for InConstr = InConstr @@ -791,7 +729,6 @@ | in_fun_rhs_for InFunRHS1 = InFunRHS2 | in_fun_rhs_for _ = InFunRHS1 -(* hol_context -> boxability -> typ -> bool *) fun is_boxing_worth_it (hol_ctxt : hol_context) boxy T = case T of Type (@{type_name fun}, _) => @@ -803,12 +740,10 @@ exists (is_boxing_worth_it hol_ctxt InPair) (map (box_type hol_ctxt InPair) Ts)) | _ => false -(* hol_context -> boxability -> string * typ list -> string *) and should_box_type (hol_ctxt as {thy, boxes, ...}) boxy z = case triple_lookup (type_match thy) boxes (Type z) of SOME (SOME box_me) => box_me | _ => is_boxing_worth_it hol_ctxt boxy (Type z) -(* hol_context -> boxability -> typ -> typ *) and box_type hol_ctxt boxy T = case T of Type (z as (@{type_name fun}, [T1, T2])) => @@ -830,37 +765,29 @@ else InPair)) Ts) | _ => T -(* typ -> typ *) fun binarize_nat_and_int_in_type @{typ nat} = @{typ "unsigned_bit word"} | binarize_nat_and_int_in_type @{typ int} = @{typ "signed_bit word"} | binarize_nat_and_int_in_type (Type (s, Ts)) = Type (s, map binarize_nat_and_int_in_type Ts) | binarize_nat_and_int_in_type T = T -(* term -> term *) val binarize_nat_and_int_in_term = map_types binarize_nat_and_int_in_type -(* styp -> styp *) fun discr_for_constr (s, T) = (discr_prefix ^ s, body_type T --> bool_T) -(* typ -> int *) fun num_sels_for_constr_type T = length (maybe_curried_binder_types T) -(* string -> int -> string *) fun nth_sel_name_for_constr_name s n = if s = @{const_name Pair} then if n = 0 then @{const_name fst} else @{const_name snd} else sel_prefix_for n ^ s -(* styp -> int -> styp *) fun nth_sel_for_constr x ~1 = 
discr_for_constr x | nth_sel_for_constr (s, T) n = (nth_sel_name_for_constr_name s n, body_type T --> nth (maybe_curried_binder_types T) n) -(* hol_context -> bool -> styp -> int -> styp *) fun binarized_and_boxed_nth_sel_for_constr hol_ctxt binarize = apsnd ((binarize ? binarize_nat_and_int_in_type) o box_type hol_ctxt InSel) oo nth_sel_for_constr -(* string -> int *) fun sel_no_from_name s = if String.isPrefix discr_prefix s then ~1 @@ -871,15 +798,12 @@ else 0 -(* term -> term *) val close_form = let - (* (indexname * typ) list -> (indexname * typ) list -> term -> term *) fun close_up zs zs' = fold (fn (z as ((s, _), T)) => fn t' => Term.all T $ Abs (s, T, abstract_over (Var z, t'))) (take (length zs' - length zs) zs') - (* (indexname * typ) list -> term -> term *) fun aux zs (@{const "==>"} $ t1 $ t2) = let val zs' = Term.add_vars t1 zs in close_up zs zs' (Logic.mk_implies (t1, aux zs' t2)) @@ -887,7 +811,6 @@ | aux zs t = close_up zs (Term.add_vars t zs) t in aux [] end -(* typ list -> term -> int -> term *) fun eta_expand _ t 0 = t | eta_expand Ts (Abs (s, T, t')) n = Abs (s, T, eta_expand (T :: Ts) t' (n - 1)) @@ -896,7 +819,6 @@ (List.take (binder_types (fastype_of1 (Ts, t)), n)) (list_comb (incr_boundvars n t, map Bound (n - 1 downto 0))) -(* term -> term *) fun extensionalize t = case t of (t0 as @{const Trueprop}) $ t1 => t0 $ extensionalize t1 @@ -906,17 +828,14 @@ end | _ => t -(* typ -> term list -> term *) fun distinctness_formula T = all_distinct_unordered_pairs_of #> map (fn (t1, t2) => @{const Not} $ (HOLogic.eq_const T $ t1 $ t2)) #> List.foldr (s_conj o swap) @{const True} -(* typ -> term *) fun zero_const T = Const (@{const_name zero_class.zero}, T) fun suc_const T = Const (@{const_name Suc}, T --> T) -(* hol_context -> typ -> styp list *) fun uncached_datatype_constrs ({thy, stds, ...} : hol_context) (T as Type (s, Ts)) = (case AList.lookup (op =) (#codatatypes (Data.get thy)) s of @@ -953,7 +872,6 @@ else []) | uncached_datatype_constrs _ _ = [] -(* hol_context -> typ -> styp list *) fun datatype_constrs (hol_ctxt as {constr_cache, ...}) T = case AList.lookup (op =) (!constr_cache) T of SOME xs => xs @@ -961,18 +879,14 @@ let val xs = uncached_datatype_constrs hol_ctxt T in (Unsynchronized.change constr_cache (cons (T, xs)); xs) end -(* hol_context -> bool -> typ -> styp list *) fun binarized_and_boxed_datatype_constrs hol_ctxt binarize = map (apsnd ((binarize ? 
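(* Illustrative sketch, not from the patched sources: what the
   "distinctness_formula" combinator a little further up produces for a
   three-element list, assuming "all_distinct_unordered_pairs_of" enumerates
   each unordered pair once and "s_conj" is Nitpick's smart conjunction.
     distinctness_formula T [a, b, c]
       builds, up to simplification by s_conj, the conjunction of
       Not (a = b), Not (a = c) and Not (b = c). *)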
binarize_nat_and_int_in_type) o box_type hol_ctxt InConstr)) o datatype_constrs hol_ctxt -(* hol_context -> typ -> int *) val num_datatype_constrs = length oo datatype_constrs -(* string -> string *) fun constr_name_for_sel_like @{const_name fst} = @{const_name Pair} | constr_name_for_sel_like @{const_name snd} = @{const_name Pair} | constr_name_for_sel_like s' = original_name s' -(* hol_context -> bool -> styp -> styp *) fun binarized_and_boxed_constr_for_sel hol_ctxt binarize (s', T') = let val s = constr_name_for_sel_like s' in AList.lookup (op =) @@ -981,7 +895,6 @@ |> the |> pair s end -(* hol_context -> styp -> term *) fun discr_term_for_constr hol_ctxt (x as (s, T)) = let val dataT = body_type T in if s = @{const_name Suc} then @@ -992,7 +905,6 @@ else Abs (Name.uu, dataT, @{const True}) end -(* hol_context -> styp -> term -> term *) fun discriminate_value (hol_ctxt as {thy, ...}) x t = case head_of t of Const x' => @@ -1001,7 +913,6 @@ else betapply (discr_term_for_constr hol_ctxt x, t) | _ => betapply (discr_term_for_constr hol_ctxt x, t) -(* theory -> (typ option * bool) list -> styp -> term -> term *) fun nth_arg_sel_term_for_constr thy stds (x as (s, T)) n = let val (arg_Ts, dataT) = strip_type T in if dataT = nat_T andalso is_standard_datatype thy stds nat_T then @@ -1010,7 +921,6 @@ Const (nth_sel_for_constr x n) else let - (* int -> typ -> int * term *) fun aux m (Type (@{type_name "*"}, [T1, T2])) = let val (m, t1) = aux m T1 @@ -1023,7 +933,6 @@ (List.take (arg_Ts, n)) 0 in Abs ("x", dataT, aux m (nth arg_Ts n) |> snd) end end -(* theory -> (typ option * bool) list -> styp -> term -> int -> typ -> term *) fun select_nth_constr_arg thy stds x t n res_T = (case strip_comb t of (Const x', args) => @@ -1033,7 +942,6 @@ | _ => raise SAME()) handle SAME () => betapply (nth_arg_sel_term_for_constr thy stds x n, t) -(* theory -> (typ option * bool) list -> styp -> term list -> term *) fun construct_value _ _ x [] = Const x | construct_value thy stds (x as (s, _)) args = let val args = map Envir.eta_contract args in @@ -1050,7 +958,6 @@ | _ => list_comb (Const x, args) end -(* hol_context -> typ -> term -> term *) fun constr_expand (hol_ctxt as {thy, stds, ...}) T t = (case head_of t of Const x => if is_constr_like thy x then t else raise SAME () @@ -1070,17 +977,14 @@ (index_seq 0 (length arg_Ts)) arg_Ts) end -(* (term -> term) -> int -> term -> term *) fun coerce_bound_no f j t = case t of t1 $ t2 => coerce_bound_no f j t1 $ coerce_bound_no f j t2 | Abs (s, T, t') => Abs (s, T, coerce_bound_no f (j + 1) t') | Bound j' => if j' = j then f t else t | _ => t -(* hol_context -> typ -> typ -> term -> term *) fun coerce_bound_0_in_term hol_ctxt new_T old_T = old_T <> new_T ? 
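(* Illustrative sketch, not from the patched sources: the index shifting done
   by "coerce_bound_no" above. The coercion "f" is applied exactly to the
   occurrences of the selected bound variable, whose index is bumped under
   each abstraction:
     coerce_bound_no f 0 (Abs ("x", T, Bound 1 $ Bound 0))
       yields Abs ("x", T, f (Bound 1) $ Bound 0)
   because underneath the abstraction the variable of interest is Bound 1. *)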
coerce_bound_no (coerce_term hol_ctxt [new_T] old_T new_T) 0 -(* hol_context -> typ list -> typ -> typ -> term -> term *) and coerce_term (hol_ctxt as {thy, stds, fast_descrs, ...}) Ts new_T old_T t = if old_T = new_T then t @@ -1125,7 +1029,6 @@ raise TYPE ("Nitpick_HOL.coerce_term", [new_T, old_T], [t]) | _ => raise TYPE ("Nitpick_HOL.coerce_term", [new_T, old_T], [t]) -(* (typ * int) list -> typ -> int *) fun card_of_type assigns (Type (@{type_name fun}, [T1, T2])) = reasonable_power (card_of_type assigns T2) (card_of_type assigns T1) | card_of_type assigns (Type (@{type_name "*"}, [T1, T2])) = @@ -1139,7 +1042,6 @@ SOME k => k | NONE => if T = @{typ bisim_iterator} then 0 else raise TYPE ("Nitpick_HOL.card_of_type", [T], []) -(* int -> (typ * int) list -> typ -> int *) fun bounded_card_of_type max default_card assigns (Type (@{type_name fun}, [T1, T2])) = let @@ -1162,11 +1064,9 @@ card_of_type assigns T handle TYPE ("Nitpick_HOL.card_of_type", _, _) => default_card) -(* hol_context -> typ list -> int -> (typ * int) list -> typ -> int *) fun bounded_exact_card_of_type hol_ctxt finitizable_dataTs max default_card assigns T = let - (* typ list -> typ -> int *) fun aux avoid T = (if member (op =) avoid T then 0 @@ -1215,47 +1115,36 @@ val small_type_max_card = 5 -(* hol_context -> typ -> bool *) fun is_finite_type hol_ctxt T = bounded_exact_card_of_type hol_ctxt [] 1 2 [] T > 0 -(* hol_context -> typ -> bool *) fun is_small_finite_type hol_ctxt T = let val n = bounded_exact_card_of_type hol_ctxt [] 1 2 [] T in n > 0 andalso n <= small_type_max_card end -(* term -> bool *) fun is_ground_term (t1 $ t2) = is_ground_term t1 andalso is_ground_term t2 | is_ground_term (Const _) = true | is_ground_term _ = false -(* term -> word -> word *) fun hashw_term (t1 $ t2) = hashw (hashw_term t1, hashw_term t2) | hashw_term (Const (s, _)) = hashw_string (s, 0w0) | hashw_term _ = 0w0 -(* term -> int *) val hash_term = Word.toInt o hashw_term -(* term list -> (indexname * typ) list *) fun special_bounds ts = fold Term.add_vars ts [] |> sort (Term_Ord.fast_indexname_ord o pairself fst) -(* indexname * typ -> term -> term *) fun abs_var ((s, j), T) body = Abs (s, T, abstract_over (Var ((s, j), T), body)) -(* theory -> string -> bool *) fun is_funky_typedef_name thy s = member (op =) [@{type_name unit}, @{type_name "*"}, @{type_name "+"}, @{type_name int}] s orelse is_frac_type thy (Type (s, [])) -(* theory -> typ -> bool *) fun is_funky_typedef thy (Type (s, _)) = is_funky_typedef_name thy s | is_funky_typedef _ _ = false -(* term -> bool *) fun is_arity_type_axiom (Const (@{const_name HOL.type_class}, _) $ Const (@{const_name TYPE}, _)) = true | is_arity_type_axiom _ = false -(* theory -> bool -> term -> bool *) fun is_typedef_axiom thy boring (@{const "==>"} $ _ $ t2) = is_typedef_axiom thy boring t2 | is_typedef_axiom thy boring @@ -1264,7 +1153,6 @@ $ Const _ $ _)) = boring <> is_funky_typedef_name thy s andalso is_typedef thy s | is_typedef_axiom _ _ _ = false -(* term -> bool *) val is_class_axiom = Logic.strip_horn #> swap #> op :: #> forall (can Logic.dest_of_class) @@ -1272,7 +1160,6 @@ typedef axioms, and (3) other axioms, and returns the pair ((1), (3)). Typedef axioms are uninteresting to Nitpick, because it can retrieve them using "typedef_info". 
*) -(* theory -> (string * term) list -> string list -> term list * term list *) fun partition_axioms_by_definitionality thy axioms def_names = let val axioms = sort (fast_string_ord o pairself fst) axioms @@ -1285,15 +1172,12 @@ (* Ideally we would check against "Complex_Main", not "Refute", but any theory will do as long as it contains all the "axioms" and "axiomatization" commands. *) -(* theory -> bool *) fun is_built_in_theory thy = Theory.subthy (thy, @{theory Refute}) -(* term -> bool *) val is_trivial_definition = the_default false o try (op aconv o Logic.dest_equals) val is_plain_definition = let - (* term -> bool *) fun do_lhs t1 = case strip_comb t1 of (Const _, args) => @@ -1305,10 +1189,8 @@ | do_eq _ = false in do_eq end -(* theory -> (term * term) list -> term list * term list * term list *) fun all_axioms_of thy subst = let - (* theory list -> term list *) val axioms_of_thys = maps Thm.axioms_of #> map (apsnd (subst_atomic subst o prop_of)) @@ -1337,7 +1219,6 @@ user_defs @ built_in_defs in (defs, built_in_nondefs, user_nondefs) end -(* theory -> (typ option * bool) list -> bool -> styp -> int option *) fun arity_of_built_in_const thy stds fast_descrs (s, T) = if s = @{const_name If} then if nth_range_type 3 T = @{typ bool} then NONE else SOME 3 @@ -1365,12 +1246,10 @@ else NONE end -(* theory -> (typ option * bool) list -> bool -> styp -> bool *) val is_built_in_const = is_some oooo arity_of_built_in_const (* This function is designed to work for both real definition axioms and simplification rules (equational specifications). *) -(* term -> term *) fun term_under_def t = case t of @{const "==>"} $ _ $ t2 => term_under_def t2 @@ -1381,23 +1260,19 @@ | t1 $ _ => term_under_def t1 | _ => t -(* Here we crucially rely on "Refute.specialize_type" performing a preorder - traversal of the term, without which the wrong occurrence of a constant could - be matched in the face of overloading. *) -(* theory -> (typ option * bool) list -> bool -> const_table -> styp - -> term list *) +(* Here we crucially rely on "specialize_type" performing a preorder traversal + of the term, without which the wrong occurrence of a constant could be + matched in the face of overloading. 
*) fun def_props_for_const thy stds fast_descrs table (x as (s, _)) = if is_built_in_const thy stds fast_descrs x then [] else these (Symtab.lookup table s) - |> map_filter (try (Refute.specialize_type thy x)) + |> map_filter (try (specialize_type thy x)) |> filter (curry (op =) (Const x) o term_under_def) -(* term -> term option *) fun normalized_rhs_of t = let - (* term option -> term option *) fun aux (v as Var _) (SOME t) = SOME (lambda v t) | aux (c as Const (@{const_name TYPE}, _)) (SOME t) = SOME (lambda c t) | aux _ _ = NONE @@ -1410,7 +1285,6 @@ val args = strip_comb lhs |> snd in fold_rev aux args (SOME rhs) end -(* theory -> const_table -> styp -> term option *) fun def_of_const thy table (x as (s, _)) = if is_built_in_const thy [(NONE, false)] false x orelse original_name s <> s then @@ -1420,16 +1294,13 @@ |> normalized_rhs_of |> Option.map (prefix_abs_vars s) handle List.Empty => NONE -(* term -> fixpoint_kind *) fun fixpoint_kind_of_rhs (Abs (_, _, t)) = fixpoint_kind_of_rhs t | fixpoint_kind_of_rhs (Const (@{const_name lfp}, _) $ Abs _) = Lfp | fixpoint_kind_of_rhs (Const (@{const_name gfp}, _) $ Abs _) = Gfp | fixpoint_kind_of_rhs _ = NoFp -(* theory -> const_table -> term -> bool *) fun is_mutually_inductive_pred_def thy table t = let - (* term -> bool *) fun is_good_arg (Bound _) = true | is_good_arg (Const (s, _)) = s = @{const_name True} orelse s = @{const_name False} orelse @@ -1443,7 +1314,6 @@ | NONE => false) | _ => false end -(* theory -> const_table -> term -> term *) fun unfold_mutually_inductive_preds thy table = map_aterms (fn t as Const x => (case def_of_const thy table x of @@ -1455,7 +1325,6 @@ | NONE => t) | t => t) -(* theory -> (typ option * bool) list -> (string * int) list *) fun case_const_names thy stds = Symtab.fold (fn (dtype_s, {index, descr, case_name, ...}) => if is_basic_datatype thy stds dtype_s then @@ -1466,7 +1335,6 @@ (Datatype.get_all thy) [] @ map (apsnd length o snd) (#codatatypes (Data.get thy)) -(* theory -> const_table -> string * typ -> fixpoint_kind *) fun fixpoint_kind_of_const thy table x = if is_built_in_const thy [(NONE, false)] false x then NoFp @@ -1474,7 +1342,6 @@ fixpoint_kind_of_rhs (the (def_of_const thy table x)) handle Option.Option => NoFp -(* hol_context -> styp -> bool *) fun is_real_inductive_pred ({thy, stds, fast_descrs, def_table, intro_table, ...} : hol_context) x = fixpoint_kind_of_const thy def_table x <> NoFp andalso @@ -1490,7 +1357,6 @@ (is_real_equational_fun hol_ctxt orf is_real_inductive_pred hol_ctxt orf (String.isPrefix ubfp_prefix orf String.isPrefix lbfp_prefix) o fst) -(* term -> term *) fun lhs_of_equation t = case t of Const (@{const_name all}, _) $ Abs (_, _, t1) => lhs_of_equation t1 @@ -1501,7 +1367,6 @@ | Const (@{const_name "op ="}, _) $ t1 $ _ => SOME t1 | @{const "op -->"} $ _ $ t2 => lhs_of_equation t2 | _ => NONE -(* theory -> term -> bool *) fun is_constr_pattern _ (Bound _) = true | is_constr_pattern _ (Var _) = true | is_constr_pattern thy t = @@ -1516,21 +1381,19 @@ SOME t' => is_constr_pattern_lhs thy t' | NONE => false -(* Similar to "Refute.specialize_type" but returns all matches rather than only - the first (preorder) match. *) -(* theory -> styp -> term -> term list *) +(* Similar to "specialize_type" but returns all matches rather than only the + first (preorder) match. 
*) fun multi_specialize_type thy slack (s, T) t = let - (* term -> (typ * term) list -> (typ * term) list *) fun aux (Const (s', T')) ys = if s = s' then ys |> (if AList.defined (op =) ys T' then I else - cons (T', Refute.monomorphic_term - (Sign.typ_match thy (T', T) Vartab.empty) t) + cons (T', monomorphic_term (Sign.typ_match thy (T', T) + Vartab.empty) t) handle Type.TYPE_MATCH => I - | Refute.REFUTE _ => + | TERM _ => if slack then I else @@ -1540,22 +1403,18 @@ ys | aux _ ys = ys in map snd (fold_aterms aux t []) end -(* theory -> bool -> const_table -> styp -> term list *) fun nondef_props_for_const thy slack table (x as (s, _)) = these (Symtab.lookup table s) |> maps (multi_specialize_type thy slack x) -(* term -> term *) fun unvarify_term (t1 $ t2) = unvarify_term t1 $ unvarify_term t2 | unvarify_term (Var ((s, 0), T)) = Free (s, T) | unvarify_term (Abs (s, T, t')) = Abs (s, T, unvarify_term t') | unvarify_term t = t -(* theory -> term -> term *) fun axiom_for_choice_spec thy = unvarify_term #> Object_Logic.atomize_term thy #> Choice_Specification.close_form #> HOLogic.mk_Trueprop -(* hol_context -> styp -> bool *) fun is_choice_spec_fun ({thy, def_table, nondef_table, choice_spec_table, ...} : hol_context) x = case nondef_props_for_const thy true choice_spec_table x of @@ -1571,7 +1430,6 @@ ts') ts end -(* theory -> const_table -> term -> bool *) fun is_choice_spec_axiom thy choice_spec_table t = Symtab.exists (fn (_, ts) => exists (curry (op aconv) t o axiom_for_choice_spec thy) ts) @@ -1579,18 +1437,15 @@ (** Constant unfolding **) -(* theory -> (typ option * bool) list -> int * styp -> term *) fun constr_case_body thy stds (j, (x as (_, T))) = let val arg_Ts = binder_types T in list_comb (Bound j, map2 (select_nth_constr_arg thy stds x (Bound 0)) (index_seq 0 (length arg_Ts)) arg_Ts) end -(* hol_context -> typ -> int * styp -> term -> term *) fun add_constr_case (hol_ctxt as {thy, stds, ...}) res_T (j, x) res_t = Const (@{const_name If}, bool_T --> res_T --> res_T --> res_T) $ discriminate_value hol_ctxt x (Bound 0) $ constr_case_body thy stds (j, x) $ res_t -(* hol_context -> typ -> typ -> term *) fun optimized_case_def (hol_ctxt as {thy, stds, ...}) dataT res_T = let val xs = datatype_constrs hol_ctxt dataT @@ -1601,7 +1456,6 @@ |> fold_rev (add_constr_case hol_ctxt res_T) (length xs downto 2 ~~ xs') |> fold_rev (curry absdummy) (func_Ts @ [dataT]) end -(* hol_context -> string -> typ -> typ -> term -> term *) fun optimized_record_get (hol_ctxt as {thy, stds, ...}) s rec_T res_T t = let val constr_x = hd (datatype_constrs hol_ctxt rec_T) in case no_of_record_field thy s rec_T of @@ -1618,7 +1472,6 @@ [])) | j => select_nth_constr_arg thy stds constr_x t j res_T end -(* hol_context -> string -> typ -> term -> term -> term *) fun optimized_record_update (hol_ctxt as {thy, stds, ...}) s rec_T fun_t rec_t = let val constr_x as (_, constr_T) = hd (datatype_constrs hol_ctxt rec_T) @@ -1641,12 +1494,10 @@ (* Prevents divergence in case of cyclic or infinite definition dependencies. 
*) val unfold_max_depth = 255 -(* hol_context -> term -> term *) fun unfold_defs_in_term (hol_ctxt as {thy, ctxt, stds, fast_descrs, case_names, def_table, ground_thm_table, ersatz_table, ...}) = let - (* int -> typ list -> term -> term *) fun do_term depth Ts t = case t of (t0 as Const (@{const_name Int.number_class.number_of}, @@ -1696,13 +1547,11 @@ | Var _ => t | Bound _ => t | Abs (s, T, body) => Abs (s, T, do_term depth (T :: Ts) body) - (* int -> typ list -> styp -> term list -> int -> typ -> term * term list *) and select_nth_constr_arg_with_args _ _ (x as (_, T)) [] n res_T = (Abs (Name.uu, body_type T, select_nth_constr_arg thy stds x (Bound 0) n res_T), []) | select_nth_constr_arg_with_args depth Ts x (t :: ts) n res_T = (select_nth_constr_arg thy stds x (do_term depth Ts t) n res_T, ts) - (* int -> typ list -> term -> styp -> term list -> term *) and do_const depth Ts t (x as (s, T)) ts = case AList.lookup (op =) ersatz_table s of SOME s' => @@ -1783,39 +1632,30 @@ (** Axiom extraction/generation **) -(* term -> string * term *) fun pair_for_prop t = case term_under_def t of Const (s, _) => (s, t) | t' => raise TERM ("Nitpick_HOL.pair_for_prop", [t, t']) -(* (Proof.context -> term list) -> Proof.context -> (term * term) list - -> const_table *) fun def_table_for get ctxt subst = ctxt |> get |> map (pair_for_prop o subst_atomic subst) |> AList.group (op =) |> Symtab.make -(* term -> string * term *) fun paired_with_consts t = map (rpair t) (Term.add_const_names t []) -(* Proof.context -> (term * term) list -> term list -> const_table *) fun const_def_table ctxt subst ts = def_table_for (map prop_of o Nitpick_Defs.get) ctxt subst |> fold (fn (s, t) => Symtab.map_default (s, []) (cons t)) (map pair_for_prop ts) -(* term list -> const_table *) fun const_nondef_table ts = fold (append o paired_with_consts) ts [] |> AList.group (op =) |> Symtab.make -(* Proof.context -> (term * term) list -> const_table *) val const_simp_table = def_table_for (map prop_of o Nitpick_Simps.get) val const_psimp_table = def_table_for (map prop_of o Nitpick_Psimps.get) fun const_choice_spec_table ctxt subst = map (subst_atomic subst o prop_of) (Nitpick_Choice_Specs.get ctxt) |> const_nondef_table -(* Proof.context -> (term * term) list -> const_table -> const_table *) fun inductive_intro_table ctxt subst def_table = def_table_for (map (unfold_mutually_inductive_preds (ProofContext.theory_of ctxt) def_table o prop_of) o Nitpick_Intros.get) ctxt subst -(* theory -> term list Inttab.table *) fun ground_theorem_table thy = fold ((fn @{const Trueprop} $ t1 => is_ground_term t1 ? 
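(* Illustrative sketch, not from the patched sources: "ground_theorem_table"
   indexes ground facts by the constant-only hash computed by "hash_term"
   earlier in this file. For a theorem whose proposition is "Trueprop t1"
   with t1 ground, the fold above adds t1 to the bucket at key "hash_term t1";
   a later lookup can then hash a candidate term, fetch the bucket with
   Inttab.lookup and, presumably, finish with exact term comparisons. *)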
Inttab.map_default (hash_term t1, []) (cons t1) @@ -1831,24 +1671,20 @@ (@{const_name wf_wfrec}, @{const_name wf_wfrec'}), (@{const_name wfrec}, @{const_name wfrec'})] -(* theory -> (string * string) list *) fun ersatz_table thy = fold (append o snd) (#frac_types (Data.get thy)) basic_ersatz_table -(* const_table Unsynchronized.ref -> string -> term list -> unit *) fun add_simps simp_table s eqs = Unsynchronized.change simp_table (Symtab.update (s, eqs @ these (Symtab.lookup (!simp_table) s))) -(* theory -> styp -> term list *) fun inverse_axioms_for_rep_fun thy (x as (_, T)) = let val abs_T = domain_type T in typedef_info thy (fst (dest_Type abs_T)) |> the |> pairf #Abs_inverse #Rep_inverse - |> pairself (Refute.specialize_type thy x o prop_of o the) + |> pairself (specialize_type thy x o prop_of o the) ||> single |> op :: end -(* theory -> string * typ list -> term list *) fun optimized_typedef_axioms thy (abs_z as (abs_s, _)) = let val abs_T = Type abs_z in if is_univ_typedef thy abs_T then @@ -1861,7 +1697,7 @@ val set_t = Const (set_name, rep_T --> bool_T) val set_t' = prop_of_Rep |> HOLogic.dest_Trueprop - |> Refute.specialize_type thy (dest_Const rep_t) + |> specialize_type thy (dest_Const rep_t) |> HOLogic.dest_mem |> snd in [HOLogic.all_const abs_T @@ -1871,7 +1707,6 @@ end | NONE => [] end -(* Proof.context -> string * typ list -> term list *) fun optimized_quot_type_axioms ctxt stds abs_z = let val thy = ProofContext.theory_of ctxt @@ -1900,7 +1735,6 @@ HOLogic.mk_Trueprop (equiv_rel $ x_var $ normal_x))] end -(* hol_context -> typ -> term list *) fun codatatype_bisim_axioms (hol_ctxt as {thy, stds, ...}) T = let val xs = datatype_constrs hol_ctxt T @@ -1915,13 +1749,11 @@ $ (suc_const iter_T $ Bound 0) $ n_var) val x_var = Var (("x", 0), T) val y_var = Var (("y", 0), T) - (* styp -> int -> typ -> term *) fun nth_sub_bisim x n nth_T = (if is_codatatype thy nth_T then bisim_const $ n_var_minus_1 else HOLogic.eq_const nth_T) $ select_nth_constr_arg thy stds x x_var n nth_T $ select_nth_constr_arg thy stds x y_var n nth_T - (* styp -> term *) fun case_func (x as (_, T)) = let val arg_Ts = binder_types T @@ -1943,22 +1775,18 @@ exception NO_TRIPLE of unit -(* theory -> styp -> term -> term list * term list * term *) fun triple_for_intro_rule thy x t = let val prems = Logic.strip_imp_prems t |> map (Object_Logic.atomize_term thy) val concl = Logic.strip_imp_concl t |> Object_Logic.atomize_term thy val (main, side) = List.partition (exists_Const (curry (op =) x)) prems - (* term -> bool *) - val is_good_head = curry (op =) (Const x) o head_of + val is_good_head = curry (op =) (Const x) o head_of in if forall is_good_head main then (side, main, concl) else raise NO_TRIPLE () end -(* term -> term *) val tuple_for_args = HOLogic.mk_tuple o snd o strip_comb -(* indexname * typ -> term list -> term -> term -> term *) fun wf_constraint_for rel side concl main = let val core = HOLogic.mk_mem (HOLogic.mk_prod (tuple_for_args main, @@ -1972,12 +1800,9 @@ (t, vars) end -(* indexname * typ -> term list * term list * term -> term *) fun wf_constraint_for_triple rel (side, main, concl) = map (wf_constraint_for rel side concl) main |> foldr1 s_conj -(* Proof.context -> Time.time option -> thm - -> (Proof.context -> tactic -> tactic) -> bool *) fun terminates_by ctxt timeout goal tac = can (SINGLE (Classical.safe_tac (claset_of ctxt)) #> the #> SINGLE (DETERM_TIMEOUT timeout @@ -1993,7 +1818,6 @@ val termination_tacs = [Lexicographic_Order.lex_order_tac true, ScnpReconstruct.sizechange_tac] -(* 
hol_context -> const_table -> styp -> bool *) fun uncached_is_well_founded_inductive_pred ({thy, ctxt, stds, debug, fast_descrs, tac_timeout, intro_table, ...} : hol_context) (x as (_, T)) = @@ -2038,7 +1862,6 @@ (* The type constraint below is a workaround for a Poly/ML crash. *) -(* hol_context -> styp -> bool *) fun is_well_founded_inductive_pred (hol_ctxt as {thy, wfs, def_table, wf_cache, ...} : hol_context) (x as (s, _)) = @@ -2055,7 +1878,6 @@ Unsynchronized.change wf_cache (cons (x, (gfp, wf))); wf end -(* typ list -> typ -> term -> term *) fun ap_curry [_] _ t = t | ap_curry arg_Ts tuple_T t = let val n = length arg_Ts in @@ -2064,7 +1886,6 @@ $ mk_flat_tuple tuple_T (map Bound (n - 1 downto 0))) end -(* int -> term -> int *) fun num_occs_of_bound_in_term j (t1 $ t2) = op + (pairself (num_occs_of_bound_in_term j) (t1, t2)) | num_occs_of_bound_in_term j (Abs (_, _, t')) = @@ -2072,10 +1893,8 @@ | num_occs_of_bound_in_term j (Bound j') = if j' = j then 1 else 0 | num_occs_of_bound_in_term _ _ = 0 -(* term -> bool *) val is_linear_inductive_pred_def = let - (* int -> term -> bool *) fun do_disjunct j (Const (@{const_name Ex}, _) $ Abs (_, _, t2)) = do_disjunct (j + 1) t2 | do_disjunct j t = @@ -2083,7 +1902,6 @@ 0 => true | 1 => exists (curry (op =) (Bound j) o head_of) (conjuncts_of t) | _ => false - (* term -> bool *) fun do_lfp_def (Const (@{const_name lfp}, _) $ t2) = let val (xs, body) = strip_abs t2 in case length xs of @@ -2093,24 +1911,19 @@ | do_lfp_def _ = false in do_lfp_def o strip_abs_body end -(* int -> int list list *) fun n_ptuple_paths 0 = [] | n_ptuple_paths 1 = [] | n_ptuple_paths n = [] :: map (cons 2) (n_ptuple_paths (n - 1)) -(* int -> typ -> typ -> term -> term *) val ap_n_split = HOLogic.mk_psplits o n_ptuple_paths -(* term -> term * term *) val linear_pred_base_and_step_rhss = let - (* term -> term *) fun aux (Const (@{const_name lfp}, _) $ t2) = let val (xs, body) = strip_abs t2 val arg_Ts = map snd (tl xs) val tuple_T = HOLogic.mk_tupleT arg_Ts val j = length arg_Ts - (* int -> term -> term *) fun repair_rec j (Const (@{const_name Ex}, T1) $ Abs (s2, T2, t2')) = Const (@{const_name Ex}, T1) $ Abs (s2, T2, repair_rec (j + 1) t2') @@ -2140,7 +1953,6 @@ raise TERM ("Nitpick_HOL.linear_pred_base_and_step_rhss.aux", [t]) in aux end -(* hol_context -> styp -> term -> term *) fun starred_linear_pred_const (hol_ctxt as {simp_table, ...}) (s, T) def = let val j = maxidx_of_term def + 1 @@ -2174,7 +1986,6 @@ |> unfold_defs_in_term hol_ctxt end -(* hol_context -> bool -> styp -> term *) fun unrolled_inductive_pred_const (hol_ctxt as {thy, star_linear_preds, def_table, simp_table, ...}) gfp (x as (s, T)) = @@ -2211,7 +2022,6 @@ in unrolled_const end end -(* hol_context -> styp -> term *) fun raw_inductive_pred_axiom ({thy, def_table, ...} : hol_context) x = let val def = the (def_of_const thy def_table x) @@ -2238,7 +2048,6 @@ else raw_inductive_pred_axiom hol_ctxt x -(* hol_context -> styp -> term list *) fun raw_equational_fun_axioms (hol_ctxt as {thy, stds, fast_descrs, simp_table, psimp_table, ...}) x = case def_props_for_const thy stds fast_descrs (!simp_table) x of @@ -2247,7 +2056,6 @@ | psimps => psimps) | simps => simps val equational_fun_axioms = map extensionalize oo raw_equational_fun_axioms -(* hol_context -> styp -> bool *) fun is_equational_fun_surely_complete hol_ctxt x = case raw_equational_fun_axioms hol_ctxt x of [@{const Trueprop} $ (Const (@{const_name "op ="}, _) $ t1 $ _)] => @@ -2256,10 +2064,8 @@ (** Type preprocessing **) -(* term list -> term 
list *) fun merge_type_vars_in_terms ts = let - (* typ -> (sort * string) list -> (sort * string) list *) fun add_type (TFree (s, S)) table = (case AList.lookup (op =) table S of SOME s' => @@ -2268,12 +2074,10 @@ | NONE => (S, s) :: table) | add_type _ table = table val table = fold (fold_types (fold_atyps add_type)) ts [] - (* typ -> typ *) fun coalesce (TFree (_, S)) = TFree (AList.lookup (op =) table S |> the, S) | coalesce T = T in map (map_types (map_atyps coalesce)) ts end -(* hol_context -> bool -> typ -> typ list -> typ list *) fun add_ground_types hol_ctxt binarize = let fun aux T accum = @@ -2294,10 +2098,8 @@ | _ => insert (op =) T accum in aux end -(* hol_context -> bool -> typ -> typ list *) fun ground_types_in_type hol_ctxt binarize T = add_ground_types hol_ctxt binarize T [] -(* hol_context -> term list -> typ list *) fun ground_types_in_terms hol_ctxt binarize ts = fold (fold_types (add_ground_types hol_ctxt binarize)) ts [] diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/nitpick_isar.ML --- a/src/HOL/Tools/Nitpick/nitpick_isar.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick_isar.ML Tue May 04 20:30:22 2010 +0200 @@ -55,22 +55,16 @@ ("binary_ints", "smart"), ("destroy_constrs", "true"), ("specialize", "true"), - ("skolemize", "true"), ("star_linear_preds", "true"), - ("uncurry", "true"), ("fast_descrs", "true"), ("peephole_optim", "true"), ("timeout", "30 s"), ("tac_timeout", "500 ms"), - ("sym_break", "20"), - ("sharing_depth", "3"), - ("flatten_props", "false"), ("max_threads", "0"), ("debug", "false"), ("verbose", "false"), ("overlord", "false"), ("show_all", "false"), - ("show_skolems", "true"), ("show_datatypes", "false"), ("show_consts", "false"), ("format", "1"), @@ -93,23 +87,18 @@ ("unary_ints", "binary_ints"), ("dont_destroy_constrs", "destroy_constrs"), ("dont_specialize", "specialize"), - ("dont_skolemize", "skolemize"), ("dont_star_linear_preds", "star_linear_preds"), - ("dont_uncurry", "uncurry"), ("full_descrs", "fast_descrs"), ("no_peephole_optim", "peephole_optim"), - ("dont_flatten_props", "flatten_props"), ("no_debug", "debug"), ("quiet", "verbose"), ("no_overlord", "overlord"), ("dont_show_all", "show_all"), - ("hide_skolems", "show_skolems"), ("hide_datatypes", "show_datatypes"), ("hide_consts", "show_consts"), ("trust_potential", "check_potential"), ("trust_genuine", "check_genuine")] -(* string -> bool *) fun is_known_raw_param s = AList.defined (op =) default_default_params s orelse AList.defined (op =) negated_params s orelse @@ -118,19 +107,16 @@ ["card", "max", "iter", "box", "dont_box", "finitize", "dont_finitize", "mono", "non_mono", "std", "non_std", "wf", "non_wf", "format"] -(* string * 'a -> unit *) fun check_raw_param (s, _) = if is_known_raw_param s then () else error ("Unknown parameter: " ^ quote s ^ ".") -(* string -> string option *) fun unnegate_param_name name = case AList.lookup (op =) negated_params name of NONE => if String.isPrefix "dont_" name then SOME (unprefix "dont_" name) else if String.isPrefix "non_" name then SOME (unprefix "non_" name) else NONE | some_name => some_name -(* raw_param -> raw_param *) fun unnegate_raw_param (name, value) = case unnegate_param_name name of SOME name' => (name', case value of @@ -142,47 +128,36 @@ structure Data = Theory_Data( type T = raw_param list - val empty = default_default_params |> map (apsnd single) + val empty = map (apsnd single) default_default_params val extend = I - fun merge p : T = AList.merge (op =) (K true) p) + fun merge (x, y) = 
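(* Illustrative sketch, not from the patched sources: the normalization
   performed by "unnegate_param_name" earlier in this hunk.
     unnegate_param_name "quiet"     yields SOME "verbose"  (from negated_params)
     unnegate_param_name "dont_box"  yields SOME "box"      (from the "dont_" prefix)
     unnegate_param_name "non_std"   yields SOME "std"      (from the "non_" prefix)
     unnegate_param_name "timeout"   yields NONE
   "unnegate_raw_param" is then expected to flip the associated value. *)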
AList.merge (op =) (K true) (x, y)) -(* raw_param -> theory -> theory *) val set_default_raw_param = Data.map o AList.update (op =) o unnegate_raw_param -(* theory -> raw_param list *) val default_raw_params = Data.get -(* string -> bool *) fun is_punctuation s = (s = "," orelse s = "-" orelse s = "\") -(* string list -> string *) fun stringify_raw_param_value [] = "" | stringify_raw_param_value [s] = s | stringify_raw_param_value (s1 :: s2 :: ss) = s1 ^ (if is_punctuation s1 orelse is_punctuation s2 then "" else " ") ^ stringify_raw_param_value (s2 :: ss) -(* int -> string -> int *) fun maxed_int_from_string min_int s = Int.max (min_int, the (Int.fromString s)) -(* Proof.context -> bool -> raw_param list -> raw_param list -> params *) fun extract_params ctxt auto default_params override_params = let val override_params = map unnegate_raw_param override_params val raw_params = rev override_params @ rev default_params - (* string -> string *) val lookup = Option.map stringify_raw_param_value o AList.lookup (op =) raw_params val lookup_string = the_default "" o lookup - (* bool -> bool option -> string -> bool option *) fun general_lookup_bool option default_value name = case lookup name of - SOME s => Sledgehammer_Util.parse_bool_option option name s + SOME s => parse_bool_option option name s | NONE => default_value - (* string -> bool *) val lookup_bool = the o general_lookup_bool false (SOME false) - (* string -> bool option *) val lookup_bool_option = general_lookup_bool true NONE - (* string -> string option -> int *) fun do_int name value = case value of SOME s => (case Int.fromString s of @@ -190,14 +165,11 @@ | NONE => error ("Parameter " ^ quote name ^ " must be assigned an integer value.")) | NONE => 0 - (* string -> int *) fun lookup_int name = do_int name (lookup name) - (* string -> int option *) fun lookup_int_option name = case lookup name of SOME "smart" => NONE | value => SOME (do_int name value) - (* string -> int -> string -> int list *) fun int_range_from_string name min_int s = let val (k1, k2) = @@ -211,17 +183,14 @@ handle Option.Option => error ("Parameter " ^ quote name ^ " must be assigned a sequence of integers.") - (* string -> int -> string -> int list *) fun int_seq_from_string name min_int s = maps (int_range_from_string name min_int) (space_explode "," s) - (* string -> int -> int list *) fun lookup_int_seq name min_int = case lookup name of SOME s => (case int_seq_from_string name min_int s of [] => [min_int] | value => value) | NONE => [min_int] - (* (string -> 'a) -> int -> string -> ('a option * int list) list *) fun lookup_ints_assigns read prefix min_int = (NONE, lookup_int_seq prefix min_int) :: map (fn (name, value) => @@ -229,38 +198,31 @@ value |> stringify_raw_param_value |> int_seq_from_string name min_int)) (filter (String.isPrefix (prefix ^ " ") o fst) raw_params) - (* (string -> 'a) -> string -> ('a option * bool) list *) fun lookup_bool_assigns read prefix = (NONE, lookup_bool prefix) :: map (fn (name, value) => (SOME (read (String.extract (name, size prefix + 1, NONE))), value |> stringify_raw_param_value - |> Sledgehammer_Util.parse_bool_option false name - |> the)) + |> parse_bool_option false name |> the)) (filter (String.isPrefix (prefix ^ " ") o fst) raw_params) - (* (string -> 'a) -> string -> ('a option * bool option) list *) fun lookup_bool_option_assigns read prefix = (NONE, lookup_bool_option prefix) :: map (fn (name, value) => (SOME (read (String.extract (name, size prefix + 1, NONE))), value |> stringify_raw_param_value - |> 
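(* Illustrative sketch, not from the patched sources: the token lists parsed
   from the Isar syntax are glued back into a single string by
   "stringify_raw_param_value" above, with no space next to punctuation
   tokens:
     stringify_raw_param_value ["1", "-", "3"]   yields "1-3"
     stringify_raw_param_value ["500", "ms"]     yields "500 ms" *)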
Sledgehammer_Util.parse_bool_option true name)) + |> parse_bool_option true name)) (filter (String.isPrefix (prefix ^ " ") o fst) raw_params) - (* string -> Time.time option *) fun lookup_time name = case lookup name of NONE => NONE - | SOME s => Sledgehammer_Util.parse_time_option name s - (* string -> term list *) + | SOME s => parse_time_option name s val lookup_term_list = AList.lookup (op =) raw_params #> these #> Syntax.read_terms ctxt val read_type_polymorphic = Syntax.read_typ ctxt #> Logic.mk_type #> singleton (Variable.polymorphic ctxt) #> Logic.dest_type - (* string -> term *) val read_term_polymorphic = Syntax.read_term ctxt #> singleton (Variable.polymorphic ctxt) - (* string -> styp *) val read_const_polymorphic = read_term_polymorphic #> dest_Const val cards_assigns = lookup_ints_assigns read_type_polymorphic "card" 1 val maxes_assigns = lookup_ints_assigns read_const_polymorphic "max" ~1 @@ -284,19 +246,13 @@ val binary_ints = lookup_bool_option "binary_ints" val destroy_constrs = lookup_bool "destroy_constrs" val specialize = lookup_bool "specialize" - val skolemize = lookup_bool "skolemize" val star_linear_preds = lookup_bool "star_linear_preds" - val uncurry = lookup_bool "uncurry" val fast_descrs = lookup_bool "fast_descrs" val peephole_optim = lookup_bool "peephole_optim" val timeout = if auto then NONE else lookup_time "timeout" val tac_timeout = lookup_time "tac_timeout" - val sym_break = Int.max (0, lookup_int "sym_break") - val sharing_depth = Int.max (1, lookup_int "sharing_depth") - val flatten_props = lookup_bool "flatten_props" val max_threads = Int.max (0, lookup_int "max_threads") val show_all = debug orelse lookup_bool "show_all" - val show_skolems = show_all orelse lookup_bool "show_skolems" val show_datatypes = show_all orelse lookup_bool "show_datatypes" val show_consts = show_all orelse lookup_bool "show_consts" val formats = lookup_ints_assigns read_term_polymorphic "format" 0 @@ -306,9 +262,10 @@ val max_genuine = Int.max (0, lookup_int "max_genuine") val check_potential = lookup_bool "check_potential" val check_genuine = lookup_bool "check_genuine" - val batch_size = case lookup_int_option "batch_size" of - SOME n => Int.max (1, n) - | NONE => if debug then 1 else 64 + val batch_size = + case lookup_int_option "batch_size" of + SOME n => Int.max (1, n) + | NONE => if debug then 1 else 64 val expect = lookup_string "expect" in {cards_assigns = cards_assigns, maxes_assigns = maxes_assigns, @@ -319,37 +276,28 @@ user_axioms = user_axioms, assms = assms, merge_type_vars = merge_type_vars, binary_ints = binary_ints, destroy_constrs = destroy_constrs, specialize = specialize, - skolemize = skolemize, star_linear_preds = star_linear_preds, - uncurry = uncurry, fast_descrs = fast_descrs, + star_linear_preds = star_linear_preds, fast_descrs = fast_descrs, peephole_optim = peephole_optim, timeout = timeout, - tac_timeout = tac_timeout, sym_break = sym_break, - sharing_depth = sharing_depth, flatten_props = flatten_props, - max_threads = max_threads, show_skolems = show_skolems, + tac_timeout = tac_timeout, max_threads = max_threads, show_datatypes = show_datatypes, show_consts = show_consts, formats = formats, evals = evals, max_potential = max_potential, max_genuine = max_genuine, check_potential = check_potential, check_genuine = check_genuine, batch_size = batch_size, expect = expect} end -(* theory -> (string * string) list -> params *) fun default_params thy = - extract_params (ProofContext.init thy) false (default_raw_params thy) + extract_params 
(ProofContext.init_global thy) false (default_raw_params thy) o map (apsnd single) -(* P.token list -> string * P.token list *) val parse_key = Scan.repeat1 P.typ_group >> space_implode " " -(* P.token list -> string list * P.token list *) val parse_value = Scan.repeat1 (P.minus >> single || Scan.repeat1 (Scan.unless P.minus P.name) || P.$$$ "," |-- P.number >> prefix "," >> single) >> flat -(* P.token list -> raw_param * P.token list *) val parse_param = parse_key -- Scan.optional (P.$$$ "=" |-- parse_value) [] -(* P.token list -> raw_param list * P.token list *) val parse_params = Scan.optional (P.$$$ "[" |-- P.list parse_param --| P.$$$ "]") [] -(* Proof.context -> ('a -> 'a) -> 'a -> 'a *) fun handle_exceptions ctxt f x = f x handle ARG (loc, details) => @@ -387,7 +335,6 @@ | Refute.REFUTE (loc, details) => error ("Unhandled Refute error (" ^ quote loc ^ "): " ^ details ^ ".") -(* raw_param list -> bool -> int -> int -> Proof.state -> bool * Proof.state *) fun pick_nits override_params auto i step state = let val thy = Proof.theory_of state @@ -395,7 +342,6 @@ val _ = List.app check_raw_param override_params val params as {blocking, debug, ...} = extract_params ctxt auto (default_raw_params thy) override_params - (* unit -> bool * Proof.state *) fun go () = (false, state) |> (if auto then perhaps o try @@ -408,17 +354,14 @@ else (Toplevel.thread true (fn () => (go (); ())); (false, state)) end -(* raw_param list * int -> Toplevel.transition -> Toplevel.transition *) fun nitpick_trans (params, i) = Toplevel.keep (fn st => (pick_nits params false i (Toplevel.proof_position_of st) (Toplevel.proof_of st); ())) -(* raw_param -> string *) fun string_for_raw_param (name, value) = name ^ " = " ^ stringify_raw_param_value value -(* raw_param list -> Toplevel.transition -> Toplevel.transition *) fun nitpick_params_trans params = Toplevel.theory (fold set_default_raw_param params @@ -431,20 +374,17 @@ params |> map string_for_raw_param |> sort_strings |> cat_lines))))) -(* P.token list - -> (Toplevel.transition -> Toplevel.transition) * P.token list *) val parse_nitpick_command = (parse_params -- Scan.optional P.nat 1) #>> nitpick_trans val parse_nitpick_params_command = parse_params #>> nitpick_params_trans val _ = OuterSyntax.improper_command "nitpick" - "try to find a counterexample for a given subgoal using Kodkod" + "try to find a counterexample for a given subgoal using Nitpick" K.diag parse_nitpick_command val _ = OuterSyntax.command "nitpick_params" "set and display the default parameters for Nitpick" K.thy_decl parse_nitpick_params_command -(* Proof.state -> bool * Proof.state *) fun auto_nitpick state = if not (!auto) then (false, state) else pick_nits [] true 1 0 state diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/nitpick_kodkod.ML --- a/src/HOL/Tools/Nitpick/nitpick_kodkod.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick_kodkod.ML Tue May 04 20:30:22 2010 +0200 @@ -57,24 +57,19 @@ structure NfaGraph = Typ_Graph -(* int -> KK.int_expr list *) fun flip_nums n = index_seq 1 n @ [0] |> map KK.Num -(* int -> int -> int -> KK.bound list -> KK.formula -> int *) fun univ_card nat_card int_card main_j0 bounds formula = let - (* KK.rel_expr -> int -> int *) fun rel_expr_func r k = Int.max (k, case r of KK.Atom j => j + 1 | KK.AtomSeq (k', j0) => j0 + k' | _ => 0) - (* KK.tuple -> int -> int *) fun tuple_func t k = case t of KK.Tuple js => fold Integer.max (map (Integer.add 1) js) k | _ => k - (* KK.tuple_set -> int -> int *) fun tuple_set_func ts k = Int.max 
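(* Illustrative sketch, not from the patched sources: "flip_nums" above
   rotates the atom indices so that 0 comes last, e.g.
     flip_nums 3   yields [KK.Num 1, KK.Num 2, KK.Num 3, KK.Num 0]
   (assuming the usual "index_seq" from Nitpick's utility library), and
   "univ_card" folds over the bounds and the formula to find, roughly, the
   largest atom index mentioned anywhere, so the universe is large enough. *)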
(k, case ts of KK.TupleAtomSeq (k', j0) => j0 + k' | _ => 0) val expr_F = {formula_func = K I, rel_expr_func = rel_expr_func, @@ -84,10 +79,8 @@ |> KK.fold_formula expr_F formula in Int.max (main_j0 + fold Integer.max [2, nat_card, int_card] 0, card) end -(* int -> KK.formula -> unit *) fun check_bits bits formula = let - (* KK.int_expr -> unit -> unit *) fun int_expr_func (KK.Num k) () = if is_twos_complement_representable bits k then () @@ -100,7 +93,6 @@ int_expr_func = int_expr_func} in KK.fold_formula expr_F formula () end -(* int -> int -> unit *) fun check_arity univ_card n = if n > KK.max_arity univ_card then raise TOO_LARGE ("Nitpick_Kodkod.check_arity", @@ -109,7 +101,6 @@ else () -(* bool -> int -> int list -> KK.tuple *) fun kk_tuple debug univ_card js = if debug then KK.Tuple js @@ -117,19 +108,13 @@ KK.TupleIndex (length js, fold (fn j => fn accum => accum * univ_card + j) js 0) -(* (int * int) list -> KK.tuple_set *) val tuple_set_from_atom_schema = foldl1 KK.TupleProduct o map KK.TupleAtomSeq -(* rep -> KK.tuple_set *) val upper_bound_for_rep = tuple_set_from_atom_schema o atom_schema_of_rep -(* int -> KK.tuple_set *) val single_atom = KK.TupleSet o single o KK.Tuple o single -(* int -> KK.int_bound list *) fun sequential_int_bounds n = [(NONE, map single_atom (index_seq 0 n))] -(* int -> int -> KK.int_bound list *) fun pow_of_two_int_bounds bits j0 = let - (* int -> int -> int -> KK.int_bound list *) fun aux 0 _ _ = [] | aux 1 pow_of_two j = [(SOME (~ pow_of_two), [single_atom j])] | aux iter pow_of_two j = @@ -137,10 +122,8 @@ aux (iter - 1) (2 * pow_of_two) (j + 1) in aux (bits + 1) 1 j0 end -(* KK.formula -> KK.n_ary_index list *) fun built_in_rels_in_formula formula = let - (* KK.rel_expr -> KK.n_ary_index list -> KK.n_ary_index list *) fun rel_expr_func (KK.Rel (x as (n, j))) = if x = unsigned_bit_word_sel_rel orelse x = signed_bit_word_sel_rel then I @@ -155,7 +138,6 @@ val max_table_size = 65536 -(* int -> unit *) fun check_table_size k = if k > max_table_size then raise TOO_LARGE ("Nitpick_Kodkod.check_table_size", @@ -163,7 +145,6 @@ else () -(* bool -> int -> int * int -> (int -> int) -> KK.tuple list *) fun tabulate_func1 debug univ_card (k, j0) f = (check_table_size k; map_filter (fn j1 => let val j2 = f j1 in @@ -172,7 +153,6 @@ else NONE end) (index_seq 0 k)) -(* bool -> int -> int * int -> int -> (int * int -> int) -> KK.tuple list *) fun tabulate_op2 debug univ_card (k, j0) res_j0 f = (check_table_size (k * k); map_filter (fn j => let @@ -186,8 +166,6 @@ else NONE end) (index_seq 0 (k * k))) -(* bool -> int -> int * int -> int -> (int * int -> int * int) - -> KK.tuple list *) fun tabulate_op2_2 debug univ_card (k, j0) res_j0 f = (check_table_size (k * k); map_filter (fn j => let @@ -202,33 +180,27 @@ else NONE end) (index_seq 0 (k * k))) -(* bool -> int -> int * int -> (int * int -> int) -> KK.tuple list *) fun tabulate_nat_op2 debug univ_card (k, j0) f = tabulate_op2 debug univ_card (k, j0) j0 (atom_for_nat (k, 0) o f) fun tabulate_int_op2 debug univ_card (k, j0) f = tabulate_op2 debug univ_card (k, j0) j0 (atom_for_int (k, 0) o f o pairself (int_for_atom (k, 0))) -(* bool -> int -> int * int -> (int * int -> int * int) -> KK.tuple list *) fun tabulate_int_op2_2 debug univ_card (k, j0) f = tabulate_op2_2 debug univ_card (k, j0) j0 (pairself (atom_for_int (k, 0)) o f o pairself (int_for_atom (k, 0))) -(* int * int -> int *) fun isa_div (m, n) = m div n handle General.Div => 0 fun isa_mod (m, n) = m mod n handle General.Div => m fun isa_gcd (m, 0) = m | 
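(* Illustrative sketch, not from the patched sources: the non-debug branch of
   "kk_tuple" above encodes a tuple of atom indices as a single
   base-"univ_card" number. With univ_card = 10 and js = [3, 1, 4]:
     fold (fn j => fn accum => accum * univ_card + j) js 0
       = ((0 * 10 + 3) * 10 + 1) * 10 + 4
       = 314
   so the tuple is represented as KK.TupleIndex (3, 314). *)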
isa_gcd (m, n) = isa_gcd (n, isa_mod (m, n)) fun isa_lcm (m, n) = isa_div (m * n, isa_gcd (m, n)) val isa_zgcd = isa_gcd o pairself abs -(* int * int -> int * int *) fun isa_norm_frac (m, n) = if n < 0 then isa_norm_frac (~m, ~n) else if m = 0 orelse n = 0 then (0, 1) else let val p = isa_zgcd (m, n) in (isa_div (m, p), isa_div (n, p)) end -(* bool -> int -> int -> int -> int -> int * int - -> string * bool * KK.tuple list *) fun tabulate_built_in_rel debug univ_card nat_card int_card j0 (x as (n, _)) = (check_arity univ_card n; if x = not3_rel then @@ -269,25 +241,21 @@ else raise ARG ("Nitpick_Kodkod.tabulate_built_in_rel", "unknown relation")) -(* bool -> int -> int -> int -> int -> int * int -> KK.rel_expr -> KK.bound *) fun bound_for_built_in_rel debug univ_card nat_card int_card j0 x = let val (nick, ts) = tabulate_built_in_rel debug univ_card nat_card int_card j0 x in ([(x, nick)], [KK.TupleSet ts]) end -(* bool -> int -> int -> int -> int -> KK.formula -> KK.bound list *) fun bounds_for_built_in_rels_in_formula debug univ_card nat_card int_card j0 = map (bound_for_built_in_rel debug univ_card nat_card int_card j0) o built_in_rels_in_formula -(* Proof.context -> bool -> string -> typ -> rep -> string *) fun bound_comment ctxt debug nick T R = short_name nick ^ (if debug then " :: " ^ unyxml (Syntax.string_of_typ ctxt T) else "") ^ " : " ^ string_for_rep R -(* Proof.context -> bool -> nut -> KK.bound *) fun bound_for_plain_rel ctxt debug (u as FreeRel (x, T, R, nick)) = ([(x, bound_comment ctxt debug nick T R)], if nick = @{const_name bisim_iterator_max} then @@ -299,7 +267,6 @@ | bound_for_plain_rel _ _ u = raise NUT ("Nitpick_Kodkod.bound_for_plain_rel", [u]) -(* Proof.context -> bool -> dtype_spec list -> nut -> KK.bound *) fun bound_for_sel_rel ctxt debug dtypes (FreeRel (x, T as Type (@{type_name fun}, [T1, T2]), R as Func (Atom (_, j0), R2), nick)) = @@ -331,12 +298,9 @@ | bound_for_sel_rel _ _ _ u = raise NUT ("Nitpick_Kodkod.bound_for_sel_rel", [u]) -(* KK.bound list -> KK.bound list *) fun merge_bounds bs = let - (* KK.bound -> int *) fun arity (zs, _) = fst (fst (hd zs)) - (* KK.bound list -> KK.bound -> KK.bound list -> KK.bound list *) fun add_bound ds b [] = List.revAppend (ds, [b]) | add_bound ds b (c :: cs) = if arity b = arity c andalso snd b = snd c then @@ -345,40 +309,33 @@ add_bound (c :: ds) b cs in fold (add_bound []) bs [] end -(* int -> int -> KK.rel_expr list *) fun unary_var_seq j0 n = map (curry KK.Var 1) (index_seq j0 n) -(* int list -> KK.rel_expr *) val singleton_from_combination = foldl1 KK.Product o map KK.Atom -(* rep -> KK.rel_expr list *) fun all_singletons_for_rep R = if is_lone_rep R then all_combinations_for_rep R |> map singleton_from_combination else raise REP ("Nitpick_Kodkod.all_singletons_for_rep", [R]) -(* KK.rel_expr -> KK.rel_expr list *) fun unpack_products (KK.Product (r1, r2)) = unpack_products r1 @ unpack_products r2 | unpack_products r = [r] fun unpack_joins (KK.Join (r1, r2)) = unpack_joins r1 @ unpack_joins r2 | unpack_joins r = [r] -(* rep -> KK.rel_expr *) val empty_rel_for_rep = empty_n_ary_rel o arity_of_rep fun full_rel_for_rep R = case atom_schema_of_rep R of [] => raise REP ("Nitpick_Kodkod.full_rel_for_rep", [R]) | schema => foldl1 KK.Product (map KK.AtomSeq schema) -(* int -> int list -> KK.decl list *) fun decls_for_atom_schema j0 schema = map2 (fn j => fn x => KK.DeclOne ((1, j), KK.AtomSeq x)) (index_seq j0 (length schema)) schema (* The type constraint below is a workaround for a Poly/ML bug. 
*) -(* kodkod_constrs -> rep -> KK.rel_expr -> KK.formula *) fun d_n_ary_function ({kk_all, kk_join, kk_lone, kk_one, ...} : kodkod_constrs) R r = let val body_R = body_rep R in @@ -420,14 +377,11 @@ d_n_ary_function kk R r | kk_n_ary_function kk R r = d_n_ary_function kk R r -(* kodkod_constrs -> KK.rel_expr list -> KK.formula *) fun kk_disjoint_sets _ [] = KK.True | kk_disjoint_sets (kk as {kk_and, kk_no, kk_intersect, ...} : kodkod_constrs) (r :: rs) = fold (kk_and o kk_no o kk_intersect r) rs (kk_disjoint_sets kk rs) -(* int -> kodkod_constrs -> (KK.rel_expr -> KK.rel_expr) -> KK.rel_expr - -> KK.rel_expr *) fun basic_rel_rel_let j ({kk_rel_let, ...} : kodkod_constrs) f r = if inline_rel_expr r then f r @@ -435,36 +389,25 @@ let val x = (KK.arity_of_rel_expr r, j) in kk_rel_let [KK.AssignRelReg (x, r)] (f (KK.RelReg x)) end -(* kodkod_constrs -> (KK.rel_expr -> KK.rel_expr) -> KK.rel_expr - -> KK.rel_expr *) val single_rel_rel_let = basic_rel_rel_let 0 -(* kodkod_constrs -> (KK.rel_expr -> KK.rel_expr -> KK.rel_expr) -> KK.rel_expr - -> KK.rel_expr -> KK.rel_expr *) fun double_rel_rel_let kk f r1 r2 = single_rel_rel_let kk (fn r1 => basic_rel_rel_let 1 kk (f r1) r2) r1 -(* kodkod_constrs -> (KK.rel_expr -> KK.rel_expr -> KK.rel_expr -> KK.rel_expr) - -> KK.rel_expr -> KK.rel_expr -> KK.rel_expr -> KK.rel_expr *) fun triple_rel_rel_let kk f r1 r2 r3 = double_rel_rel_let kk (fn r1 => fn r2 => basic_rel_rel_let 2 kk (f r1 r2) r3) r1 r2 -(* kodkod_constrs -> int -> KK.formula -> KK.rel_expr *) fun atom_from_formula ({kk_rel_if, ...} : kodkod_constrs) j0 f = kk_rel_if f (KK.Atom (j0 + 1)) (KK.Atom j0) -(* kodkod_constrs -> rep -> KK.formula -> KK.rel_expr *) fun rel_expr_from_formula kk R f = case unopt_rep R of Atom (2, j0) => atom_from_formula kk j0 f | _ => raise REP ("Nitpick_Kodkod.rel_expr_from_formula", [R]) -(* kodkod_cotrs -> int -> int -> KK.rel_expr -> KK.rel_expr list *) fun unpack_vect_in_chunks ({kk_project_seq, ...} : kodkod_constrs) chunk_arity num_chunks r = List.tabulate (num_chunks, fn j => kk_project_seq r (j * chunk_arity) chunk_arity) -(* kodkod_constrs -> bool -> rep -> rep -> KK.rel_expr -> KK.rel_expr - -> KK.rel_expr *) fun kk_n_fold_join (kk as {kk_intersect, kk_product, kk_join, kk_project_seq, ...}) one R1 res_R r1 r2 = @@ -484,8 +427,6 @@ arity1 (arity_of_rep res_R) end -(* kodkod_constrs -> rep -> rep -> KK.rel_expr -> KK.rel_expr list - -> KK.rel_expr list -> KK.rel_expr *) fun kk_case_switch (kk as {kk_union, kk_product, ...}) R1 R2 r rs1 rs2 = if rs1 = rs2 then r else kk_n_fold_join kk true R1 R2 r (fold1 kk_union (map2 kk_product rs1 rs2)) @@ -493,7 +434,6 @@ val lone_rep_fallback_max_card = 4096 val some_j0 = 0 -(* kodkod_constrs -> rep -> rep -> KK.rel_expr -> KK.rel_expr *) fun lone_rep_fallback kk new_R old_R r = if old_R = new_R then r @@ -510,7 +450,6 @@ else raise REP ("Nitpick_Kodkod.lone_rep_fallback", [old_R, new_R]) end -(* kodkod_constrs -> int * int -> rep -> KK.rel_expr -> KK.rel_expr *) and atom_from_rel_expr kk x old_R r = case old_R of Func (R1, R2) => @@ -523,7 +462,6 @@ end | Opt _ => raise REP ("Nitpick_Kodkod.atom_from_rel_expr", [old_R]) | _ => lone_rep_fallback kk (Atom x) old_R r -(* kodkod_constrs -> rep list -> rep -> KK.rel_expr -> KK.rel_expr *) and struct_from_rel_expr kk Rs old_R r = case old_R of Atom _ => lone_rep_fallback kk (Struct Rs) old_R r @@ -547,7 +485,6 @@ lone_rep_fallback kk (Struct Rs) old_R r end | _ => raise REP ("Nitpick_Kodkod.struct_from_rel_expr", [old_R]) -(* kodkod_constrs -> int -> rep -> rep -> 
KK.rel_expr -> KK.rel_expr *) and vect_from_rel_expr kk k R old_R r = case old_R of Atom _ => lone_rep_fallback kk (Vect (k, R)) old_R r @@ -570,7 +507,6 @@ (kk_n_fold_join kk true R1 R2 arg_r r)) (all_singletons_for_rep R1)) | _ => raise REP ("Nitpick_Kodkod.vect_from_rel_expr", [old_R]) -(* kodkod_constrs -> rep -> rep -> rep -> KK.rel_expr -> KK.rel_expr *) and func_from_no_opt_rel_expr kk R1 R2 (Atom x) r = let val dom_card = card_of_rep R1 @@ -599,7 +535,6 @@ let val args_rs = all_singletons_for_rep R1 val vals_rs = unpack_vect_in_chunks kk 1 k r - (* KK.rel_expr -> KK.rel_expr -> KK.rel_expr *) fun empty_or_singleton_set_for arg_r val_r = #kk_join kk val_r (#kk_product kk (KK.Atom (j0 + 1)) arg_r) in @@ -682,7 +617,6 @@ end | _ => raise REP ("Nitpick_Kodkod.func_from_no_opt_rel_expr", [old_R, Func (R1, R2)]) -(* kodkod_constrs -> rep -> rep -> KK.rel_expr -> KK.rel_expr *) and rel_expr_from_rel_expr kk new_R old_R r = let val unopt_old_R = unopt_rep old_R @@ -702,25 +636,20 @@ [old_R, new_R])) unopt_old_R r end -(* kodkod_constrs -> rep -> rep -> rep -> KK.rel_expr -> KK.rel_expr *) and rel_expr_to_func kk R1 R2 = rel_expr_from_rel_expr kk (Func (R1, R2)) -(* kodkod_constrs -> typ -> KK.rel_expr -> KK.rel_expr *) fun bit_set_from_atom ({kk_join, ...} : kodkod_constrs) T r = kk_join r (KK.Rel (if T = @{typ "unsigned_bit word"} then unsigned_bit_word_sel_rel else signed_bit_word_sel_rel)) -(* kodkod_constrs -> typ -> KK.rel_expr -> KK.int_expr *) val int_expr_from_atom = KK.SetSum ooo bit_set_from_atom -(* kodkod_constrs -> typ -> rep -> KK.int_expr -> KK.rel_expr *) fun atom_from_int_expr (kk as {kk_rel_eq, kk_comprehension, ...} : kodkod_constrs) T R i = kk_comprehension (decls_for_atom_schema ~1 (atom_schema_of_rep R)) (kk_rel_eq (bit_set_from_atom kk T (KK.Var (1, ~1))) (KK.Bits i)) -(* kodkod_constrs -> nut -> KK.formula *) fun declarative_axiom_for_plain_rel kk (FreeRel (x, _, R as Func _, nick)) = kk_n_ary_function kk (R |> nick = @{const_name List.set} ? 
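(* Illustrative sketch, not from the patched sources: the two-atom encoding of
   booleans used by "atom_from_formula" above and by "formula_from_atom"
   further down. For a rep "Atom (2, j0)", the atom j0 stands for False and
   j0 + 1 for True, so "kk_rel_if f (KK.Atom (j0 + 1)) (KK.Atom j0)" picks the
   True atom exactly when the formula f holds. *)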
unopt_rep) (KK.Rel x) @@ -732,17 +661,13 @@ | declarative_axiom_for_plain_rel _ u = raise NUT ("Nitpick_Kodkod.declarative_axiom_for_plain_rel", [u]) -(* nut NameTable.table -> styp -> KK.rel_expr * rep * int *) fun const_triple rel_table (x as (s, T)) = case the_name rel_table (ConstName (s, T, Any)) of FreeRel ((n, j), _, R, _) => (KK.Rel (n, j), R, n) | _ => raise TERM ("Nitpick_Kodkod.const_triple", [Const x]) -(* nut NameTable.table -> styp -> KK.rel_expr *) fun discr_rel_expr rel_table = #1 o const_triple rel_table o discr_for_constr -(* hol_context -> bool -> kodkod_constrs -> nut NameTable.table - -> dtype_spec list -> styp -> int -> nfa_transition list *) fun nfa_transitions_for_sel hol_ctxt binarize ({kk_project, ...} : kodkod_constrs) rel_table (dtypes : dtype_spec list) constr_x n = @@ -757,14 +682,10 @@ else SOME (kk_project r (map KK.Num [0, j]), T)) (index_seq 1 (arity - 1) ~~ tl type_schema) end -(* hol_context -> bool -> kodkod_constrs -> nut NameTable.table - -> dtype_spec list -> styp -> nfa_transition list *) fun nfa_transitions_for_constr hol_ctxt binarize kk rel_table dtypes (x as (_, T)) = maps (nfa_transitions_for_sel hol_ctxt binarize kk rel_table dtypes x) (index_seq 0 (num_sels_for_constr_type T)) -(* hol_context -> bool -> kodkod_constrs -> nut NameTable.table - -> dtype_spec list -> dtype_spec -> nfa_entry option *) fun nfa_entry_for_datatype _ _ _ _ _ ({co = true, ...} : dtype_spec) = NONE | nfa_entry_for_datatype _ _ _ _ _ {standard = false, ...} = NONE | nfa_entry_for_datatype _ _ _ _ _ {deep = false, ...} = NONE @@ -775,12 +696,10 @@ val empty_rel = KK.Product (KK.None, KK.None) -(* nfa_table -> typ -> typ -> KK.rel_expr list *) fun direct_path_rel_exprs nfa start_T final_T = case AList.lookup (op =) nfa final_T of SOME trans => map fst (filter (curry (op =) start_T o snd) trans) | NONE => [] -(* kodkod_constrs -> nfa_table -> typ list -> typ -> typ -> KK.rel_expr *) and any_path_rel_expr ({kk_union, ...} : kodkod_constrs) nfa [] start_T final_T = fold kk_union (direct_path_rel_exprs nfa start_T final_T) @@ -788,14 +707,11 @@ | any_path_rel_expr (kk as {kk_union, ...}) nfa (T :: Ts) start_T final_T = kk_union (any_path_rel_expr kk nfa Ts start_T final_T) (knot_path_rel_expr kk nfa Ts start_T T final_T) -(* kodkod_constrs -> nfa_table -> typ list -> typ -> typ -> typ - -> KK.rel_expr *) and knot_path_rel_expr (kk as {kk_join, kk_reflexive_closure, ...}) nfa Ts start_T knot_T final_T = kk_join (kk_join (any_path_rel_expr kk nfa Ts knot_T final_T) (kk_reflexive_closure (loop_path_rel_expr kk nfa Ts knot_T))) (any_path_rel_expr kk nfa Ts start_T knot_T) -(* kodkod_constrs -> nfa_table -> typ list -> typ -> KK.rel_expr *) and loop_path_rel_expr ({kk_union, ...} : kodkod_constrs) nfa [] start_T = fold kk_union (direct_path_rel_exprs nfa start_T start_T) empty_rel | loop_path_rel_expr (kk as {kk_union, kk_closure, ...}) nfa (T :: Ts) @@ -806,12 +722,9 @@ kk_union (loop_path_rel_expr kk nfa Ts start_T) (knot_path_rel_expr kk nfa Ts start_T T start_T) -(* nfa_table -> unit NfaGraph.T *) fun graph_for_nfa nfa = let - (* typ -> unit NfaGraph.T -> unit NfaGraph.T *) fun new_node T = perhaps (try (NfaGraph.new_node (T, ()))) - (* nfa_table -> unit NfaGraph.T -> unit NfaGraph.T *) fun add_nfa [] = I | add_nfa ((_, []) :: nfa) = add_nfa nfa | add_nfa ((T, ((_, T') :: transitions)) :: nfa) = @@ -819,25 +732,19 @@ new_node T' o new_node T in add_nfa nfa NfaGraph.empty end -(* nfa_table -> nfa_table list *) fun strongly_connected_sub_nfas nfa = nfa |> graph_for_nfa |> 
NfaGraph.strong_conn |> map (fn keys => filter (member (op =) keys o fst) nfa) -(* kodkod_constrs -> nfa_table -> typ -> KK.formula *) fun acyclicity_axiom_for_datatype kk nfa start_T = #kk_no kk (#kk_intersect kk (loop_path_rel_expr kk nfa (map fst nfa) start_T) KK.Iden) -(* hol_context -> bool -> kodkod_constrs -> nut NameTable.table - -> dtype_spec list -> KK.formula list *) fun acyclicity_axioms_for_datatypes hol_ctxt binarize kk rel_table dtypes = map_filter (nfa_entry_for_datatype hol_ctxt binarize kk rel_table dtypes) dtypes |> strongly_connected_sub_nfas |> maps (fn nfa => map (acyclicity_axiom_for_datatype kk nfa o fst) nfa) -(* hol_context -> bool -> int -> kodkod_constrs -> nut NameTable.table - -> KK.rel_expr -> constr_spec -> int -> KK.formula *) fun sel_axiom_for_sel hol_ctxt binarize j0 (kk as {kk_all, kk_formula_if, kk_subset, kk_no, kk_join, ...}) rel_table dom_r ({const, delta, epsilon, exclusive, ...} : constr_spec) @@ -857,8 +764,6 @@ (kk_n_ary_function kk R2 r') (kk_no r')) end end -(* hol_context -> bool -> int -> int -> kodkod_constrs -> nut NameTable.table - -> constr_spec -> KK.formula list *) fun sel_axioms_for_constr hol_ctxt binarize bits j0 kk rel_table (constr as {const, delta, epsilon, explicit_max, ...}) = let @@ -885,19 +790,14 @@ (index_seq 0 (num_sels_for_constr_type (snd const))) end end -(* hol_context -> bool -> int -> int -> kodkod_constrs -> nut NameTable.table - -> dtype_spec -> KK.formula list *) fun sel_axioms_for_datatype hol_ctxt binarize bits j0 kk rel_table ({constrs, ...} : dtype_spec) = maps (sel_axioms_for_constr hol_ctxt binarize bits j0 kk rel_table) constrs -(* hol_context -> bool -> kodkod_constrs -> nut NameTable.table -> constr_spec - -> KK.formula list *) fun uniqueness_axiom_for_constr hol_ctxt binarize ({kk_all, kk_implies, kk_and, kk_rel_eq, kk_lone, kk_join, ...} : kodkod_constrs) rel_table ({const, ...} : constr_spec) = let - (* KK.rel_expr -> KK.formula *) fun conjunct_for_sel r = kk_rel_eq (kk_join (KK.Var (1, 0)) r) (kk_join (KK.Var (1, 1)) r) val num_sels = num_sels_for_constr_type (snd const) @@ -915,16 +815,11 @@ (fold1 kk_and (map (conjunct_for_sel o #1) (tl triples))) (kk_rel_eq (KK.Var (1, 0)) (KK.Var (1, 1)))) end -(* hol_context -> bool -> kodkod_constrs -> nut NameTable.table -> dtype_spec - -> KK.formula list *) fun uniqueness_axioms_for_datatype hol_ctxt binarize kk rel_table ({constrs, ...} : dtype_spec) = map (uniqueness_axiom_for_constr hol_ctxt binarize kk rel_table) constrs -(* constr_spec -> int *) fun effective_constr_max ({delta, epsilon, ...} : constr_spec) = epsilon - delta -(* int -> kodkod_constrs -> nut NameTable.table -> dtype_spec - -> KK.formula list *) fun partition_axioms_for_datatype j0 (kk as {kk_rel_eq, kk_union, ...}) rel_table ({card, constrs, ...} : dtype_spec) = @@ -936,8 +831,6 @@ kk_disjoint_sets kk rs] end -(* hol_context -> bool -> int -> int Typtab.table -> kodkod_constrs - -> nut NameTable.table -> dtype_spec -> KK.formula list *) fun other_axioms_for_datatype _ _ _ _ _ _ {deep = false, ...} = [] | other_axioms_for_datatype hol_ctxt binarize bits ofs kk rel_table (dtype as {typ, ...}) = @@ -947,15 +840,12 @@ partition_axioms_for_datatype j0 kk rel_table dtype end -(* hol_context -> bool -> int -> int Typtab.table -> kodkod_constrs - -> nut NameTable.table -> dtype_spec list -> KK.formula list *) fun declarative_axioms_for_datatypes hol_ctxt binarize bits ofs kk rel_table dtypes = acyclicity_axioms_for_datatypes hol_ctxt binarize kk rel_table dtypes @ maps (other_axioms_for_datatype 
hol_ctxt binarize bits ofs kk rel_table) dtypes -(* int Typtab.table -> kodkod_constrs -> nut -> KK.formula *) fun kodkod_formula_from_nut ofs (kk as {kk_all, kk_exist, kk_formula_let, kk_formula_if, kk_or, kk_not, kk_iff, kk_implies, kk_and, kk_subset, kk_rel_eq, kk_no, @@ -970,17 +860,13 @@ val false_atom = KK.Atom bool_j0 val true_atom = KK.Atom (bool_j0 + 1) - (* polarity -> int -> KK.rel_expr -> KK.formula *) fun formula_from_opt_atom polar j0 r = case polar of Neg => kk_not (kk_rel_eq r (KK.Atom j0)) | _ => kk_rel_eq r (KK.Atom (j0 + 1)) - (* int -> KK.rel_expr -> KK.formula *) val formula_from_atom = formula_from_opt_atom Pos - (* KK.formula -> KK.formula -> KK.formula *) fun kk_notimplies f1 f2 = kk_and f1 (kk_not f2) - (* KK.rel_expr -> KK.rel_expr -> KK.rel_expr *) val kk_or3 = double_rel_rel_let kk (fn r1 => fn r2 => @@ -993,21 +879,15 @@ (kk_intersect r1 r2)) fun kk_notimplies3 r1 r2 = kk_and3 r1 (kk_not3 r2) - (* int -> KK.rel_expr -> KK.formula list *) val unpack_formulas = map (formula_from_atom bool_j0) oo unpack_vect_in_chunks kk 1 - (* (KK.formula -> KK.formula -> KK.formula) -> int -> KK.rel_expr - -> KK.rel_expr -> KK.rel_expr *) fun kk_vect_set_op connective k r1 r2 = fold1 kk_product (map2 (atom_from_formula kk bool_j0 oo connective) (unpack_formulas k r1) (unpack_formulas k r2)) - (* (KK.formula -> KK.formula -> KK.formula) -> int -> KK.rel_expr - -> KK.rel_expr -> KK.formula *) fun kk_vect_set_bool_op connective k r1 r2 = fold1 kk_and (map2 connective (unpack_formulas k r1) (unpack_formulas k r2)) - (* nut -> KK.formula *) fun to_f u = case rep_of u of Formula polar => @@ -1060,7 +940,6 @@ else let (* FIXME: merge with similar code below *) - (* bool -> nut -> KK.rel_expr *) fun set_to_r widen u = if widen then kk_difference (full_rel_for_rep dom_R) @@ -1078,7 +957,6 @@ kk_iff (to_f_with_polarity polar u1) (to_f_with_polarity polar u2) | min_R => let - (* nut -> nut list *) fun args (Op2 (Apply, _, _, u1, u2)) = u2 :: args u1 | args (Tuple (_, _, us)) = us | args _ = [] @@ -1177,14 +1055,12 @@ | _ => raise NUT ("Nitpick_Kodkod.to_f", [u])) | Atom (2, j0) => formula_from_atom j0 (to_r u) | _ => raise NUT ("Nitpick_Kodkod.to_f", [u]) - (* polarity -> nut -> KK.formula *) and to_f_with_polarity polar u = case rep_of u of Formula _ => to_f u | Atom (2, j0) => formula_from_atom j0 (to_r u) | Opt (Atom (2, j0)) => formula_from_opt_atom polar j0 (to_r u) | _ => raise NUT ("Nitpick_Kodkod.to_f_with_polarity", [u]) - (* nut -> KK.rel_expr *) and to_r u = case u of Cst (False, _, Atom _) => false_atom @@ -1523,7 +1399,6 @@ | Opt (Atom (2, _)) => let (* FIXME: merge with similar code above *) - (* rep -> rep -> nut -> KK.rel_expr *) fun must R1 R2 u = kk_join (to_rep (Func (Struct [R1, R2], body_R)) u) true_atom fun may R1 R2 u = @@ -1558,9 +1433,7 @@ (to_rep (Func (b_R, Formula Neut)) u2) | Opt (Atom (2, _)) => let - (* KK.rel_expr -> rep -> nut -> KK.rel_expr *) fun do_nut r R u = kk_join (to_rep (Func (R, body_R)) u) r - (* KK.rel_expr -> KK.rel_expr *) fun do_term r = kk_product (kk_product (do_nut r a_R u1) (do_nut r b_R u2)) r in kk_union (do_term true_atom) (do_term false_atom) end @@ -1572,7 +1445,6 @@ (Func (R11, R12), Func (R21, Formula Neut)) => if R21 = R11 andalso is_lone_rep R12 then let - (* KK.rel_expr -> KK.rel_expr *) fun big_join r = kk_n_fold_join kk false R21 R12 r (to_r u1) val core_r = big_join (to_r u2) val core_R = Func (R12, Formula Neut) @@ -1666,39 +1538,32 @@ | FreeRel (x, _, _, _) => KK.Rel x | RelReg (j, _, R) => KK.RelReg (arity_of_rep R, j) | u 
=> raise NUT ("Nitpick_Kodkod.to_r", [u]) - (* nut -> KK.decl *) and to_decl (BoundRel (x, _, R, _)) = KK.DeclOne (x, KK.AtomSeq (the_single (atom_schema_of_rep R))) | to_decl u = raise NUT ("Nitpick_Kodkod.to_decl", [u]) - (* nut -> KK.expr_assign *) and to_expr_assign (FormulaReg (j, _, _)) u = KK.AssignFormulaReg (j, to_f u) | to_expr_assign (RelReg (j, _, R)) u = KK.AssignRelReg ((arity_of_rep R, j), to_r u) | to_expr_assign u1 _ = raise NUT ("Nitpick_Kodkod.to_expr_assign", [u1]) - (* int * int -> nut -> KK.rel_expr *) and to_atom (x as (k, j0)) u = case rep_of u of Formula _ => atom_from_formula kk j0 (to_f u) | Unit => if k = 1 then KK.Atom j0 else raise NUT ("Nitpick_Kodkod.to_atom", [u]) | R => atom_from_rel_expr kk x R (to_r u) - (* rep list -> nut -> KK.rel_expr *) and to_struct Rs u = case rep_of u of Unit => full_rel_for_rep (Struct Rs) | R' => struct_from_rel_expr kk Rs R' (to_r u) - (* int -> rep -> nut -> KK.rel_expr *) and to_vect k R u = case rep_of u of Unit => full_rel_for_rep (Vect (k, R)) | R' => vect_from_rel_expr kk k R R' (to_r u) - (* rep -> rep -> nut -> KK.rel_expr *) and to_func R1 R2 u = case rep_of u of Unit => full_rel_for_rep (Func (R1, R2)) | R' => rel_expr_to_func kk R1 R2 R' (to_r u) - (* rep -> nut -> KK.rel_expr *) and to_opt R u = let val old_R = rep_of u in if is_opt_rep old_R then @@ -1706,16 +1571,13 @@ else to_rep R u end - (* rep -> nut -> KK.rel_expr *) and to_rep (Atom x) u = to_atom x u | to_rep (Struct Rs) u = to_struct Rs u | to_rep (Vect (k, R)) u = to_vect k R u | to_rep (Func (R1, R2)) u = to_func R1 R2 u | to_rep (Opt R) u = to_opt R u | to_rep R _ = raise REP ("Nitpick_Kodkod.to_rep", [R]) - (* nut -> KK.rel_expr *) and to_integer u = to_opt (one_rep ofs (type_of u) (rep_of u)) u - (* nut list -> rep -> KK.rel_expr -> KK.rel_expr *) and to_guard guard_us R r = let val unpacked_rs = unpack_joins r @@ -1733,16 +1595,13 @@ if null guard_fs then r else kk_rel_if (fold1 kk_or guard_fs) (empty_rel_for_rep R) r end - (* rep -> rep -> KK.rel_expr -> int -> KK.rel_expr *) and to_project new_R old_R r j0 = rel_expr_from_rel_expr kk new_R old_R (kk_project_seq r j0 (arity_of_rep old_R)) - (* rep list -> nut list -> KK.rel_expr *) and to_product Rs us = case map (uncurry to_opt) (filter (not_equal Unit o fst) (Rs ~~ us)) of [] => raise REP ("Nitpick_Kodkod.to_product", Rs) | rs => fold1 kk_product rs - (* int -> typ -> rep -> nut -> KK.rel_expr *) and to_nth_pair_sel n res_T res_R u = case u of Tuple (_, _, us) => to_rep res_R (nth us n) @@ -1770,9 +1629,6 @@ (to_rep res_R (Cst (Unity, res_T, Unit))) | _ => to_project res_R nth_R (to_rep (Opt (Struct Rs)) u) j0 end - (* (KK.formula -> KK.formula -> KK.formula) - -> (KK.rel_expr -> KK.rel_expr -> KK.formula) -> nut -> nut - -> KK.formula *) and to_set_bool_op connective set_oper u1 u2 = let val min_R = min_rep (rep_of u1) (rep_of u2) @@ -1788,12 +1644,6 @@ (kk_join r2 true_atom) | _ => raise REP ("Nitpick_Kodkod.to_set_bool_op", [min_R]) end - (* (KK.formula -> KK.formula -> KK.formula) - -> (KK.rel_expr -> KK.rel_expr -> KK.rel_expr) - -> (KK.rel_expr -> KK.rel_expr -> KK.formula) - -> (KK.rel_expr -> KK.rel_expr -> KK.formula) - -> (KK.rel_expr -> KK.rel_expr -> KK.formula) -> bool -> rep -> nut - -> nut -> KK.rel_expr *) and to_set_op connective connective3 set_oper true_set_oper false_set_oper neg_second R u1 u2 = let @@ -1825,11 +1675,9 @@ r1 r2 | _ => raise REP ("Nitpick_Kodkod.to_set_op", [min_R])) end - (* typ -> rep -> (KK.int_expr -> KK.int_expr) -> KK.rel_expr *) and to_bit_word_unary_op 
T R oper = let val Ts = strip_type T ||> single |> op @ - (* int -> KK.int_expr *) fun int_arg j = int_expr_from_atom kk (nth Ts j) (KK.Var (1, j)) in kk_comprehension (decls_for_atom_schema 0 (atom_schema_of_rep R)) @@ -1837,12 +1685,9 @@ (map (fn j => KK.AssignIntReg (j, int_arg j)) (0 upto 1), KK.IntEq (KK.IntReg 1, oper (KK.IntReg 0)))) end - (* typ -> rep -> (KK.int_expr -> KK.int_expr -> KK.int_expr -> bool) option - -> (KK.int_expr -> KK.int_expr -> KK.int_expr) option -> KK.rel_expr *) and to_bit_word_binary_op T R opt_guard opt_oper = let val Ts = strip_type T ||> single |> op @ - (* int -> KK.int_expr *) fun int_arg j = int_expr_from_atom kk (nth Ts j) (KK.Var (1, j)) in kk_comprehension (decls_for_atom_schema 0 (atom_schema_of_rep R)) @@ -1859,7 +1704,6 @@ [KK.IntEq (KK.IntReg 2, oper (KK.IntReg 0) (KK.IntReg 1))])))) end - (* rep -> rep -> KK.rel_expr -> nut -> KK.rel_expr *) and to_apply (R as Formula _) func_u arg_u = raise REP ("Nitpick_Kodkod.to_apply", [R]) | to_apply res_R func_u arg_u = @@ -1896,7 +1740,6 @@ (kk_n_fold_join kk true R1 R2 (to_opt R1 arg_u) (to_r func_u)) |> body_rep R2 = Formula Neut ? to_guard [arg_u] res_R | _ => raise NUT ("Nitpick_Kodkod.to_apply", [func_u]) - (* int -> rep -> rep -> KK.rel_expr -> nut *) and to_apply_vect k R' res_R func_r arg_u = let val arg_R = one_rep ofs (type_of arg_u) (unopt_rep (rep_of arg_u)) @@ -1906,10 +1749,8 @@ kk_case_switch kk arg_R res_R (to_opt arg_R arg_u) (all_singletons_for_rep arg_R) vect_rs end - (* bool -> nut -> KK.formula *) and to_could_be_unrep neg u = if neg andalso is_opt_rep (rep_of u) then kk_no (to_r u) else KK.False - (* nut -> KK.rel_expr -> KK.rel_expr *) and to_compare_with_unrep u r = if is_opt_rep (rep_of u) then kk_rel_if (kk_some (to_r u)) r (empty_rel_for_rep (rep_of u)) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/nitpick_model.ML --- a/src/HOL/Tools/Nitpick/nitpick_model.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick_model.ML Tue May 04 20:30:22 2010 +0200 @@ -12,10 +12,10 @@ type rep = Nitpick_Rep.rep type nut = Nitpick_Nut.nut - type params = { - show_skolems: bool, - show_datatypes: bool, - show_consts: bool} + type params = + {show_datatypes: bool, + show_consts: bool} + type term_postprocessor = Proof.context -> string -> (typ -> term list) -> typ -> term -> term @@ -51,10 +51,9 @@ structure KK = Kodkod -type params = { - show_skolems: bool, - show_datatypes: bool, - show_consts: bool} +type params = + {show_datatypes: bool, + show_consts: bool} type term_postprocessor = Proof.context -> string -> (typ -> term list) -> typ -> term -> term @@ -63,7 +62,7 @@ type T = (typ * term_postprocessor) list val empty = [] val extend = I - fun merge (ps1, ps2) = AList.merge (op =) (K true) (ps1, ps2)) + fun merge (x, y) = AList.merge (op =) (K true) (x, y)) val irrelevant = "_" val unknown = "?" @@ -81,10 +80,8 @@ type atom_pool = ((string * int) * int list) list -(* Proof.context -> ((string * string) * (string * string)) * Proof.context *) fun add_wacky_syntax ctxt = let - (* term -> string *) val name_of = fst o dest_Const val thy = ProofContext.theory_of ctxt |> Context.reject_draft val (maybe_t, thy) = @@ -106,7 +103,6 @@ (** Term reconstruction **) -(* atom_pool Unsynchronized.ref -> string -> int -> int -> string *) fun nth_atom_suffix pool s j k = (case AList.lookup (op =) (!pool) (s, k) of SOME js => @@ -118,7 +114,6 @@ |> nat_subscript |> (s <> "" andalso Symbol.is_ascii_digit (List.last (explode s))) ? 
prefix "\<^isub>," -(* atom_pool Unsynchronized.ref -> string -> typ -> int -> int -> string *) fun nth_atom_name pool prefix (Type (s, _)) j k = let val s' = shortest_name s in prefix ^ (if String.isPrefix "\\" s' then s' else substring (s', 0, 1)) ^ @@ -128,18 +123,15 @@ prefix ^ perhaps (try (unprefix "'")) s ^ nth_atom_suffix pool s j k | nth_atom_name _ _ T _ _ = raise TYPE ("Nitpick_Model.nth_atom_name", [T], []) -(* atom_pool Unsynchronized.ref -> bool -> typ -> int -> int -> term *) fun nth_atom pool for_auto T j k = if for_auto then Free (nth_atom_name pool (hd (space_explode "." nitpick_prefix)) T j k, T) else Const (nth_atom_name pool "" T j k, T) -(* term -> real *) fun extract_real_number (Const (@{const_name divide}, _) $ t1 $ t2) = real (snd (HOLogic.dest_number t1)) / real (snd (HOLogic.dest_number t2)) | extract_real_number t = real (snd (HOLogic.dest_number t)) -(* term * term -> order *) fun nice_term_ord (Abs (_, _, t1), Abs (_, _, t2)) = nice_term_ord (t1, t2) | nice_term_ord tp = Real.compare (pairself extract_real_number tp) handle TERM ("dest_number", _) => @@ -150,16 +142,12 @@ | ord => ord) | _ => Term_Ord.fast_term_ord tp -(* typ -> term_postprocessor -> theory -> theory *) fun register_term_postprocessor T p = Data.map (cons (T, p)) -(* typ -> theory -> theory *) fun unregister_term_postprocessor T = Data.map (AList.delete (op =) T) -(* nut NameTable.table -> KK.raw_bound list -> nut -> int list list *) fun tuple_list_for_name rel_table bounds name = the (AList.lookup (op =) bounds (the_rel rel_table name)) handle NUT _ => [[]] -(* term -> term *) fun unarize_unbox_etc_term (Const (@{const_name FinFun}, _) $ t1) = unarize_unbox_etc_term t1 | unarize_unbox_etc_term (Const (@{const_name FunBox}, _) $ t1) = @@ -184,7 +172,6 @@ | unarize_unbox_etc_term (Abs (s, T, t')) = Abs (s, uniterize_unarize_unbox_etc_type T, unarize_unbox_etc_term t') -(* typ -> typ -> (typ * typ) * (typ * typ) *) fun factor_out_types (T1 as Type (@{type_name "*"}, [T11, T12])) (T2 as Type (@{type_name "*"}, [T21, T22])) = let val (n1, n2) = pairself num_factors_in_type (T11, T21) in @@ -209,25 +196,20 @@ ((T1, NONE), (T21, SOME T22)) | factor_out_types T1 T2 = ((T1, NONE), (T2, NONE)) -(* bool -> typ -> typ -> (term * term) list -> term *) fun make_plain_fun maybe_opt T1 T2 = let - (* typ -> typ -> (term * term) list -> term *) fun aux T1 T2 [] = Const (if maybe_opt then opt_flag else non_opt_flag, T1 --> T2) | aux T1 T2 ((t1, t2) :: tps) = Const (@{const_name fun_upd}, (T1 --> T2) --> T1 --> T2 --> T1 --> T2) $ aux T1 T2 tps $ t1 $ t2 in aux T1 T2 o rev end -(* term -> bool *) fun is_plain_fun (Const (s, _)) = (s = opt_flag orelse s = non_opt_flag) | is_plain_fun (Const (@{const_name fun_upd}, _) $ t0 $ _ $ _) = is_plain_fun t0 | is_plain_fun _ = false -(* term -> bool * (term list * term list) *) val dest_plain_fun = let - (* term -> bool * (term list * term list) *) fun aux (Abs (_, _, Const (s, _))) = (s <> irrelevant, ([], [])) | aux (Const (s, _)) = (s <> non_opt_flag, ([], [])) | aux (Const (@{const_name fun_upd}, _) $ t0 $ t1 $ t2) = @@ -237,7 +219,6 @@ | aux t = raise TERM ("Nitpick_Model.dest_plain_fun", [t]) in apsnd (pairself rev) o aux end -(* typ -> typ -> typ -> term -> term * term *) fun break_in_two T T1 T2 t = let val ps = HOLogic.flat_tupleT_paths T @@ -245,7 +226,6 @@ val (ps1, ps2) = pairself HOLogic.flat_tupleT_paths (T1, T2) val (ts1, ts2) = t |> HOLogic.strip_ptuple ps |> chop cut in (HOLogic.mk_ptuple ps1 T1 ts1, HOLogic.mk_ptuple ps2 T2 ts2) end -(* typ -> term -> 
term -> term *) fun pair_up (Type (@{type_name "*"}, [T1', T2'])) (t1 as Const (@{const_name Pair}, Type (@{type_name fun}, @@ -254,13 +234,10 @@ if T1 = T1' then HOLogic.mk_prod (t1, t2) else HOLogic.mk_prod (t11, pair_up T2' t12 t2) | pair_up _ t1 t2 = HOLogic.mk_prod (t1, t2) -(* typ -> term -> term list * term list -> (term * term) list*) fun multi_pair_up T1 t1 (ts2, ts3) = map2 (pair o pair_up T1 t1) ts2 ts3 -(* typ -> typ -> typ -> term -> term *) fun typecast_fun (Type (@{type_name fun}, [T1', T2'])) T1 T2 t = let - (* typ -> typ -> typ -> typ -> term -> term *) fun do_curry T1 T1a T1b T2 t = let val (maybe_opt, tsp) = dest_plain_fun t @@ -270,7 +247,6 @@ |> AList.coalesce (op =) |> map (apsnd (make_plain_fun maybe_opt T1b T2)) in make_plain_fun maybe_opt T1a (T1b --> T2) tps end - (* typ -> typ -> term -> term *) and do_uncurry T1 T2 t = let val (maybe_opt, tsp) = dest_plain_fun t @@ -279,7 +255,6 @@ |> maps (fn (t1, t2) => multi_pair_up T1 t1 (snd (dest_plain_fun t2))) in make_plain_fun maybe_opt T1 T2 tps end - (* typ -> typ -> typ -> typ -> term -> term *) and do_arrow T1' T2' _ _ (Const (s, _)) = Const (s, T1' --> T2') | do_arrow T1' T2' T1 T2 (Const (@{const_name fun_upd}, _) $ t0 $ t1 $ t2) = @@ -296,7 +271,6 @@ | ((T1a', SOME T1b'), (_, NONE)) => t |> do_arrow T1a' (T1b' --> T2') T1 T2 |> do_uncurry T1' T2' | _ => raise TYPE ("Nitpick_Model.typecast_fun.do_fun", [T1, T1'], []) - (* typ -> typ -> term -> term *) and do_term (Type (@{type_name fun}, [T1', T2'])) (Type (@{type_name fun}, [T1, T2])) t = do_fun T1' T2' T1 T2 t @@ -312,33 +286,25 @@ | typecast_fun T' _ _ _ = raise TYPE ("Nitpick_Model.typecast_fun", [T'], []) -(* term -> string *) fun truth_const_sort_key @{const True} = "0" | truth_const_sort_key @{const False} = "2" | truth_const_sort_key _ = "1" -(* typ -> term list -> term *) fun mk_tuple (Type (@{type_name "*"}, [T1, T2])) ts = HOLogic.mk_prod (mk_tuple T1 ts, mk_tuple T2 (List.drop (ts, length (HOLogic.flatten_tupleT T1)))) | mk_tuple _ (t :: _) = t | mk_tuple T [] = raise TYPE ("Nitpick_Model.mk_tuple", [T], []) -(* theory -> typ * typ -> bool *) fun varified_type_match thy (candid_T, pat_T) = strict_type_match thy (candid_T, Logic.varifyT_global pat_T) -(* atom_pool -> (string * string) * (string * string) -> scope -> nut list - -> nut list -> nut list -> nut NameTable.table -> KK.raw_bound list -> typ - -> term list *) fun all_values_of_type pool wacky_names (scope as {card_assigns, ...} : scope) sel_names rel_table bounds card T = let val card = if card = 0 then card_of_type card_assigns T else card - (* nat -> term *) fun nth_value_of_type n = let - (* bool -> term *) fun term unfold = reconstruct_term unfold pool wacky_names scope sel_names rel_table bounds T T (Atom (card, 0)) [[n]] @@ -352,15 +318,11 @@ | t => t end in index_seq 0 card |> map nth_value_of_type |> sort nice_term_ord end -(* bool -> atom_pool -> (string * string) * (string * string) -> scope - -> nut list -> nut list -> nut list -> nut NameTable.table - -> KK.raw_bound list -> typ -> typ -> rep -> int list list -> term *) and reconstruct_term unfold pool (wacky_names as ((maybe_name, abs_name), _)) (scope as {hol_ctxt as {ctxt, thy, stds, ...}, binarize, card_assigns, bits, datatypes, ofs, ...}) sel_names rel_table bounds = let val for_auto = (maybe_name = "") - (* int list list -> int *) fun value_of_bits jss = let val j0 = offset_of_type ofs @{typ unsigned_bit} @@ -369,10 +331,8 @@ fold (fn j => Integer.add (reasonable_power 2 j |> j = bits ? 
op ~)) js 0 end - (* typ -> term list *) val all_values = all_values_of_type pool wacky_names scope sel_names rel_table bounds 0 - (* typ -> term -> term *) fun postprocess_term (Type (@{type_name fun}, _)) = I | postprocess_term T = if null (Data.get thy) then @@ -380,7 +340,6 @@ else case AList.lookup (varified_type_match thy) (Data.get thy) T of SOME postproc => postproc ctxt maybe_name all_values T | NONE => I - (* typ list -> term -> term *) fun postprocess_subterms Ts (t1 $ t2) = let val t = postprocess_subterms Ts t1 $ postprocess_subterms Ts t2 in postprocess_term (fastype_of1 (Ts, t)) t @@ -388,13 +347,11 @@ | postprocess_subterms Ts (Abs (s, T, t')) = Abs (s, T, postprocess_subterms (T :: Ts) t') | postprocess_subterms Ts t = postprocess_term (fastype_of1 (Ts, t)) t - (* bool -> typ -> typ -> (term * term) list -> term *) fun make_set maybe_opt T1 T2 tps = let val empty_const = Const (@{const_abbrev Set.empty}, T1 --> T2) val insert_const = Const (@{const_name insert}, T1 --> (T1 --> T2) --> T1 --> T2) - (* (term * term) list -> term *) fun aux [] = if maybe_opt andalso not (is_complete_type datatypes false T1) then insert_const $ Const (unrep, T1) $ empty_const @@ -415,12 +372,10 @@ else aux tps end - (* bool -> typ -> typ -> typ -> (term * term) list -> term *) fun make_map maybe_opt T1 T2 T2' = let val update_const = Const (@{const_name fun_upd}, (T1 --> T2) --> T1 --> T2 --> T1 --> T2) - (* (term * term) list -> term *) fun aux' [] = Const (@{const_abbrev Map.empty}, T1 --> T2) | aux' ((t1, t2) :: tps) = (case t2 of @@ -433,7 +388,6 @@ else aux' tps in aux end - (* typ list -> term -> term *) fun polish_funs Ts t = (case fastype_of1 (Ts, t) of Type (@{type_name fun}, [T1, T2]) => @@ -474,7 +428,6 @@ else t | t => t - (* bool -> typ -> typ -> typ -> term list -> term list -> term *) fun make_fun maybe_opt T1 T2 T' ts1 ts2 = ts1 ~~ ts2 |> sort (nice_term_ord o pairself fst) |> make_plain_fun maybe_opt T1 T2 @@ -482,7 +435,6 @@ |> typecast_fun (uniterize_unarize_unbox_etc_type T') (uniterize_unarize_unbox_etc_type T1) (uniterize_unarize_unbox_etc_type T2) - (* (typ * int) list -> typ -> typ -> int -> term *) fun term_for_atom seen (T as Type (@{type_name fun}, [T1, T2])) T' j _ = let val k1 = card_of_type card_assigns T1 @@ -523,10 +475,8 @@ | SOME {deep = false, ...} => nth_atom pool for_auto T j k | SOME {co, standard, constrs, ...} => let - (* styp -> int list *) fun tuples_for_const (s, T) = tuple_list_for_name rel_table bounds (ConstName (s, T, Any)) - (* unit -> term *) fun cyclic_atom () = nth_atom pool for_auto (Type (cyclic_type_name, [])) j k fun cyclic_var () = Var ((nth_atom_name pool "" T j k, 0), T) @@ -615,14 +565,11 @@ t end end - (* (typ * int) list -> int -> rep -> typ -> typ -> typ -> int list - -> term *) and term_for_vect seen k R T1 T2 T' js = make_fun true T1 T2 T' (map (fn j => term_for_atom seen T1 T1 j k) (index_seq 0 k)) (map (term_for_rep true seen T2 T2 R o single) (batch_list (arity_of_rep R) js)) - (* bool -> (typ * int) list -> typ -> typ -> rep -> int list list -> term *) and term_for_rep _ seen T T' Unit [[]] = term_for_atom seen T T' 0 1 | term_for_rep _ seen T T' (R as Atom (k, j0)) [[j]] = if j >= j0 andalso j < j0 + k then term_for_atom seen T T' (j - j0) k @@ -674,14 +621,12 @@ (** Constant postprocessing **) -(* int -> typ -> typ list *) fun dest_n_tuple_type 1 T = [T] | dest_n_tuple_type n (Type (_, [T1, T2])) = T1 :: dest_n_tuple_type (n - 1) T2 | dest_n_tuple_type _ T = raise TYPE ("Nitpick_Model.dest_n_tuple_type", [T], []) -(* theory 
-> const_table -> styp -> int list *) fun const_format thy def_table (x as (s, T)) = if String.isPrefix unrolled_prefix s then const_format thy def_table (original_name s, range_type T) @@ -701,7 +646,6 @@ else [num_binder_types T] | NONE => [num_binder_types T] -(* int list -> int list -> int list *) fun intersect_formats _ [] = [] | intersect_formats [] _ = [] | intersect_formats ks1 ks2 = @@ -711,7 +655,6 @@ [Int.min (k1, k2)] end -(* theory -> const_table -> (term option * int list) list -> term -> int list *) fun lookup_format thy def_table formats t = case AList.lookup (fn (SOME x, SOME y) => (term_match thy) (x, y) | _ => false) @@ -724,7 +667,6 @@ | _ => format end -(* int list -> int list -> typ -> typ *) fun format_type default_format format T = let val T = uniterize_unarize_unbox_etc_type T @@ -742,28 +684,22 @@ |> map (HOLogic.mk_tupleT o rev) in List.foldl (op -->) body_T batched end end -(* theory -> const_table -> (term option * int list) list -> term -> typ *) fun format_term_type thy def_table formats t = format_type (the (AList.lookup (op =) formats NONE)) (lookup_format thy def_table formats t) (fastype_of t) -(* int list -> int -> int list -> int list *) fun repair_special_format js m format = m - 1 downto 0 |> chunk_list_unevenly (rev format) |> map (rev o filter_out (member (op =) js)) |> filter_out null |> map length |> rev -(* hol_context -> string * string -> (term option * int list) list - -> styp -> term * typ *) fun user_friendly_const ({thy, evals, def_table, skolems, special_funs, ...} : hol_context) (base_name, step_name) formats = let val default_format = the (AList.lookup (op =) formats NONE) - (* styp -> term * typ *) fun do_const (x as (s, T)) = (if String.isPrefix special_prefix s then let - (* term -> term *) val do_term = map_aterms (fn Const x => fst (do_const x) | t' => t') val (x' as (_, T'), js, ts) = AList.find (op =) (!special_funs) (s, unarize_unbox_etc_type T) @@ -772,7 +708,6 @@ val Ts = List.take (binder_types T', max_j + 1) val missing_js = filter_out (member (op =) js) (0 upto max_j) val missing_Ts = filter_indices missing_js Ts - (* int -> indexname *) fun nth_missing_var n = ((arg_var_prefix ^ nat_subscript (n + 1), 0), nth missing_Ts n) val missing_vars = map nth_missing_var (0 upto length missing_js - 1) @@ -864,7 +799,6 @@ |>> shorten_names_in_term |>> Term.map_abs_vars shortest_name in do_const end -(* styp -> string *) fun assign_operator_for_const (s, T) = if String.isPrefix ubfp_prefix s then if is_fun_type T then "\" else "\" @@ -877,8 +811,6 @@ (** Model reconstruction **) -(* atom_pool -> scope -> nut list -> nut NameTable.table -> KK.raw_bound list - -> nut -> term *) fun term_for_name pool scope sel_names rel_table bounds name = let val T = type_of name in tuple_list_for_name rel_table bounds name @@ -886,13 +818,11 @@ rel_table bounds T T (rep_of name) end -(* term -> term *) fun unfold_outer_the_binders (t as Const (@{const_name The}, _) $ Abs (s, T, Const (@{const_name "op ="}, _) $ Bound 0 $ t')) = betapply (Abs (s, T, t'), t) |> unfold_outer_the_binders | unfold_outer_the_binders t = t -(* typ list -> int -> term * term -> bool *) fun bisimilar_values _ 0 _ = true | bisimilar_values coTs max_depth (t1, t2) = let val T = fastype_of t1 in @@ -909,17 +839,14 @@ t1 = t2 end -(* params -> scope -> (term option * int list) list -> styp list -> nut list - -> nut list -> nut list -> nut NameTable.table -> KK.raw_bound list - -> Pretty.T * bool *) -fun reconstruct_hol_model {show_skolems, show_datatypes, show_consts} +fun 
reconstruct_hol_model {show_datatypes, show_consts} ({hol_ctxt = {thy, ctxt, max_bisim_depth, boxes, stds, wfs, user_axioms, debug, binary_ints, destroy_constrs, specialize, - skolemize, star_linear_preds, uncurry, fast_descrs, - tac_timeout, evals, case_names, def_table, nondef_table, - user_nondefs, simp_table, psimp_table, choice_spec_table, - intro_table, ground_thm_table, ersatz_table, skolems, - special_funs, unrolled_preds, wf_cache, constr_cache}, + star_linear_preds, fast_descrs, tac_timeout, evals, + case_names, def_table, nondef_table, user_nondefs, + simp_table, psimp_table, choice_spec_table, intro_table, + ground_thm_table, ersatz_table, skolems, special_funs, + unrolled_preds, wf_cache, constr_cache}, binarize, card_assigns, bits, bisim_depth, datatypes, ofs} : scope) formats all_frees free_names sel_names nonsel_names rel_table bounds = let @@ -930,8 +857,7 @@ {thy = thy, ctxt = ctxt, max_bisim_depth = max_bisim_depth, boxes = boxes, stds = stds, wfs = wfs, user_axioms = user_axioms, debug = debug, binary_ints = binary_ints, destroy_constrs = destroy_constrs, - specialize = specialize, skolemize = skolemize, - star_linear_preds = star_linear_preds, uncurry = uncurry, + specialize = specialize, star_linear_preds = star_linear_preds, fast_descrs = fast_descrs, tac_timeout = tac_timeout, evals = evals, case_names = case_names, def_table = def_table, nondef_table = nondef_table, user_nondefs = user_nondefs, @@ -941,16 +867,13 @@ skolems = skolems, special_funs = special_funs, unrolled_preds = unrolled_preds, wf_cache = wf_cache, constr_cache = constr_cache} - val scope = {hol_ctxt = hol_ctxt, binarize = binarize, - card_assigns = card_assigns, bits = bits, - bisim_depth = bisim_depth, datatypes = datatypes, ofs = ofs} - (* bool -> typ -> typ -> rep -> int list list -> term *) + val scope = + {hol_ctxt = hol_ctxt, binarize = binarize, card_assigns = card_assigns, + bits = bits, bisim_depth = bisim_depth, datatypes = datatypes, ofs = ofs} fun term_for_rep unfold = reconstruct_term unfold pool wacky_names scope sel_names rel_table bounds - (* nat -> typ -> nat -> term *) fun nth_value_of_type card T n = let - (* bool -> term *) fun aux unfold = term_for_rep unfold T T (Atom (card, 0)) [[n]] in case aux false of @@ -961,10 +884,8 @@ t | t => t end - (* nat -> typ -> term list *) val all_values = all_values_of_type pool wacky_names scope sel_names rel_table bounds - (* dtype_spec list -> dtype_spec -> bool *) fun is_codatatype_wellformed (cos : dtype_spec list) ({typ, card, ...} : dtype_spec) = let @@ -974,7 +895,6 @@ forall (not o bisimilar_values (map #typ cos) max_depth) (all_distinct_unordered_pairs_of ts) end - (* string -> Pretty.T *) fun pretty_for_assign name = let val (oper, (t1, T'), T) = @@ -998,7 +918,6 @@ [setmp_show_all_types (Syntax.pretty_term ctxt) t1, Pretty.str oper, Syntax.pretty_term ctxt t2]) end - (* dtype_spec -> Pretty.T *) fun pretty_for_datatype ({typ, card, complete, ...} : dtype_spec) = Pretty.block (Pretty.breaks (Syntax.pretty_typ ctxt (uniterize_unarize_unbox_etc_type typ) :: @@ -1012,7 +931,6 @@ (map (Syntax.pretty_term ctxt) (all_values card typ) @ (if fun_from_pair complete false then [] else [Pretty.str unrep]))])) - (* typ -> dtype_spec list *) fun integer_datatype T = [{typ = T, card = card_of_type card_assigns T, co = false, standard = true, complete = (false, false), concrete = (true, true), @@ -1035,7 +953,6 @@ (map pretty_for_datatype codatatypes)] else [] - (* bool -> string -> nut list -> Pretty.T list *) fun block_of_names show title 
names = if show andalso not (null names) then Pretty.str (title ^ plural_s_for_list names ^ ":") @@ -1062,7 +979,7 @@ | _ => raise TERM ("Nitpick_Model.reconstruct_hol_model", [Const x])) all_frees val chunks = block_of_names true "Free variable" free_names @ - block_of_names show_skolems "Skolem constant" skolem_names @ + block_of_names true "Skolem constant" skolem_names @ block_of_names true "Evaluated term" eval_names @ block_of_datatypes @ block_of_codatatypes @ block_of_names show_consts "Constant" @@ -1074,17 +991,13 @@ forall (is_codatatype_wellformed codatatypes) codatatypes) end -(* scope -> Time.time option -> nut list -> nut list -> nut NameTable.table - -> KK.raw_bound list -> term -> bool option *) fun prove_hol_model (scope as {hol_ctxt = {thy, ctxt, debug, ...}, card_assigns, ...}) auto_timeout free_names sel_names rel_table bounds prop = let val pool = Unsynchronized.ref [] - (* typ * int -> term *) fun free_type_assm (T, k) = let - (* int -> term *) fun atom j = nth_atom pool true T j k fun equation_for_atom j = HOLogic.eq_const T $ Bound 0 $ atom j val eqs = map equation_for_atom (index_seq 0 k) @@ -1093,14 +1006,12 @@ $ Abs ("x", T, foldl1 HOLogic.mk_disj eqs) val distinct_assm = distinctness_formula T (map atom (index_seq 0 k)) in s_conj (compreh_assm, distinct_assm) end - (* nut -> term *) fun free_name_assm name = HOLogic.mk_eq (Free (nickname_of name, type_of name), term_for_name pool scope sel_names rel_table bounds name) val freeT_assms = map free_type_assm (filter (is_TFree o fst) card_assigns) val model_assms = map free_name_assm free_names val assm = foldr1 s_conj (freeT_assms @ model_assms) - (* bool -> bool *) fun try_out negate = let val concl = (negate ? curry (op $) @{const Not}) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/nitpick_mono.ML --- a/src/HOL/Tools/Nitpick/nitpick_mono.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick_mono.ML Tue May 04 20:30:22 2010 +0200 @@ -54,55 +54,42 @@ exception MTYPE of string * mtyp list * typ list exception MTERM of string * mterm list -(* string -> unit *) fun print_g (_ : string) = () (* val print_g = tracing *) -(* var -> string *) val string_for_var = signed_string_of_int -(* string -> var list -> string *) fun string_for_vars sep [] = "0\<^bsub>" ^ sep ^ "\<^esub>" | string_for_vars sep xs = space_implode sep (map string_for_var xs) fun subscript_string_for_vars sep xs = if null xs then "" else "\<^bsub>" ^ string_for_vars sep xs ^ "\<^esub>" -(* sign -> string *) fun string_for_sign Plus = "+" | string_for_sign Minus = "-" -(* sign -> sign -> sign *) fun xor sn1 sn2 = if sn1 = sn2 then Plus else Minus -(* sign -> sign *) val negate = xor Minus -(* sign_atom -> string *) fun string_for_sign_atom (S sn) = string_for_sign sn | string_for_sign_atom (V x) = string_for_var x -(* literal -> string *) fun string_for_literal (x, sn) = string_for_var x ^ " = " ^ string_for_sign sn val bool_M = MType (@{type_name bool}, []) val dummy_M = MType (nitpick_prefix ^ "dummy", []) -(* mtyp -> bool *) fun is_MRec (MRec _) = true | is_MRec _ = false -(* mtyp -> mtyp * sign_atom * mtyp *) fun dest_MFun (MFun z) = z | dest_MFun M = raise MTYPE ("Nitpick_Mono.dest_MFun", [M], []) val no_prec = 100 -(* mtyp -> int *) fun precedence_of_mtype (MFun _) = 1 | precedence_of_mtype (MPair _) = 2 | precedence_of_mtype _ = no_prec -(* mtyp -> string *) val string_for_mtype = let - (* int -> mtyp -> string *) fun aux outer_prec M = let val prec = precedence_of_mtype M @@ -126,22 +113,17 @@ end in aux 0 end -(* 
mtyp -> mtyp list *) fun flatten_mtype (MPair (M1, M2)) = maps flatten_mtype [M1, M2] | flatten_mtype (MType (_, Ms)) = maps flatten_mtype Ms | flatten_mtype M = [M] -(* mterm -> bool *) fun precedence_of_mterm (MRaw _) = no_prec | precedence_of_mterm (MAbs _) = 1 | precedence_of_mterm (MApp _) = 2 -(* Proof.context -> mterm -> string *) fun string_for_mterm ctxt = let - (* mtype -> string *) fun mtype_annotation M = "\<^bsup>" ^ string_for_mtype M ^ "\<^esup>" - (* int -> mterm -> string *) fun aux outer_prec m = let val prec = precedence_of_mterm m @@ -158,7 +140,6 @@ end in aux 0 end -(* mterm -> mtyp *) fun mtype_of_mterm (MRaw (_, M)) = M | mtype_of_mterm (MAbs (_, _, M, a, m)) = MFun (M, a, mtype_of_mterm m) | mtype_of_mterm (MApp (m1, _)) = @@ -166,29 +147,24 @@ MFun (_, _, M12) => M12 | M1 => raise MTYPE ("Nitpick_Mono.mtype_of_mterm", [M1], []) -(* mterm -> mterm * mterm list *) fun strip_mcomb (MApp (m1, m2)) = strip_mcomb m1 ||> (fn ms => append ms [m2]) | strip_mcomb m = (m, []) -(* hol_context -> bool -> bool -> typ -> mdata *) fun initial_mdata hol_ctxt binarize no_harmless alpha_T = ({hol_ctxt = hol_ctxt, binarize = binarize, alpha_T = alpha_T, no_harmless = no_harmless, max_fresh = Unsynchronized.ref 0, datatype_mcache = Unsynchronized.ref [], constr_mcache = Unsynchronized.ref []} : mdata) -(* typ -> typ -> bool *) fun could_exist_alpha_subtype alpha_T (T as Type (_, Ts)) = T = alpha_T orelse (not (is_fp_iterator_type T) andalso exists (could_exist_alpha_subtype alpha_T) Ts) | could_exist_alpha_subtype alpha_T T = (T = alpha_T) -(* theory -> typ -> typ -> bool *) fun could_exist_alpha_sub_mtype _ (alpha_T as TFree _) T = could_exist_alpha_subtype alpha_T T | could_exist_alpha_sub_mtype thy alpha_T T = (T = alpha_T orelse is_datatype thy [(NONE, true)] T) -(* mtyp -> bool *) fun exists_alpha_sub_mtype MAlpha = true | exists_alpha_sub_mtype (MFun (M1, _, M2)) = exists exists_alpha_sub_mtype [M1, M2] @@ -197,7 +173,6 @@ | exists_alpha_sub_mtype (MType (_, Ms)) = exists exists_alpha_sub_mtype Ms | exists_alpha_sub_mtype (MRec _) = true -(* mtyp -> bool *) fun exists_alpha_sub_mtype_fresh MAlpha = true | exists_alpha_sub_mtype_fresh (MFun (_, V _, _)) = true | exists_alpha_sub_mtype_fresh (MFun (_, _, M2)) = @@ -208,11 +183,9 @@ exists exists_alpha_sub_mtype_fresh Ms | exists_alpha_sub_mtype_fresh (MRec _) = true -(* string * typ list -> mtyp list -> mtyp *) fun constr_mtype_for_binders z Ms = fold_rev (fn M => curry3 MFun M (S Minus)) Ms (MRec z) -(* ((string * typ list) * mtyp) list -> mtyp list -> mtyp -> mtyp *) fun repair_mtype _ _ MAlpha = MAlpha | repair_mtype cache seen (MFun (M1, a, M2)) = MFun (repair_mtype cache seen M1, a, repair_mtype cache seen M2) @@ -226,30 +199,24 @@ | M => if member (op =) seen M then MType (s, []) else repair_mtype cache (M :: seen) M -(* ((string * typ list) * mtyp) list Unsynchronized.ref -> unit *) fun repair_datatype_mcache cache = let - (* (string * typ list) * mtyp -> unit *) fun repair_one (z, M) = Unsynchronized.change cache (AList.update (op =) (z, repair_mtype (!cache) [] M)) in List.app repair_one (rev (!cache)) end -(* (typ * mtyp) list -> (styp * mtyp) list Unsynchronized.ref -> unit *) fun repair_constr_mcache dtype_cache constr_mcache = let - (* styp * mtyp -> unit *) fun repair_one (x, M) = Unsynchronized.change constr_mcache (AList.update (op =) (x, repair_mtype dtype_cache [] M)) in List.app repair_one (!constr_mcache) end -(* typ -> bool *) fun is_fin_fun_supported_type @{typ prop} = true | is_fin_fun_supported_type @{typ 
bool} = true | is_fin_fun_supported_type (Type (@{type_name option}, _)) = true | is_fin_fun_supported_type _ = false -(* typ -> typ -> term -> term option *) fun fin_fun_body _ _ (t as @{term False}) = SOME t | fin_fun_body _ _ (t as Const (@{const_name None}, _)) = SOME t | fin_fun_body dom_T ran_T @@ -265,7 +232,6 @@ $ (Const (@{const_name unknown}, ran_T)) $ (t0 $ t1 $ t2 $ t3))) | fin_fun_body _ _ _ = NONE -(* mdata -> bool -> typ -> typ -> mtyp * sign_atom * mtyp *) fun fresh_mfun_for_fun_type (mdata as {max_fresh, ...} : mdata) all_minus T1 T2 = let @@ -277,12 +243,10 @@ else S Minus in (M1, a, M2) end -(* mdata -> bool -> typ -> mtyp *) and fresh_mtype_for_type (mdata as {hol_ctxt as {thy, ...}, binarize, alpha_T, datatype_mcache, constr_mcache, ...}) all_minus = let - (* typ -> mtyp *) fun do_type T = if T = alpha_T then MAlpha @@ -329,21 +293,17 @@ | _ => MType (Refute.string_of_typ T, []) in do_type end -(* mtyp -> mtyp list *) fun prodM_factors (MPair (M1, M2)) = maps prodM_factors [M1, M2] | prodM_factors M = [M] -(* mtyp -> mtyp list * mtyp *) fun curried_strip_mtype (MFun (M1, _, M2)) = curried_strip_mtype M2 |>> append (prodM_factors M1) | curried_strip_mtype M = ([], M) -(* string -> mtyp -> mtyp *) fun sel_mtype_from_constr_mtype s M = let val (arg_Ms, dataM) = curried_strip_mtype M in MFun (dataM, S Minus, case sel_no_from_name s of ~1 => bool_M | n => nth arg_Ms n) end -(* mdata -> styp -> mtyp *) fun mtype_for_constr (mdata as {hol_ctxt = {thy, ...}, alpha_T, constr_mcache, ...}) (x as (_, T)) = if could_exist_alpha_sub_mtype thy alpha_T T then @@ -362,14 +322,11 @@ x |> binarized_and_boxed_constr_for_sel hol_ctxt binarize |> mtype_for_constr mdata |> sel_mtype_from_constr_mtype s -(* literal list -> sign_atom -> sign_atom *) fun resolve_sign_atom lits (V x) = x |> AList.lookup (op =) lits |> Option.map S |> the_default (V x) | resolve_sign_atom _ a = a -(* literal list -> mtyp -> mtyp *) fun resolve_mtype lits = let - (* mtyp -> mtyp *) fun aux MAlpha = MAlpha | aux (MFun (M1, a, M2)) = MFun (aux M1, resolve_sign_atom lits a, aux M2) | aux (MPair Mp) = MPair (pairself aux Mp) @@ -384,24 +341,19 @@ type constraint_set = literal list * comp list * sign_expr list -(* comp_op -> string *) fun string_for_comp_op Eq = "=" | string_for_comp_op Leq = "\" -(* sign_expr -> string *) fun string_for_sign_expr [] = "\" | string_for_sign_expr lits = space_implode " \ " (map string_for_literal lits) -(* literal -> literal list option -> literal list option *) fun do_literal _ NONE = NONE | do_literal (x, sn) (SOME lits) = case AList.lookup (op =) lits x of SOME sn' => if sn = sn' then SOME lits else NONE | NONE => SOME ((x, sn) :: lits) -(* comp_op -> var list -> sign_atom -> sign_atom -> literal list * comp list - -> (literal list * comp list) option *) fun do_sign_atom_comp Eq [] a1 a2 (accum as (lits, comps)) = (case (a1, a2) of (S sn1, S sn2) => if sn1 = sn2 then SOME accum else NONE @@ -419,8 +371,6 @@ | do_sign_atom_comp cmp xs a1 a2 (lits, comps) = SOME (lits, insert (op =) (a1, a2, cmp, xs) comps) -(* comp -> var list -> mtyp -> mtyp -> (literal list * comp list) option - -> (literal list * comp list) option *) fun do_mtype_comp _ _ _ _ NONE = NONE | do_mtype_comp _ _ MAlpha MAlpha accum = accum | do_mtype_comp Eq xs (MFun (M11, a1, M12)) (MFun (M21, a2, M22)) @@ -450,7 +400,6 @@ raise MTYPE ("Nitpick_Mono.do_mtype_comp (" ^ string_for_comp_op cmp ^ ")", [M1, M2], []) -(* comp_op -> mtyp -> mtyp -> constraint_set -> constraint_set *) fun add_mtype_comp cmp M1 M2 ((lits, 
comps, sexps) : constraint_set) = (print_g ("*** Add " ^ string_for_mtype M1 ^ " " ^ string_for_comp_op cmp ^ " " ^ string_for_mtype M2); @@ -458,12 +407,9 @@ NONE => (print_g "**** Unsolvable"; raise UNSOLVABLE ()) | SOME (lits, comps) => (lits, comps, sexps)) -(* mtyp -> mtyp -> constraint_set -> constraint_set *) val add_mtypes_equal = add_mtype_comp Eq val add_is_sub_mtype = add_mtype_comp Leq -(* sign -> sign_expr -> mtyp -> (literal list * sign_expr list) option - -> (literal list * sign_expr list) option *) fun do_notin_mtype_fv _ _ _ NONE = NONE | do_notin_mtype_fv Minus _ MAlpha accum = accum | do_notin_mtype_fv Plus [] MAlpha _ = NONE @@ -499,7 +445,6 @@ | do_notin_mtype_fv _ _ M _ = raise MTYPE ("Nitpick_Mono.do_notin_mtype_fv", [M], []) -(* sign -> mtyp -> constraint_set -> constraint_set *) fun add_notin_mtype_fv sn M ((lits, comps, sexps) : constraint_set) = (print_g ("*** Add " ^ string_for_mtype M ^ " is " ^ (case sn of Minus => "concrete" | Plus => "complete") ^ "."); @@ -507,31 +452,23 @@ NONE => (print_g "**** Unsolvable"; raise UNSOLVABLE ()) | SOME (lits, sexps) => (lits, comps, sexps)) -(* mtyp -> constraint_set -> constraint_set *) val add_mtype_is_concrete = add_notin_mtype_fv Minus val add_mtype_is_complete = add_notin_mtype_fv Plus val bool_from_minus = true -(* sign -> bool *) fun bool_from_sign Plus = not bool_from_minus | bool_from_sign Minus = bool_from_minus -(* bool -> sign *) fun sign_from_bool b = if b = bool_from_minus then Minus else Plus -(* literal -> PropLogic.prop_formula *) fun prop_for_literal (x, sn) = (not (bool_from_sign sn) ? PropLogic.Not) (PropLogic.BoolVar x) -(* sign_atom -> PropLogic.prop_formula *) fun prop_for_sign_atom_eq (S sn', sn) = if sn = sn' then PropLogic.True else PropLogic.False | prop_for_sign_atom_eq (V x, sn) = prop_for_literal (x, sn) -(* sign_expr -> PropLogic.prop_formula *) fun prop_for_sign_expr xs = PropLogic.exists (map prop_for_literal xs) -(* var list -> sign -> PropLogic.prop_formula *) fun prop_for_exists_eq xs sn = PropLogic.exists (map (fn x => prop_for_literal (x, sn)) xs) -(* comp -> PropLogic.prop_formula *) fun prop_for_comp (a1, a2, Eq, []) = PropLogic.SAnd (prop_for_comp (a1, a2, Leq, []), prop_for_comp (a2, a1, Leq, [])) @@ -541,7 +478,6 @@ | prop_for_comp (a1, a2, cmp, xs) = PropLogic.SOr (prop_for_exists_eq xs Minus, prop_for_comp (a1, a2, cmp, [])) -(* var -> (int -> bool option) -> literal list -> literal list *) fun literals_from_assignments max_var assigns lits = fold (fn x => fn accum => if AList.defined (op =) lits x then @@ -550,18 +486,15 @@ SOME b => (x, sign_from_bool b) :: accum | NONE => accum) (max_var downto 1) lits -(* comp -> string *) fun string_for_comp (a1, a2, cmp, xs) = string_for_sign_atom a1 ^ " " ^ string_for_comp_op cmp ^ subscript_string_for_vars " \ " xs ^ " " ^ string_for_sign_atom a2 -(* literal list -> comp list -> sign_expr list -> unit *) fun print_problem lits comps sexps = print_g ("*** Problem:\n" ^ cat_lines (map string_for_literal lits @ map string_for_comp comps @ map string_for_sign_expr sexps)) -(* literal list -> unit *) fun print_solution lits = let val (pos, neg) = List.partition (curry (op =) Plus o snd) lits in print_g ("*** Solution:\n" ^ @@ -569,10 +502,8 @@ "-: " ^ commas (map (string_for_var o fst) neg)) end -(* var -> constraint_set -> literal list option *) fun solve max_var (lits, comps, sexps) = let - (* (int -> bool option) -> literal list option *) fun do_assigns assigns = SOME (literals_from_assignments max_var assigns lits |> tap print_solution) 
@@ -607,27 +538,21 @@ val initial_gamma = {bound_Ts = [], bound_Ms = [], frees = [], consts = []} -(* typ -> mtyp -> mtype_context -> mtype_context *) fun push_bound T M {bound_Ts, bound_Ms, frees, consts} = {bound_Ts = T :: bound_Ts, bound_Ms = M :: bound_Ms, frees = frees, consts = consts} -(* mtype_context -> mtype_context *) fun pop_bound {bound_Ts, bound_Ms, frees, consts} = {bound_Ts = tl bound_Ts, bound_Ms = tl bound_Ms, frees = frees, consts = consts} handle List.Empty => initial_gamma (* FIXME: needed? *) -(* mdata -> term -> accumulator -> mterm * accumulator *) fun consider_term (mdata as {hol_ctxt as {thy, ctxt, stds, fast_descrs, def_table, ...}, alpha_T, max_fresh, ...}) = let - (* typ -> mtyp *) val mtype_for = fresh_mtype_for_type mdata false - (* mtyp -> mtyp *) fun plus_set_mtype_for_dom M = MFun (M, S (if exists_alpha_sub_mtype M then Plus else Minus), bool_M) - (* typ -> accumulator -> mterm * accumulator *) fun do_all T (gamma, cset) = let val abs_M = mtype_for (domain_type (domain_type T)) @@ -656,7 +581,6 @@ let val set_T = domain_type T val set_M = mtype_for set_T - (* typ -> mtyp *) fun custom_mtype_for (T as Type (@{type_name fun}, [T1, T2])) = if T = set_T then set_M else MFun (custom_mtype_for T1, S Minus, custom_mtype_for T2) @@ -664,20 +588,16 @@ in (custom_mtype_for T, (gamma, cset |> add_mtype_is_concrete set_M)) end - (* typ -> accumulator -> mtyp * accumulator *) fun do_pair_constr T accum = case mtype_for (nth_range_type 2 T) of M as MPair (a_M, b_M) => (MFun (a_M, S Minus, MFun (b_M, S Minus, M)), accum) | M => raise MTYPE ("Nitpick_Mono.consider_term.do_pair_constr", [M], []) - (* int -> typ -> accumulator -> mtyp * accumulator *) fun do_nth_pair_sel n T = case mtype_for (domain_type T) of M as MPair (a_M, b_M) => pair (MFun (M, S Minus, if n = 0 then a_M else b_M)) | M => raise MTYPE ("Nitpick_Mono.consider_term.do_nth_pair_sel", [M], []) - (* term -> string -> typ -> term -> term -> term -> accumulator - -> mterm * accumulator *) fun do_bounded_quantifier t0 abs_s abs_T connective_t bound_t body_t accum = let val abs_M = mtype_for abs_T @@ -697,7 +617,6 @@ MApp (bound_m, MRaw (Bound 0, M1))), body_m))), accum) end - (* term -> accumulator -> mterm * accumulator *) and do_term t (accum as (gamma as {bound_Ts, bound_Ms, frees, consts}, cset)) = (case t of @@ -747,7 +666,6 @@ | @{const_name converse} => let val x = Unsynchronized.inc max_fresh - (* typ -> mtyp *) fun mtype_for_set T = MFun (mtype_for (domain_type T), V x, bool_M) val ab_set_M = domain_type T |> mtype_for_set @@ -757,7 +675,6 @@ | @{const_name rel_comp} => let val x = Unsynchronized.inc max_fresh - (* typ -> mtyp *) fun mtype_for_set T = MFun (mtype_for (domain_type T), V x, bool_M) val bc_set_M = domain_type T |> mtype_for_set @@ -783,7 +700,6 @@ | @{const_name Sigma} => let val x = Unsynchronized.inc max_fresh - (* typ -> mtyp *) fun mtype_for_set T = MFun (mtype_for (domain_type T), V x, bool_M) val a_set_T = domain_type T @@ -891,14 +807,12 @@ string_for_mterm ctxt m)) in do_term end -(* int -> mtyp -> accumulator -> accumulator *) fun force_minus_funs 0 _ = I | force_minus_funs n (M as MFun (M1, _, M2)) = add_mtypes_equal M (MFun (M1, S Minus, M2)) #> force_minus_funs (n - 1) M2 | force_minus_funs _ M = raise MTYPE ("Nitpick_Mono.force_minus_funs", [M], []) -(* mdata -> bool -> styp -> term -> term -> mterm * accumulator *) fun consider_general_equals mdata def (x as (_, T)) t1 t2 accum = let val (m1, accum) = consider_term mdata t1 accum @@ -918,17 +832,12 @@ accum) end -(* mdata -> 
sign -> term -> accumulator -> mterm * accumulator *) fun consider_general_formula (mdata as {hol_ctxt = {ctxt, ...}, ...}) = let - (* typ -> mtyp *) val mtype_for = fresh_mtype_for_type mdata false - (* term -> accumulator -> mterm * accumulator *) val do_term = consider_term mdata - (* sign -> term -> accumulator -> mterm * accumulator *) fun do_formula sn t accum = let - (* styp -> string -> typ -> term -> mterm * accumulator *) fun do_quantifier (quant_x as (quant_s, _)) abs_s abs_T body_t = let val abs_M = mtype_for abs_T @@ -944,7 +853,6 @@ MAbs (abs_s, abs_T, abs_M, S Minus, body_m)), accum |>> pop_bound) end - (* styp -> term -> term -> mterm * accumulator *) fun do_equals x t1 t2 = case sn of Plus => do_term t accum @@ -1005,7 +913,6 @@ [@{const_name ord_class.less}, @{const_name ord_class.less_eq}] val bounteous_consts = [@{const_name bisim}] -(* mdata -> term -> bool *) fun is_harmless_axiom ({no_harmless = true, ...} : mdata) _ = false | is_harmless_axiom {hol_ctxt = {thy, stds, fast_descrs, ...}, ...} t = Term.add_consts t [] @@ -1013,12 +920,10 @@ |> (forall (member (op =) harmless_consts o original_name o fst) orf exists (member (op =) bounteous_consts o fst)) -(* mdata -> term -> accumulator -> mterm * accumulator *) fun consider_nondefinitional_axiom mdata t = if is_harmless_axiom mdata t then pair (MRaw (t, dummy_M)) else consider_general_formula mdata Plus t -(* mdata -> term -> accumulator -> mterm * accumulator *) fun consider_definitional_axiom (mdata as {hol_ctxt = {thy, ...}, ...}) t = if not (is_constr_pattern_formula thy t) then consider_nondefinitional_axiom mdata t @@ -1026,11 +931,8 @@ pair (MRaw (t, dummy_M)) else let - (* typ -> mtyp *) val mtype_for = fresh_mtype_for_type mdata false - (* term -> accumulator -> mterm * accumulator *) val do_term = consider_term mdata - (* term -> string -> typ -> term -> accumulator -> mterm * accumulator *) fun do_all quant_t abs_s abs_T body_t accum = let val abs_M = mtype_for abs_T @@ -1043,7 +945,6 @@ MAbs (abs_s, abs_T, abs_M, S Minus, body_m)), accum |>> pop_bound) end - (* term -> term -> term -> accumulator -> mterm * accumulator *) and do_conjunction t0 t1 t2 accum = let val (m1, accum) = do_formula t1 accum @@ -1058,7 +959,6 @@ in (MApp (MApp (MRaw (t0, mtype_for (fastype_of t0)), m1), m2), accum) end - (* term -> accumulator -> accumulator *) and do_formula t accum = case t of (t0 as Const (@{const_name all}, _)) $ Abs (s1, T1, t1) => @@ -1083,22 +983,17 @@ \do_formula", [t]) in do_formula t end -(* Proof.context -> literal list -> term -> mtyp -> string *) fun string_for_mtype_of_term ctxt lits t M = Syntax.string_of_term ctxt t ^ " : " ^ string_for_mtype (resolve_mtype lits M) -(* theory -> literal list -> mtype_context -> unit *) fun print_mtype_context ctxt lits ({frees, consts, ...} : mtype_context) = map (fn (x, M) => string_for_mtype_of_term ctxt lits (Free x) M) frees @ map (fn (x, M) => string_for_mtype_of_term ctxt lits (Const x) M) consts |> cat_lines |> print_g -(* ('a -> 'b -> 'c * 'd) -> 'a -> 'c list * 'b -> 'c list * 'd *) fun amass f t (ms, accum) = let val (m, accum) = f t accum in (m :: ms, accum) end -(* string -> bool -> hol_context -> bool -> typ -> term list * term list - -> (literal list * (mterm list * mterm list) * (styp * mtyp) list) option *) fun infer which no_harmless (hol_ctxt as {ctxt, ...}) binarize alpha_T (nondef_ts, def_ts) = let @@ -1127,15 +1022,11 @@ | MTERM (loc, ms) => raise BAD (loc, commas (map (string_for_mterm ctxt) ms)) -(* hol_context -> bool -> typ -> term list * 
term list -> bool *) val formulas_monotonic = is_some oooo infer "Monotonicity" false -(* typ -> typ -> styp *) fun fin_fun_constr T1 T2 = (@{const_name FinFun}, (T1 --> T2) --> Type (@{type_name fin_fun}, [T1, T2])) -(* hol_context -> bool -> (typ option * bool option) list -> typ - -> term list * term list -> term list * term list *) fun finitize_funs (hol_ctxt as {thy, stds, fast_descrs, constr_cache, ...}) binarize finitizes alpha_T tsp = case infer "Finiteness" true hol_ctxt binarize alpha_T tsp of @@ -1144,12 +1035,10 @@ tsp else let - (* typ -> sign_atom -> bool *) fun should_finitize T a = case triple_lookup (type_match thy) finitizes T of SOME (SOME false) => false | _ => resolve_sign_atom lits a = S Plus - (* typ -> mtyp -> typ *) fun type_from_mtype T M = case (M, T) of (MAlpha, _) => T @@ -1161,12 +1050,10 @@ | (MType _, _) => T | _ => raise MTYPE ("Nitpick_Mono.finitize_funs.type_from_mtype", [M], [T]) - (* styp -> styp *) fun finitize_constr (x as (s, T)) = (s, case AList.lookup (op =) constr_mtypes x of SOME M => type_from_mtype T M | NONE => T) - (* typ list -> mterm -> term *) fun term_from_mterm Ts m = case m of MRaw (t, M) => diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/nitpick_nut.ML --- a/src/HOL/Tools/Nitpick/nitpick_nut.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick_nut.ML Tue May 04 20:30:22 2010 +0200 @@ -205,7 +205,6 @@ exception NUT of string * nut list -(* cst -> string *) fun string_for_cst Unity = "Unity" | string_for_cst False = "False" | string_for_cst True = "True" @@ -225,7 +224,6 @@ | string_for_cst NatToInt = "NatToInt" | string_for_cst IntToNat = "IntToNat" -(* op1 -> string *) fun string_for_op1 Not = "Not" | string_for_op1 Finite = "Finite" | string_for_op1 Converse = "Converse" @@ -237,7 +235,6 @@ | string_for_op1 Second = "Second" | string_for_op1 Cast = "Cast" -(* op2 -> string *) fun string_for_op2 All = "All" | string_for_op2 Exist = "Exist" | string_for_op2 Or = "Or" @@ -258,14 +255,11 @@ | string_for_op2 Apply = "Apply" | string_for_op2 Lambda = "Lambda" -(* op3 -> string *) fun string_for_op3 Let = "Let" | string_for_op3 If = "If" -(* int -> Proof.context -> nut -> string *) fun basic_string_for_nut indent ctxt u = let - (* nut -> string *) val sub = basic_string_for_nut (indent + 1) ctxt in (if indent = 0 then "" else "\n" ^ implode (replicate (2 * indent) " ")) ^ @@ -313,17 +307,14 @@ Syntax.string_of_typ ctxt T ^ " " ^ string_for_rep R) ^ ")" end -(* Proof.context -> nut -> string *) val string_for_nut = basic_string_for_nut 0 -(* nut -> bool *) fun inline_nut (Op1 _) = false | inline_nut (Op2 _) = false | inline_nut (Op3 _) = false | inline_nut (Tuple (_, _, us)) = forall inline_nut us | inline_nut _ = true -(* nut -> typ *) fun type_of (Cst (_, T, _)) = T | type_of (Op1 (_, T, _, _)) = T | type_of (Op2 (_, T, _, _, _)) = T @@ -338,7 +329,6 @@ | type_of (RelReg (_, T, _)) = T | type_of (FormulaReg (_, T, _)) = T -(* nut -> rep *) fun rep_of (Cst (_, _, R)) = R | rep_of (Op1 (_, _, R, _)) = R | rep_of (Op2 (_, _, R, _, _)) = R @@ -353,7 +343,6 @@ | rep_of (RelReg (_, _, R)) = R | rep_of (FormulaReg (_, _, R)) = R -(* nut -> string *) fun nickname_of (BoundName (_, _, _, nick)) = nick | nickname_of (FreeName (s, _, _)) = s | nickname_of (ConstName (s, _, _)) = s @@ -361,7 +350,6 @@ | nickname_of (FreeRel (_, _, _, nick)) = nick | nickname_of u = raise NUT ("Nitpick_Nut.nickname_of", [u]) -(* nut -> bool *) fun is_skolem_name u = space_explode name_sep (nickname_of u) |> exists (String.isPrefix 
skolem_prefix) @@ -369,11 +357,9 @@ fun is_eval_name u = String.isPrefix eval_prefix (nickname_of u) handle NUT ("Nitpick_Nut.nickname_of", _) => false -(* cst -> nut -> bool *) fun is_Cst cst (Cst (cst', _, _)) = (cst = cst') | is_Cst _ _ = false -(* (nut -> 'a -> 'a) -> nut -> 'a -> 'a *) fun fold_nut f u = case u of Op1 (_, _, _, u1) => fold_nut f u1 @@ -382,7 +368,6 @@ | Tuple (_, _, us) => fold (fold_nut f) us | Construct (us', _, _, us) => fold (fold_nut f) us #> fold (fold_nut f) us' | _ => f u -(* (nut -> nut) -> nut -> nut *) fun map_nut f u = case u of Op1 (oper, T, R, u1) => Op1 (oper, T, R, map_nut f u1) @@ -394,7 +379,6 @@ Construct (map (map_nut f) us', T, R, map (map_nut f) us) | _ => f u -(* nut * nut -> order *) fun name_ord (BoundName (j1, _, _, _), BoundName (j2, _, _, _)) = int_ord (j1, j2) | name_ord (BoundName _, _) = LESS @@ -411,24 +395,19 @@ | ord => ord) | name_ord (u1, u2) = raise NUT ("Nitpick_Nut.name_ord", [u1, u2]) -(* nut -> nut -> int *) fun num_occs_in_nut needle_u stack_u = fold_nut (fn u => if u = needle_u then Integer.add 1 else I) stack_u 0 -(* nut -> nut -> bool *) val is_subterm_of = not_equal 0 oo num_occs_in_nut -(* nut -> nut -> nut -> nut *) fun substitute_in_nut needle_u needle_u' = map_nut (fn u => if u = needle_u then needle_u' else u) -(* nut -> nut list * nut list -> nut list * nut list *) val add_free_and_const_names = fold_nut (fn u => case u of FreeName _ => apfst (insert (op =) u) | ConstName _ => apsnd (insert (op =) u) | _ => I) -(* nut -> rep -> nut *) fun modify_name_rep (BoundName (j, T, _, nick)) R = BoundName (j, T, R, nick) | modify_name_rep (FreeName (s, T, _)) R = FreeName (s, T, R) | modify_name_rep (ConstName (s, T, _)) R = ConstName (s, T, R) @@ -436,18 +415,15 @@ structure NameTable = Table(type key = nut val ord = name_ord) -(* 'a NameTable.table -> nut -> 'a *) fun the_name table name = case NameTable.lookup table name of SOME u => u | NONE => raise NUT ("Nitpick_Nut.the_name", [name]) -(* nut NameTable.table -> nut -> KK.n_ary_index *) fun the_rel table name = case the_name table name of FreeRel (x, _, _, _) => x | u => raise NUT ("Nitpick_Nut.the_rel", [u]) -(* typ * term -> typ * term *) fun mk_fst (_, Const (@{const_name Pair}, T) $ t1 $ _) = (domain_type T, t1) | mk_fst (T, t) = let val res_T = fst (HOLogic.dest_prodT T) in @@ -459,23 +435,17 @@ let val res_T = snd (HOLogic.dest_prodT T) in (res_T, Const (@{const_name snd}, T --> res_T) $ t) end -(* typ * term -> (typ * term) list *) fun factorize (z as (Type (@{type_name "*"}, _), _)) = maps factorize [mk_fst z, mk_snd z] | factorize z = [z] -(* hol_context -> op2 -> term -> nut *) fun nut_from_term (hol_ctxt as {thy, stds, fast_descrs, ...}) eq = let - (* string list -> typ list -> term -> nut *) fun aux eq ss Ts t = let - (* term -> nut *) val sub = aux Eq ss Ts val sub' = aux eq ss Ts - (* string -> typ -> term -> nut *) fun sub_abs s T = aux eq (s :: ss) (T :: Ts) - (* typ -> term -> term -> nut *) fun sub_equals T t1 t2 = let val (binder_Ts, body_T) = strip_type (domain_type T) @@ -498,7 +468,6 @@ else Op2 (eq, bool_T, Any, aux Eq ss Ts t1, aux Eq ss Ts t2) end - (* op2 -> string -> typ -> term -> nut *) fun do_quantifier quant s T t1 = let val bound_u = BoundName (length Ts, T, Any, s) @@ -509,21 +478,18 @@ else body_u end - (* term -> term list -> nut *) fun do_apply t0 ts = let val (ts', t2) = split_last ts val t1 = list_comb (t0, ts') val T1 = fastype_of1 (Ts, t1) in Op2 (Apply, range_type T1, Any, sub t1, sub t2) end - (* op2 -> string -> styp -> term -> 
nut *) fun do_description_operator oper undef_s (x as (_, T)) t1 = if fast_descrs then Op2 (oper, range_type T, Any, sub t1, sub (Const (undef_s, range_type T))) else do_apply (Const x) [t1] - (* styp -> term list -> nut *) fun do_construct (x as (_, T)) ts = case num_binder_types T - length ts of 0 => Construct (map ((fn (s', T') => ConstName (s', T', Any)) @@ -716,21 +682,16 @@ end in aux eq [] [] end -(* scope -> typ -> rep *) fun rep_for_abs_fun scope T = let val (R1, R2) = best_non_opt_symmetric_reps_for_fun_type scope T in Func (R1, (card_of_rep R1 <> card_of_rep R2 ? Opt) R2) end -(* scope -> nut -> nut list * rep NameTable.table - -> nut list * rep NameTable.table *) fun choose_rep_for_free_var scope v (vs, table) = let val R = best_non_opt_set_rep_for_type scope (type_of v) val v = modify_name_rep v R in (v :: vs, NameTable.update (v, R) table) end -(* scope -> bool -> nut -> nut list * rep NameTable.table - -> nut list * rep NameTable.table *) fun choose_rep_for_const (scope as {hol_ctxt = {thy, ...}, ...}) all_exact v (vs, table) = let @@ -756,16 +717,11 @@ val v = modify_name_rep v R in (v :: vs, NameTable.update (v, R) table) end -(* scope -> nut list -> rep NameTable.table -> nut list * rep NameTable.table *) fun choose_reps_for_free_vars scope vs table = fold (choose_rep_for_free_var scope) vs ([], table) -(* scope -> bool -> nut list -> rep NameTable.table - -> nut list * rep NameTable.table *) fun choose_reps_for_consts scope all_exact vs table = fold (choose_rep_for_const scope all_exact) vs ([], table) -(* scope -> styp -> int -> nut list * rep NameTable.table - -> nut list * rep NameTable.table *) fun choose_rep_for_nth_sel_for_constr (scope as {hol_ctxt, binarize, ...}) (x as (_, T)) n (vs, table) = let @@ -778,21 +734,15 @@ best_opt_set_rep_for_type scope T' |> unopt_rep val v = ConstName (s', T', R') in (v :: vs, NameTable.update (v, R') table) end -(* scope -> styp -> nut list * rep NameTable.table - -> nut list * rep NameTable.table *) fun choose_rep_for_sels_for_constr scope (x as (_, T)) = fold_rev (choose_rep_for_nth_sel_for_constr scope x) (~1 upto num_sels_for_constr_type T - 1) -(* scope -> dtype_spec -> nut list * rep NameTable.table - -> nut list * rep NameTable.table *) fun choose_rep_for_sels_of_datatype _ ({deep = false, ...} : dtype_spec) = I | choose_rep_for_sels_of_datatype scope {constrs, ...} = fold_rev (choose_rep_for_sels_for_constr scope o #const) constrs -(* scope -> rep NameTable.table -> nut list * rep NameTable.table *) fun choose_reps_for_all_sels (scope as {datatypes, ...}) = fold (choose_rep_for_sels_of_datatype scope) datatypes o pair [] -(* scope -> nut -> rep NameTable.table -> rep NameTable.table *) fun choose_rep_for_bound_var scope v table = let val R = best_one_rep_for_type scope (type_of v) in NameTable.update (v, R) table @@ -802,7 +752,6 @@ three-valued logic, it would evaluate to a unrepresentable value ("Unrep") according to the HOL semantics. For example, "Suc n" is constructive if "n" is representable or "Unrep", because unknown implies "Unrep". 
*) -(* nut -> bool *) fun is_constructive u = is_Cst Unrep u orelse (not (is_fun_type (type_of u)) andalso not (is_opt_rep (rep_of u))) orelse @@ -817,14 +766,11 @@ | Construct (_, _, _, us) => forall is_constructive us | _ => false -(* nut -> nut *) fun optimize_unit u = if rep_of u = Unit then Cst (Unity, type_of u, Unit) else u -(* typ -> rep -> nut *) fun unknown_boolean T R = Cst (case R of Formula Pos => False | Formula Neg => True | _ => Unknown, T, R) -(* nut -> bool *) fun is_fully_representable_set u = not (is_opt_rep (rep_of u)) andalso case u of @@ -835,7 +781,6 @@ forall is_fully_representable_set [u1, u2] | _ => false -(* op1 -> typ -> rep -> nut -> nut *) fun s_op1 oper T R u1 = ((if oper = Not then if is_Cst True u1 then Cst (False, T, R) @@ -845,7 +790,6 @@ raise SAME ()) handle SAME () => Op1 (oper, T, R, u1)) |> optimize_unit -(* op2 -> typ -> rep -> nut -> nut -> nut *) fun s_op2 oper T R u1 u2 = ((case oper of Or => @@ -886,7 +830,6 @@ | _ => raise SAME ()) handle SAME () => Op2 (oper, T, R, u1, u2)) |> optimize_unit -(* op3 -> typ -> rep -> nut -> nut -> nut -> nut *) fun s_op3 oper T R u1 u2 u3 = ((case oper of Let => @@ -897,12 +840,10 @@ | _ => raise SAME ()) handle SAME () => Op3 (oper, T, R, u1, u2, u3)) |> optimize_unit -(* typ -> rep -> nut list -> nut *) fun s_tuple T R us = (if exists (is_Cst Unrep) us then Cst (Unrep, T, R) else Tuple (T, R, us)) |> optimize_unit -(* theory -> nut -> nut *) fun optimize_nut u = case u of Op1 (oper, T, R, u1) => s_op1 oper T R (optimize_nut u1) @@ -914,35 +855,26 @@ | Construct (us', T, R, us) => Construct (us', T, R, map optimize_nut us) | _ => optimize_unit u -(* (nut -> 'a) -> nut -> 'a list *) fun untuple f (Tuple (_, _, us)) = maps (untuple f) us | untuple f u = if rep_of u = Unit then [] else [f u] -(* scope -> bool -> rep NameTable.table -> bool -> nut -> nut *) fun choose_reps_in_nut (scope as {card_assigns, bits, datatypes, ofs, ...}) unsound table def = let val bool_atom_R = Atom (2, offset_of_type ofs bool_T) - (* polarity -> bool -> rep *) fun bool_rep polar opt = if polar = Neut andalso opt then Opt bool_atom_R else Formula polar - (* nut -> nut -> nut *) fun triad u1 u2 = s_op2 Triad (type_of u1) (Opt bool_atom_R) u1 u2 - (* (polarity -> nut) -> nut *) fun triad_fn f = triad (f Pos) (f Neg) - (* rep NameTable.table -> bool -> polarity -> nut -> nut -> nut *) fun unrepify_nut_in_nut table def polar needle_u = let val needle_T = type_of needle_u in substitute_in_nut needle_u (Cst (if is_fun_type needle_T then Unknown else Unrep, needle_T, Any)) #> aux table def polar end - (* rep NameTable.table -> bool -> polarity -> nut -> nut *) and aux table def polar u = let - (* bool -> polarity -> nut -> nut *) val gsub = aux table - (* nut -> nut *) val sub = gsub false Neut in case u of @@ -1050,15 +982,12 @@ let val u1' = sub u1 val u2' = sub u2 - (* unit -> nut *) fun non_opt_case () = s_op2 Eq T (Formula polar) u1' u2' - (* unit -> nut *) fun opt_opt_case () = if polar = Neut then triad_fn (fn polar => s_op2 Eq T (Formula polar) u1' u2') else non_opt_case () - (* nut -> nut *) fun hybrid_case u = (* hackish optimization *) if is_constructive u then s_op2 Eq T (Formula Neut) u1' u2' @@ -1275,35 +1204,27 @@ |> optimize_unit in aux table def Pos end -(* int -> KK.n_ary_index list -> KK.n_ary_index list - -> int * KK.n_ary_index list *) fun fresh_n_ary_index n [] ys = (0, (n, 1) :: ys) | fresh_n_ary_index n ((m, j) :: xs) ys = if m = n then (j, ys @ ((m, j + 1) :: xs)) else fresh_n_ary_index n xs ((m, j) :: ys) -(* int -> 
name_pool -> int * name_pool *) fun fresh_rel n {rels, vars, formula_reg, rel_reg} = let val (j, rels') = fresh_n_ary_index n rels [] in (j, {rels = rels', vars = vars, formula_reg = formula_reg, rel_reg = rel_reg}) end -(* int -> name_pool -> int * name_pool *) fun fresh_var n {rels, vars, formula_reg, rel_reg} = let val (j, vars') = fresh_n_ary_index n vars [] in (j, {rels = rels, vars = vars', formula_reg = formula_reg, rel_reg = rel_reg}) end -(* int -> name_pool -> int * name_pool *) fun fresh_formula_reg {rels, vars, formula_reg, rel_reg} = (formula_reg, {rels = rels, vars = vars, formula_reg = formula_reg + 1, rel_reg = rel_reg}) -(* int -> name_pool -> int * name_pool *) fun fresh_rel_reg {rels, vars, formula_reg, rel_reg} = (rel_reg, {rels = rels, vars = vars, formula_reg = formula_reg, rel_reg = rel_reg + 1}) -(* nut -> nut list * name_pool * nut NameTable.table - -> nut list * name_pool * nut NameTable.table *) fun rename_plain_var v (ws, pool, table) = let val is_formula = (rep_of v = Formula Neut) @@ -1313,7 +1234,6 @@ val w = constr (j, type_of v, rep_of v) in (w :: ws, pool, NameTable.update (v, w) table) end -(* typ -> rep -> nut list -> nut *) fun shape_tuple (T as Type (@{type_name "*"}, [T1, T2])) (R as Struct [R1, R2]) us = let val arity1 = arity_of_rep R1 in @@ -1327,8 +1247,6 @@ | shape_tuple T Unit [] = Cst (Unity, T, Unit) | shape_tuple _ _ us = raise NUT ("Nitpick_Nut.shape_tuple", us) -(* bool -> nut -> nut list * name_pool * nut NameTable.table - -> nut list * name_pool * nut NameTable.table *) fun rename_n_ary_var rename_free v (ws, pool, table) = let val T = type_of v @@ -1370,15 +1288,12 @@ in (w :: ws, pool, NameTable.update (v, w) table) end end -(* nut list -> name_pool -> nut NameTable.table - -> nut list * name_pool * nut NameTable.table *) fun rename_free_vars vs pool table = let val vs = filter (not_equal Unit o rep_of) vs val (vs, pool, table) = fold (rename_n_ary_var true) vs ([], pool, table) in (rev vs, pool, table) end -(* name_pool -> nut NameTable.table -> nut -> nut *) fun rename_vars_in_nut pool table u = case u of Cst _ => u diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/nitpick_peephole.ML --- a/src/HOL/Tools/Nitpick/nitpick_peephole.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick_peephole.ML Tue May 04 20:30:22 2010 +0200 @@ -14,11 +14,11 @@ type decl = Kodkod.decl type expr_assign = Kodkod.expr_assign - type name_pool = { - rels: n_ary_index list, - vars: n_ary_index list, - formula_reg: int, - rel_reg: int} + type name_pool = + {rels: n_ary_index list, + vars: n_ary_index list, + formula_reg: int, + rel_reg: int} val initial_pool : name_pool val not3_rel : n_ary_index @@ -51,39 +51,38 @@ val num_seq : int -> int -> int_expr list val s_and : formula -> formula -> formula - type kodkod_constrs = { - kk_all: decl list -> formula -> formula, - kk_exist: decl list -> formula -> formula, - kk_formula_let: expr_assign list -> formula -> formula, - kk_formula_if: formula -> formula -> formula -> formula, - kk_or: formula -> formula -> formula, - kk_not: formula -> formula, - kk_iff: formula -> formula -> formula, - kk_implies: formula -> formula -> formula, - kk_and: formula -> formula -> formula, - kk_subset: rel_expr -> rel_expr -> formula, - kk_rel_eq: rel_expr -> rel_expr -> formula, - kk_no: rel_expr -> formula, - kk_lone: rel_expr -> formula, - kk_one: rel_expr -> formula, - kk_some: rel_expr -> formula, - kk_rel_let: expr_assign list -> rel_expr -> rel_expr, - kk_rel_if: formula -> rel_expr -> rel_expr -> 
rel_expr, - kk_union: rel_expr -> rel_expr -> rel_expr, - kk_difference: rel_expr -> rel_expr -> rel_expr, - kk_override: rel_expr -> rel_expr -> rel_expr, - kk_intersect: rel_expr -> rel_expr -> rel_expr, - kk_product: rel_expr -> rel_expr -> rel_expr, - kk_join: rel_expr -> rel_expr -> rel_expr, - kk_closure: rel_expr -> rel_expr, - kk_reflexive_closure: rel_expr -> rel_expr, - kk_comprehension: decl list -> formula -> rel_expr, - kk_project: rel_expr -> int_expr list -> rel_expr, - kk_project_seq: rel_expr -> int -> int -> rel_expr, - kk_not3: rel_expr -> rel_expr, - kk_nat_less: rel_expr -> rel_expr -> rel_expr, - kk_int_less: rel_expr -> rel_expr -> rel_expr - } + type kodkod_constrs = + {kk_all: decl list -> formula -> formula, + kk_exist: decl list -> formula -> formula, + kk_formula_let: expr_assign list -> formula -> formula, + kk_formula_if: formula -> formula -> formula -> formula, + kk_or: formula -> formula -> formula, + kk_not: formula -> formula, + kk_iff: formula -> formula -> formula, + kk_implies: formula -> formula -> formula, + kk_and: formula -> formula -> formula, + kk_subset: rel_expr -> rel_expr -> formula, + kk_rel_eq: rel_expr -> rel_expr -> formula, + kk_no: rel_expr -> formula, + kk_lone: rel_expr -> formula, + kk_one: rel_expr -> formula, + kk_some: rel_expr -> formula, + kk_rel_let: expr_assign list -> rel_expr -> rel_expr, + kk_rel_if: formula -> rel_expr -> rel_expr -> rel_expr, + kk_union: rel_expr -> rel_expr -> rel_expr, + kk_difference: rel_expr -> rel_expr -> rel_expr, + kk_override: rel_expr -> rel_expr -> rel_expr, + kk_intersect: rel_expr -> rel_expr -> rel_expr, + kk_product: rel_expr -> rel_expr -> rel_expr, + kk_join: rel_expr -> rel_expr -> rel_expr, + kk_closure: rel_expr -> rel_expr, + kk_reflexive_closure: rel_expr -> rel_expr, + kk_comprehension: decl list -> formula -> rel_expr, + kk_project: rel_expr -> int_expr list -> rel_expr, + kk_project_seq: rel_expr -> int -> int -> rel_expr, + kk_not3: rel_expr -> rel_expr, + kk_nat_less: rel_expr -> rel_expr -> rel_expr, + kk_int_less: rel_expr -> rel_expr -> rel_expr} val kodkod_constrs : bool -> int -> int -> int -> kodkod_constrs end; @@ -94,11 +93,11 @@ open Kodkod open Nitpick_Util -type name_pool = { - rels: n_ary_index list, - vars: n_ary_index list, - formula_reg: int, - rel_reg: int} +type name_pool = + {rels: n_ary_index list, + vars: n_ary_index list, + formula_reg: int, + rel_reg: int} (* If you add new built-in relations, make sure to increment the counters here as well to avoid name clashes (which fortunately would be detected by @@ -125,40 +124,31 @@ val lcm_rel = (3, 11) val norm_frac_rel = (4, 0) -(* int -> bool -> rel_expr *) fun atom_for_bool j0 = Atom o Integer.add j0 o int_from_bool -(* bool -> formula *) fun formula_for_bool b = if b then True else False -(* int * int -> int -> int *) fun atom_for_nat (k, j0) n = if n < 0 orelse n >= k then ~1 else n + j0 -(* int -> int *) fun min_int_for_card k = ~k div 2 + 1 fun max_int_for_card k = k div 2 -(* int * int -> int -> int *) fun int_for_atom (k, j0) j = let val j = j - j0 in if j <= max_int_for_card k then j else j - k end fun atom_for_int (k, j0) n = if n < min_int_for_card k orelse n > max_int_for_card k then ~1 else if n < 0 then n + k + j0 else n + j0 -(* int -> int -> bool *) fun is_twos_complement_representable bits n = let val max = reasonable_power 2 bits in n >= ~ max andalso n < max end -(* rel_expr -> bool *) fun is_none_product (Product (r1, r2)) = is_none_product r1 orelse is_none_product r2 | is_none_product None = 
true | is_none_product _ = false -(* rel_expr -> bool *) fun is_one_rel_expr (Atom _) = true | is_one_rel_expr (AtomSeq (1, _)) = true | is_one_rel_expr (Var _) = true | is_one_rel_expr _ = false -(* rel_expr -> bool *) fun inline_rel_expr (Product (r1, r2)) = inline_rel_expr r1 andalso inline_rel_expr r2 | inline_rel_expr Iden = true @@ -172,7 +162,6 @@ | inline_rel_expr (RelReg _) = true | inline_rel_expr _ = false -(* rel_expr -> rel_expr -> bool option *) fun rel_expr_equal None (Atom _) = SOME false | rel_expr_equal None (AtomSeq (k, _)) = SOME (k = 0) | rel_expr_equal (Atom _) None = SOME false @@ -183,7 +172,6 @@ | rel_expr_equal (AtomSeq x1) (AtomSeq x2) = SOME (x1 = x2) | rel_expr_equal r1 r2 = if r1 = r2 then SOME true else NONE -(* rel_expr -> rel_expr -> bool option *) fun rel_expr_intersects (Atom j1) (Atom j2) = SOME (j1 = j2) | rel_expr_intersects (Atom j) (AtomSeq (k, j0)) = SOME (j < j0 + k) | rel_expr_intersects (AtomSeq (k, j0)) (Atom j) = SOME (j < j0 + k) @@ -192,84 +180,71 @@ | rel_expr_intersects r1 r2 = if is_none_product r1 orelse is_none_product r2 then SOME false else NONE -(* int -> rel_expr *) fun empty_n_ary_rel 0 = raise ARG ("Nitpick_Peephole.empty_n_ary_rel", "0") | empty_n_ary_rel n = funpow (n - 1) (curry Product None) None -(* decl -> rel_expr *) fun decl_one_set (DeclOne (_, r)) = r | decl_one_set _ = raise ARG ("Nitpick_Peephole.decl_one_set", "not \"DeclOne\"") -(* int_expr -> bool *) fun is_Num (Num _) = true | is_Num _ = false -(* int_expr -> int *) fun dest_Num (Num k) = k | dest_Num _ = raise ARG ("Nitpick_Peephole.dest_Num", "not \"Num\"") -(* int -> int -> int_expr list *) fun num_seq j0 n = map Num (index_seq j0 n) -(* rel_expr -> rel_expr -> bool *) fun occurs_in_union r (Union (r1, r2)) = occurs_in_union r r1 orelse occurs_in_union r r2 | occurs_in_union r r' = (r = r') -(* rel_expr -> rel_expr -> rel_expr *) fun s_and True f2 = f2 | s_and False _ = False | s_and f1 True = f1 | s_and _ False = False | s_and f1 f2 = And (f1, f2) -type kodkod_constrs = { - kk_all: decl list -> formula -> formula, - kk_exist: decl list -> formula -> formula, - kk_formula_let: expr_assign list -> formula -> formula, - kk_formula_if: formula -> formula -> formula -> formula, - kk_or: formula -> formula -> formula, - kk_not: formula -> formula, - kk_iff: formula -> formula -> formula, - kk_implies: formula -> formula -> formula, - kk_and: formula -> formula -> formula, - kk_subset: rel_expr -> rel_expr -> formula, - kk_rel_eq: rel_expr -> rel_expr -> formula, - kk_no: rel_expr -> formula, - kk_lone: rel_expr -> formula, - kk_one: rel_expr -> formula, - kk_some: rel_expr -> formula, - kk_rel_let: expr_assign list -> rel_expr -> rel_expr, - kk_rel_if: formula -> rel_expr -> rel_expr -> rel_expr, - kk_union: rel_expr -> rel_expr -> rel_expr, - kk_difference: rel_expr -> rel_expr -> rel_expr, - kk_override: rel_expr -> rel_expr -> rel_expr, - kk_intersect: rel_expr -> rel_expr -> rel_expr, - kk_product: rel_expr -> rel_expr -> rel_expr, - kk_join: rel_expr -> rel_expr -> rel_expr, - kk_closure: rel_expr -> rel_expr, - kk_reflexive_closure: rel_expr -> rel_expr, - kk_comprehension: decl list -> formula -> rel_expr, - kk_project: rel_expr -> int_expr list -> rel_expr, - kk_project_seq: rel_expr -> int -> int -> rel_expr, - kk_not3: rel_expr -> rel_expr, - kk_nat_less: rel_expr -> rel_expr -> rel_expr, - kk_int_less: rel_expr -> rel_expr -> rel_expr -} +type kodkod_constrs = + {kk_all: decl list -> formula -> formula, + kk_exist: decl list -> formula -> formula, + 
kk_formula_let: expr_assign list -> formula -> formula, + kk_formula_if: formula -> formula -> formula -> formula, + kk_or: formula -> formula -> formula, + kk_not: formula -> formula, + kk_iff: formula -> formula -> formula, + kk_implies: formula -> formula -> formula, + kk_and: formula -> formula -> formula, + kk_subset: rel_expr -> rel_expr -> formula, + kk_rel_eq: rel_expr -> rel_expr -> formula, + kk_no: rel_expr -> formula, + kk_lone: rel_expr -> formula, + kk_one: rel_expr -> formula, + kk_some: rel_expr -> formula, + kk_rel_let: expr_assign list -> rel_expr -> rel_expr, + kk_rel_if: formula -> rel_expr -> rel_expr -> rel_expr, + kk_union: rel_expr -> rel_expr -> rel_expr, + kk_difference: rel_expr -> rel_expr -> rel_expr, + kk_override: rel_expr -> rel_expr -> rel_expr, + kk_intersect: rel_expr -> rel_expr -> rel_expr, + kk_product: rel_expr -> rel_expr -> rel_expr, + kk_join: rel_expr -> rel_expr -> rel_expr, + kk_closure: rel_expr -> rel_expr, + kk_reflexive_closure: rel_expr -> rel_expr, + kk_comprehension: decl list -> formula -> rel_expr, + kk_project: rel_expr -> int_expr list -> rel_expr, + kk_project_seq: rel_expr -> int -> int -> rel_expr, + kk_not3: rel_expr -> rel_expr, + kk_nat_less: rel_expr -> rel_expr -> rel_expr, + kk_int_less: rel_expr -> rel_expr -> rel_expr} (* We assume throughout that Kodkod variables have a "one" constraint. This is always the case if Kodkod's skolemization is disabled. *) -(* bool -> int -> int -> int -> kodkod_constrs *) fun kodkod_constrs optim nat_card int_card main_j0 = let - (* bool -> int *) val from_bool = atom_for_bool main_j0 - (* int -> rel_expr *) fun from_nat n = Atom (n + main_j0) - (* int -> int *) fun to_nat j = j - main_j0 val to_int = int_for_atom (int_card, main_j0) - (* decl list -> formula -> formula *) fun s_all _ True = True | s_all _ False = False | s_all [] f = f @@ -281,12 +256,10 @@ | s_exist ds (Exist (ds', f)) = Exist (ds @ ds', f) | s_exist ds f = Exist (ds, f) - (* expr_assign list -> formula -> formula *) fun s_formula_let _ True = True | s_formula_let _ False = False | s_formula_let assigns f = FormulaLet (assigns, f) - (* formula -> formula *) fun s_not True = False | s_not False = True | s_not (All (ds, f)) = Exist (ds, s_not f) @@ -299,7 +272,6 @@ | s_not (Some r) = No r | s_not f = Not f - (* formula -> formula -> formula *) fun s_or True _ = True | s_or False f2 = f2 | s_or _ True = True @@ -316,7 +288,6 @@ | s_implies f1 False = s_not f1 | s_implies f1 f2 = if f1 = f2 then True else Implies (f1, f2) - (* formula -> formula -> formula -> formula *) fun s_formula_if True f2 _ = f2 | s_formula_if False _ f3 = f3 | s_formula_if f1 True f3 = s_or f1 f3 @@ -325,7 +296,6 @@ | s_formula_if f1 f2 False = s_and f1 f2 | s_formula_if f f1 f2 = FormulaIf (f, f1, f2) - (* rel_expr -> int_expr list -> rel_expr *) fun s_project r is = (case r of Project (r1, is') => @@ -340,7 +310,6 @@ else Project (r, is) end - (* (rel_expr -> formula) -> rel_expr -> formula *) fun s_xone xone r = if is_one_rel_expr r then True @@ -348,7 +317,6 @@ 1 => xone r | arity => foldl1 And (map (xone o s_project r o single o Num) (index_seq 0 arity)) - (* rel_expr -> formula *) fun s_no None = True | s_no (Product (r1, r2)) = s_or (s_no r1) (s_no r2) | s_no (Intersect (Closure (Rel x), Iden)) = Acyclic x @@ -362,13 +330,11 @@ | s_some (Product (r1, r2)) = s_and (s_some r1) (s_some r2) | s_some r = if is_one_rel_expr r then True else Some r - (* rel_expr -> rel_expr *) fun s_not3 (Atom j) = Atom (if j = main_j0 then j + 1 else j - 1) | s_not3 (r 
as Join (r1, r2)) = if r2 = Rel not3_rel then r1 else Join (r, Rel not3_rel) | s_not3 r = Join (r, Rel not3_rel) - (* rel_expr -> rel_expr -> formula *) fun s_rel_eq r1 r2 = (case (r1, r2) of (Join (r11, Rel x), _) => @@ -427,12 +393,10 @@ else if forall is_one_rel_expr [r1, r2] then s_rel_eq r1 r2 else Subset (r1, r2) - (* expr_assign list -> rel_expr -> rel_expr *) fun s_rel_let [b as AssignRelReg (x', r')] (r as RelReg x) = if x = x' then r' else RelLet ([b], r) | s_rel_let bs r = RelLet (bs, r) - (* formula -> rel_expr -> rel_expr -> rel_expr *) fun s_rel_if f r1 r2 = (case (f, r1, r2) of (True, _, _) => r1 @@ -443,7 +407,6 @@ | _ => raise SAME ()) handle SAME () => if r1 = r2 then r1 else RelIf (f, r1, r2) - (* rel_expr -> rel_expr -> rel_expr *) fun s_union r1 (Union (r21, r22)) = s_union (s_union r1 r21) r22 | s_union r1 r2 = if is_none_product r1 then r2 @@ -561,14 +524,12 @@ handle SAME () => List.foldr Join r22 [r1, r21]) | s_join r1 r2 = Join (r1, r2) - (* rel_expr -> rel_expr *) fun s_closure Iden = Iden | s_closure r = if is_none_product r then r else Closure r fun s_reflexive_closure Iden = Iden | s_reflexive_closure r = if is_none_product r then Iden else ReflexiveClosure r - (* decl list -> formula -> rel_expr *) fun s_comprehension ds False = empty_n_ary_rel (length ds) | s_comprehension ds True = fold1 s_product (map decl_one_set ds) | s_comprehension [d as DeclOne ((1, j1), r)] @@ -579,10 +540,8 @@ Comprehension ([d], f) | s_comprehension ds f = Comprehension (ds, f) - (* rel_expr -> int -> int -> rel_expr *) fun s_project_seq r = let - (* int -> rel_expr -> int -> int -> rel_expr *) fun aux arity r j0 n = if j0 = 0 andalso arity = n then r @@ -595,7 +554,6 @@ val arity1 = arity - arity2 val n1 = Int.min (nat_minus arity1 j0, n) val n2 = n - n1 - (* unit -> rel_expr *) fun one () = aux arity1 r1 j0 n1 fun two () = aux arity2 r2 (nat_minus j0 arity1) n2 in @@ -607,17 +565,13 @@ | _ => s_project r (num_seq j0 n) in aux (arity_of_rel_expr r) r end - (* rel_expr -> rel_expr -> rel_expr *) fun s_nat_less (Atom j1) (Atom j2) = from_bool (j1 < j2) | s_nat_less r1 r2 = fold s_join [r1, r2] (Rel nat_less_rel) fun s_int_less (Atom j1) (Atom j2) = from_bool (to_int j1 < to_int j2) | s_int_less r1 r2 = fold s_join [r1, r2] (Rel int_less_rel) - (* rel_expr -> int -> int -> rel_expr *) fun d_project_seq r j0 n = Project (r, num_seq j0 n) - (* rel_expr -> rel_expr *) fun d_not3 r = Join (r, Rel not3_rel) - (* rel_expr -> rel_expr -> rel_expr *) fun d_nat_less r1 r2 = List.foldl Join (Rel nat_less_rel) [r1, r2] fun d_int_less r1 r2 = List.foldl Join (Rel int_less_rel) [r1, r2] in diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/nitpick_preproc.ML --- a/src/HOL/Tools/Nitpick/nitpick_preproc.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick_preproc.ML Tue May 04 20:30:22 2010 +0200 @@ -21,7 +21,6 @@ open Nitpick_HOL open Nitpick_Mono -(* polarity -> string -> bool *) fun is_positive_existential polar quant_s = (polar = Pos andalso quant_s = @{const_name Ex}) orelse (polar = Neg andalso quant_s <> @{const_name Ex}) @@ -33,10 +32,8 @@ binary coding. 
*) val binary_int_threshold = 3 -(* bool -> term -> bool *) val may_use_binary_ints = let - (* bool -> term -> bool *) fun aux def (Const (@{const_name "=="}, _) $ t1 $ t2) = aux def t1 andalso aux false t2 | aux def (@{const "==>"} $ t1 $ t2) = aux false t1 andalso aux def t2 @@ -52,10 +49,8 @@ | aux def (Abs (_, _, t')) = aux def t' | aux _ _ = true in aux end -(* term -> bool *) val should_use_binary_ints = let - (* term -> bool *) fun aux (t1 $ t2) = aux t1 orelse aux t2 | aux (Const (s, T)) = ((s = @{const_name times} orelse s = @{const_name div}) andalso @@ -70,10 +65,8 @@ (** Uncurrying **) -(* theory -> term -> int Termtab.tab -> int Termtab.tab *) fun add_to_uncurry_table thy t = let - (* term -> term list -> int Termtab.tab -> int Termtab.tab *) fun aux (t1 $ t2) args table = let val table = aux t2 [] table in aux t1 (t2 :: args) table end | aux (Abs (_, _, t')) _ table = aux t' [] table @@ -83,18 +76,15 @@ is_sel s orelse s = @{const_name Sigma} then table else - Termtab.map_default (t, 65536) (curry Int.min (length args)) table + Termtab.map_default (t, 65536) (Integer.min (length args)) table | aux _ _ table = table in aux t [] end -(* int -> int -> string *) fun uncurry_prefix_for k j = uncurry_prefix ^ string_of_int k ^ "@" ^ string_of_int j ^ name_sep -(* int Termtab.tab term -> term *) fun uncurry_term table t = let - (* term -> term list -> term *) fun aux (t1 $ t2) args = aux t1 (aux t2 [] :: args) | aux (Abs (s, T, t')) args = betapplys (Abs (s, T, aux t' []), args) | aux (t as Const (s, T)) args = @@ -131,17 +121,14 @@ (** Boxing **) -(* hol_context -> bool -> term -> term *) fun box_fun_and_pair_in_term (hol_ctxt as {thy, stds, fast_descrs, ...}) def orig_t = let - (* typ -> typ *) fun box_relational_operator_type (Type (@{type_name fun}, Ts)) = Type (@{type_name fun}, map box_relational_operator_type Ts) | box_relational_operator_type (Type (@{type_name "*"}, Ts)) = Type (@{type_name "*"}, map (box_type hol_ctxt InPair) Ts) | box_relational_operator_type T = T - (* indexname * typ -> typ * term -> typ option list -> typ option list *) fun add_boxed_types_for_var (z as (_, T)) (T', t') = case t' of Var z' => z' = z ? insert (op =) T' @@ -152,7 +139,6 @@ | _ => raise TYPE ("Nitpick_Preproc.box_fun_and_pair_in_term.\ \add_boxed_types_for_var", [T'], [])) | _ => exists_subterm (curry (op =) (Var z)) t' ? 
insert (op =) T - (* typ list -> typ list -> term -> indexname * typ -> typ *) fun box_var_in_def new_Ts old_Ts t (z as (_, T)) = case t of @{const Trueprop} $ t1 => box_var_in_def new_Ts old_Ts t1 z @@ -170,8 +156,6 @@ else T | _ => T - (* typ list -> typ list -> polarity -> string -> typ -> string -> typ - -> term -> term *) and do_quantifier new_Ts old_Ts polar quant_s quant_T abs_s abs_T t = let val abs_T' = @@ -185,7 +169,6 @@ $ Abs (abs_s, abs_T', t |> do_term (abs_T' :: new_Ts) (abs_T :: old_Ts) polar) end - (* typ list -> typ list -> string -> typ -> term -> term -> term *) and do_equals new_Ts old_Ts s0 T0 t1 t2 = let val (t1, t2) = pairself (do_term new_Ts old_Ts Neut) (t1, t2) @@ -195,12 +178,10 @@ list_comb (Const (s0, T --> T --> body_type T0), map2 (coerce_term hol_ctxt new_Ts T) [T1, T2] [t1, t2]) end - (* string -> typ -> term *) and do_description_operator s T = let val T1 = box_type hol_ctxt InFunLHS (range_type T) in Const (s, (T1 --> bool_T) --> T1) end - (* typ list -> typ list -> polarity -> term -> term *) and do_term new_Ts old_Ts polar t = case t of Const (s0 as @{const_name all}, T0) $ Abs (s1, T1, t1) => @@ -302,21 +283,16 @@ val val_var_prefix = nitpick_prefix ^ "v" -(* typ list -> int -> int -> int -> term -> term *) fun fresh_value_var Ts k n j t = Var ((val_var_prefix ^ nat_subscript (n - j), k), fastype_of1 (Ts, t)) -(* typ list -> term -> bool *) fun has_heavy_bounds_or_vars Ts t = let - (* typ list -> bool *) fun aux [] = false | aux [T] = is_fun_type T orelse is_pair_type T | aux _ = true in aux (map snd (Term.add_vars t []) @ map (nth Ts) (loose_bnos t)) end -(* hol_context -> typ list -> bool -> int -> int -> term -> term list - -> term list -> term * term list *) fun pull_out_constr_comb ({thy, stds, ...} : hol_context) Ts relax k level t args seen = let val t_comb = list_comb (t, args) in @@ -336,18 +312,15 @@ | _ => (t_comb, seen) end -(* (term -> term) -> typ list -> int -> term list -> term list *) fun equations_for_pulled_out_constrs mk_eq Ts k seen = let val n = length seen in map2 (fn j => fn t => mk_eq (fresh_value_var Ts k n j t, t)) (index_seq 0 n) seen end -(* hol_context -> bool -> term -> term *) fun pull_out_universal_constrs hol_ctxt def t = let val k = maxidx_of_term t + 1 - (* typ list -> bool -> term -> term list -> term list -> term * term list *) fun do_term Ts def t args seen = case t of (t0 as Const (@{const_name "=="}, _)) $ t1 $ t2 => @@ -367,8 +340,6 @@ do_term Ts def t1 (t2 :: args) seen end | _ => pull_out_constr_comb hol_ctxt Ts def k 0 t args seen - (* typ list -> bool -> bool -> term -> term -> term -> term list - -> term * term list *) and do_eq_or_imp Ts eq def t0 t1 t2 seen = let val (t2, seen) = if eq andalso def then (t2, seen) @@ -381,22 +352,18 @@ seen, concl) end -(* term -> term -> term *) fun mk_exists v t = HOLogic.exists_const (fastype_of v) $ lambda v (incr_boundvars 1 t) -(* hol_context -> term -> term *) fun pull_out_existential_constrs hol_ctxt t = let val k = maxidx_of_term t + 1 - (* typ list -> int -> term -> term list -> term list -> term * term list *) fun aux Ts num_exists t args seen = case t of (t0 as Const (@{const_name Ex}, _)) $ Abs (s1, T1, t1) => let val (t1, seen') = aux (T1 :: Ts) (num_exists + 1) t1 [] [] val n = length seen' - (* unit -> term list *) fun vars () = map2 (fresh_value_var Ts k n) (index_seq 0 n) seen' in (equations_for_pulled_out_constrs HOLogic.mk_eq Ts k seen' @@ -421,7 +388,6 @@ val let_var_prefix = nitpick_prefix ^ "l" val let_inline_threshold = 32 -(* int -> typ -> term 
-> (term -> term) -> term *) fun hol_let n abs_T body_T f t = if n * size_of_term t <= let_inline_threshold then f t @@ -431,14 +397,11 @@ $ t $ abs_var z (incr_boundvars 1 (f (Var z))) end -(* hol_context -> bool -> term -> term *) fun destroy_pulled_out_constrs (hol_ctxt as {thy, stds, ...}) axiom t = let - (* styp -> int *) val num_occs_of_var = fold_aterms (fn Var z => (fn f => fn z' => f z' |> z = z' ? Integer.add 1) | _ => I) t (K 0) - (* bool -> term -> term *) fun aux careful ((t0 as Const (@{const_name "=="}, _)) $ t1 $ t2) = aux_eq careful true t0 t1 t2 | aux careful ((t0 as @{const "==>"}) $ t1 $ t2) = @@ -450,7 +413,6 @@ | aux careful (Abs (s, T, t')) = Abs (s, T, aux careful t') | aux careful (t1 $ t2) = aux careful t1 $ aux careful t2 | aux _ t = t - (* bool -> bool -> term -> term -> term -> term *) and aux_eq careful pass1 t0 t1 t2 = ((if careful then raise SAME () @@ -485,7 +447,6 @@ |> body_type (type_of t0) = prop_T ? HOLogic.mk_Trueprop) handle SAME () => if pass1 then aux_eq careful false t0 t2 t1 else t0 $ aux false t2 $ aux false t1 - (* styp -> term -> int -> typ -> term -> term *) and sel_eq x t n nth_T nth_t = HOLogic.eq_const nth_T $ nth_t $ select_nth_constr_arg thy stds x t n nth_T @@ -494,7 +455,6 @@ (** Destruction of universal and existential equalities **) -(* term -> term *) fun curry_assms (@{const "==>"} $ (@{const Trueprop} $ (@{const "op &"} $ t1 $ t2)) $ t3) = curry_assms (Logic.list_implies ([t1, t2] |> map HOLogic.mk_Trueprop, t3)) @@ -502,15 +462,12 @@ @{const "==>"} $ curry_assms t1 $ curry_assms t2 | curry_assms t = t -(* term -> term *) val destroy_universal_equalities = let - (* term list -> (indexname * typ) list -> term -> term *) fun aux prems zs t = case t of @{const "==>"} $ t1 $ t2 => aux_implies prems zs t1 t2 | _ => Logic.list_implies (rev prems, t) - (* term list -> (indexname * typ) list -> term -> term -> term *) and aux_implies prems zs t1 t2 = case t1 of Const (@{const_name "=="}, _) $ Var z $ t' => aux_eq prems zs z t' t1 t2 @@ -519,8 +476,6 @@ | @{const Trueprop} $ (Const (@{const_name "op ="}, _) $ t' $ Var z) => aux_eq prems zs z t' t1 t2 | _ => aux (t1 :: prems) (Term.add_vars t1 zs) t2 - (* term list -> (indexname * typ) list -> indexname * typ -> term -> term - -> term -> term *) and aux_eq prems zs z t' t1 t2 = if not (member (op =) zs z) andalso not (exists_subterm (curry (op =) (Var z)) t') then @@ -529,15 +484,11 @@ aux (t1 :: prems) (Term.add_vars t1 zs) t2 in aux [] [] end -(* theory -> (typ option * bool) list -> int -> term list -> term list - -> (term * term list) option *) fun find_bound_assign thy stds j = let - (* term list -> term list -> (term * term list) option *) fun do_term _ [] = NONE | do_term seen (t :: ts) = let - (* bool -> term -> term -> (term * term list) option *) fun do_eq pass1 t1 t2 = (if loose_bvar1 (t2, j) then if pass1 then do_eq false t2 t1 else raise SAME () @@ -559,10 +510,8 @@ end in do_term end -(* int -> term -> term -> term *) fun subst_one_bound j arg t = let - (* term * int -> term *) fun aux (Bound i, lev) = if i < lev then raise SAME () else if i = lev then incr_boundvars (lev - j) arg @@ -574,10 +523,8 @@ | aux _ = raise SAME () in aux (t, j) handle SAME () => t end -(* hol_context -> term -> term *) fun destroy_existential_equalities ({thy, stds, ...} : hol_context) = let - (* string list -> typ list -> term list -> term *) fun kill [] [] ts = foldr1 s_conj ts | kill (s :: ss) (T :: Ts) ts = (case find_bound_assign thy stds (length ss) [] ts of @@ -589,7 +536,6 @@ Const 
(@{const_name Ex}, (T --> bool_T) --> bool_T) $ Abs (s, T, kill ss Ts ts)) | kill _ _ _ = raise UnequalLengths - (* string list -> typ list -> term -> term *) fun gather ss Ts (Const (@{const_name Ex}, _) $ Abs (s1, T1, t1)) = gather (ss @ [s1]) (Ts @ [T1]) t1 | gather [] [] (Abs (s, T, t1)) = Abs (s, T, gather [] [] t1) @@ -600,20 +546,15 @@ (** Skolemization **) -(* int -> int -> string *) fun skolem_prefix_for k j = skolem_prefix ^ string_of_int k ^ "@" ^ string_of_int j ^ name_sep -(* hol_context -> int -> term -> term *) fun skolemize_term_and_more (hol_ctxt as {thy, def_table, skolems, ...}) skolem_depth = let - (* int list -> int list *) val incrs = map (Integer.add 1) - (* string list -> typ list -> int list -> int -> polarity -> term -> term *) fun aux ss Ts js depth polar t = let - (* string -> typ -> string -> typ -> term -> term *) fun do_quantifier quant_s quant_T abs_s abs_T t = if not (loose_bvar1 (t, 0)) then aux ss Ts js depth polar (incr_boundvars ~1 t) @@ -679,7 +620,6 @@ else (ubfp_prefix, @{const "op &"}, @{const_name semilattice_inf_class.inf}) - (* unit -> term *) fun pos () = unrolled_inductive_pred_const hol_ctxt gfp x |> aux ss Ts js depth polar fun neg () = Const (pref ^ s, T) @@ -693,7 +633,6 @@ val ((trunk_arg_Ts, rump_arg_T), body_T) = T |> strip_type |>> split_last val set_T = rump_arg_T --> body_T - (* (unit -> term) -> term *) fun app f = list_comb (f (), map Bound (length trunk_arg_Ts - 1 downto 0)) @@ -717,21 +656,18 @@ (** Function specialization **) -(* term -> term list *) fun params_in_equation (@{const "==>"} $ _ $ t2) = params_in_equation t2 | params_in_equation (@{const Trueprop} $ t1) = params_in_equation t1 | params_in_equation (Const (@{const_name "op ="}, _) $ t1 $ _) = snd (strip_comb t1) | params_in_equation _ = [] -(* styp -> styp -> int list -> term list -> term list -> term -> term *) fun specialize_fun_axiom x x' fixed_js fixed_args extra_args t = let val k = fold Integer.max (map maxidx_of_term (fixed_args @ extra_args)) 0 + 1 val t = map_aterms (fn Var ((s, i), T) => Var ((s, k + i), T) | t' => t') t val fixed_params = filter_indices fixed_js (params_in_equation t) - (* term list -> term -> term *) fun aux args (Abs (s, T, t)) = list_comb (Abs (s, T, aux [] t), args) | aux args (t1 $ t2) = aux (aux [] t2 :: args) t1 | aux args t = @@ -743,10 +679,8 @@ end in aux [] t end -(* hol_context -> styp -> (int * term option) list *) fun static_args_in_term ({ersatz_table, ...} : hol_context) x t = let - (* term -> term list -> term list -> term list list *) fun fun_calls (Abs (_, _, t)) _ = fun_calls t [] | fun_calls (t1 $ t2) args = fun_calls t2 [] #> fun_calls t1 (t2 :: args) | fun_calls t args = @@ -756,7 +690,6 @@ SOME s'' => x = (s'', T') | NONE => false) | _ => false) ? 
cons args - (* term list list -> term list list -> term list -> term list list *) fun call_sets [] [] vs = [vs] | call_sets [] uss vs = vs :: call_sets uss [] [] | call_sets ([] :: _) _ _ = [] @@ -773,12 +706,10 @@ | [t as Free _] => cons (j, SOME t) | _ => I) indexed_sets [] end -(* hol_context -> styp -> term list -> (int * term option) list *) fun static_args_in_terms hol_ctxt x = map (static_args_in_term hol_ctxt x) #> fold1 (OrdList.inter (prod_ord int_ord (option_ord Term_Ord.term_ord))) -(* (int * term option) list -> (int * term) list -> int list *) fun overlapping_indices [] _ = [] | overlapping_indices _ [] = [] | overlapping_indices (ps1 as (j1, t1) :: ps1') (ps2 as (j2, t2) :: ps2') = @@ -786,7 +717,6 @@ else if j1 > j2 then overlapping_indices ps1 ps2' else overlapping_indices ps1' ps2' |> the_default t2 t1 = t2 ? cons j1 -(* typ list -> term -> bool *) fun is_eligible_arg Ts t = let val bad_Ts = map snd (Term.add_vars t []) @ map (nth Ts) (loose_bnos t) in null bad_Ts orelse @@ -794,7 +724,6 @@ forall (not o is_higher_order_type) bad_Ts) end -(* int -> string *) fun special_prefix_for j = special_prefix ^ string_of_int j ^ name_sep (* If a constant's definition is picked up deeper than this threshold, we @@ -803,7 +732,6 @@ val bound_var_prefix = "b" -(* hol_context -> int -> term -> term *) fun specialize_consts_in_term (hol_ctxt as {specialize, simp_table, special_funs, ...}) depth t = if not specialize orelse depth > special_max_depth then @@ -812,7 +740,6 @@ let val blacklist = if depth = 0 then [] else case term_under_def t of Const x => [x] | _ => [] - (* term list -> typ list -> term -> term *) fun aux args Ts (Const (x as (s, T))) = ((if not (member (op =) blacklist x) andalso not (null args) andalso not (String.isPrefix special_prefix s) andalso @@ -836,7 +763,6 @@ val extra_args = map Var vars @ map Bound bound_js @ live_args val extra_Ts = map snd vars @ filter_indices bound_js Ts val k = maxidx_of_term t + 1 - (* int -> term *) fun var_for_bound_no j = Var ((bound_var_prefix ^ nat_subscript (find_index (curry (op =) j) bound_js @@ -880,7 +806,6 @@ val cong_var_prefix = "c" -(* typ -> special_triple -> special_triple -> term *) fun special_congruence_axiom T (js1, ts1, x1) (js2, ts2, x2) = let val (bounds1, bounds2) = pairself (map Var o special_bounds) (ts1, ts2) @@ -905,7 +830,6 @@ |> close_form (* TODO: needed? *) end -(* hol_context -> styp list -> term list *) fun special_congruence_axioms (hol_ctxt as {special_funs, ...}) xs = let val groups = @@ -914,14 +838,10 @@ |> AList.group (op =) |> filter_out (is_equational_fun_surely_complete hol_ctxt o fst) |> map (fn (x, zs) => (x, zs |> member (op =) xs x ? 
cons ([], [], x))) - (* special_triple -> int *) fun generality (js, _, _) = ~(length js) - (* special_triple -> special_triple -> bool *) fun is_more_specific (j1, t1, x1) (j2, t2, x2) = x1 <> x2 andalso OrdList.subset (prod_ord int_ord Term_Ord.term_ord) (j2 ~~ t2, j1 ~~ t1) - (* typ -> special_triple list -> special_triple list -> special_triple list - -> term list -> term list *) fun do_pass_1 _ [] [_] [_] = I | do_pass_1 T skipped _ [] = do_pass_2 T skipped | do_pass_1 T skipped all (z :: zs) = @@ -930,7 +850,6 @@ [] => do_pass_1 T (z :: skipped) all zs | (z' :: _) => cons (special_congruence_axiom T z z') #> do_pass_1 T skipped all zs - (* typ -> special_triple list -> term list -> term list *) and do_pass_2 _ [] = I | do_pass_2 T (z :: zs) = fold (cons o special_congruence_axiom T z) zs #> do_pass_2 T zs @@ -938,32 +857,23 @@ (** Axiom selection **) -(* 'a Symtab.table -> 'a list *) fun all_table_entries table = Symtab.fold (append o snd) table [] -(* const_table -> string -> const_table *) fun extra_table table s = Symtab.make [(s, all_table_entries table)] -(* int -> term -> term *) fun eval_axiom_for_term j t = Logic.mk_equals (Const (eval_prefix ^ string_of_int j, fastype_of t), t) -(* term -> bool *) val is_trivial_equation = the_default false o try (op aconv o Logic.dest_equals) (* Prevents divergence in case of cyclic or infinite axiom dependencies. *) val axioms_max_depth = 255 -(* hol_context -> term -> term list * term list * bool * bool *) fun axioms_for_term (hol_ctxt as {thy, ctxt, max_bisim_depth, stds, user_axioms, fast_descrs, evals, def_table, nondef_table, choice_spec_table, user_nondefs, ...}) t = let type accumulator = styp list * (term list * term list) - (* (term list * term list -> term list) - -> ((term list -> term list) -> term list * term list - -> term list * term list) - -> int -> term -> accumulator -> accumulator *) fun add_axiom get app depth t (accum as (xs, axs)) = let val t = t |> unfold_defs_in_term hol_ctxt @@ -977,7 +887,6 @@ else add_axioms_for_term (depth + 1) t' (xs, app (cons t') axs) end end - (* int -> term -> accumulator -> accumulator *) and add_def_axiom depth = add_axiom fst apfst depth and add_nondef_axiom depth = add_axiom snd apsnd depth and add_maybe_def_axiom depth t = @@ -986,7 +895,6 @@ and add_eq_axiom depth t = (if is_constr_pattern_formula thy t then add_def_axiom else add_nondef_axiom) depth t - (* int -> term -> accumulator -> accumulator *) and add_axioms_for_term depth t (accum as (xs, axs)) = case t of t1 $ t2 => accum |> fold (add_axioms_for_term depth) [t1, t2] @@ -1006,8 +914,8 @@ val class = Logic.class_of_const s val of_class = Logic.mk_of_class (TVar (("'a", 0), [class]), class) - val ax1 = try (Refute.specialize_type thy x) of_class - val ax2 = Option.map (Refute.specialize_type thy x o snd) + val ax1 = try (specialize_type thy x) of_class + val ax2 = Option.map (specialize_type thy x o snd) (Refute.get_classdef thy class) in fold (add_maybe_def_axiom depth) (map_filter I [ax1, ax2]) @@ -1058,7 +966,6 @@ | Bound _ => accum | Abs (_, T, t) => accum |> add_axioms_for_term depth t |> add_axioms_for_type depth T - (* int -> typ -> accumulator -> accumulator *) and add_axioms_for_type depth T = case T of Type (@{type_name fun}, Ts) => fold (add_axioms_for_type depth) Ts @@ -1080,7 +987,6 @@ (codatatype_bisim_axioms hol_ctxt T) else I) - (* int -> typ -> sort -> accumulator -> accumulator *) and add_axioms_for_sort depth T S = let val supers = Sign.complete_sort thy S @@ -1091,7 +997,7 @@ map (fn t => case 
Term.add_tvars t [] of [] => t | [(x, S)] => - Refute.monomorphic_term (Vartab.make [(x, (S, T))]) t + monomorphic_term (Vartab.make [(x, (S, T))]) t | _ => raise TERM ("Nitpick_Preproc.axioms_for_term.\ \add_axioms_for_sort", [t])) class_axioms @@ -1112,15 +1018,12 @@ (** Simplification of constructor/selector terms **) -(* theory -> term -> term *) fun simplify_constrs_and_sels thy t = let - (* term -> int -> term *) fun is_nth_sel_on t' n (Const (s, _) $ t) = (t = t' andalso is_sel_like_and_no_discr s andalso sel_no_from_name s = n) | is_nth_sel_on _ _ _ = false - (* term -> term list -> term *) fun do_term (Const (@{const_name Rep_Frac}, _) $ (Const (@{const_name Abs_Frac}, _) $ t1)) [] = do_term t1 [] | do_term (Const (@{const_name Abs_Frac}, _) @@ -1160,7 +1063,6 @@ (** Quantifier massaging: Distributing quantifiers **) -(* term -> term *) fun distribute_quantifiers t = case t of (t0 as Const (@{const_name All}, T0)) $ Abs (s, T1, t1) => @@ -1199,7 +1101,6 @@ (** Quantifier massaging: Pushing quantifiers inward **) -(* int -> int -> (int -> int) -> term -> term *) fun renumber_bounds j n f t = case t of t1 $ t2 => renumber_bounds j n f t1 $ renumber_bounds j n f t2 @@ -1214,10 +1115,8 @@ paper). *) val quantifier_cluster_threshold = 7 -(* term -> term *) val push_quantifiers_inward = let - (* string -> string list -> typ list -> term -> term *) fun aux quant_s ss Ts t = (case t of Const (s0, _) $ Abs (s1, T1, t1 as _ $ _) => @@ -1237,7 +1136,6 @@ else let val typical_card = 4 - (* ('a -> ''b list) -> 'a list -> ''b list *) fun big_union proj ps = fold (fold (insert (op =)) o proj) ps [] val (ts, connective) = strip_any_connective t @@ -1245,11 +1143,8 @@ map (bounded_card_of_type 65536 typical_card []) Ts val t_costs = map size_of_term ts val num_Ts = length Ts - (* int -> int *) val flip = curry (op -) (num_Ts - 1) val t_boundss = map (map flip o loose_bnos) ts - (* (int list * int) list -> int list - -> (int list * int) list *) fun merge costly_boundss [] = costly_boundss | merge costly_boundss (j :: js) = let @@ -1261,9 +1156,7 @@ val yeas_cost = Integer.sum (map snd yeas) * nth T_costs j in merge ((yeas_bounds, yeas_cost) :: nays) js end - (* (int list * int) list -> int list -> int *) val cost = Integer.sum o map snd oo merge - (* (int list * int) list -> int list -> int list *) fun heuristically_best_permutation _ [] = [] | heuristically_best_permutation costly_boundss js = let @@ -1287,14 +1180,12 @@ (index_seq 0 num_Ts) val ts = map (renumber_bounds 0 num_Ts (nth back_js o flip)) ts - (* (term * int list) list -> term *) fun mk_connection [] = raise ARG ("Nitpick_Preproc.push_quantifiers_inward.aux.\ \mk_connection", "") | mk_connection ts_cum_bounds = ts_cum_bounds |> map fst |> foldr1 (fn (t1, t2) => connective $ t1 $ t2) - (* (term * int list) list -> int list -> term *) fun build ts_cum_bounds [] = ts_cum_bounds |> mk_connection | build ts_cum_bounds (j :: js) = let @@ -1321,9 +1212,6 @@ (** Inference of finite functions **) -(* hol_context -> bool -> (typ option * bool option) list - -> (typ option * bool option) list -> term list * term list - -> term list * term list *) fun finitize_all_types_of_funs (hol_ctxt as {thy, ...}) binarize finitizes monos (nondef_ts, def_ts) = let @@ -1338,18 +1226,15 @@ (** Preprocessor entry point **) -(* hol_context -> (typ option * bool option) list - -> (typ option * bool option) list -> term - -> term list * term list * bool * bool * bool *) +val max_skolem_depth = 4 + fun preprocess_term (hol_ctxt as {thy, stds, binary_ints, 
destroy_constrs, - boxes, skolemize, uncurry, ...}) - finitizes monos t = + boxes, ...}) finitizes monos t = let - val skolem_depth = if skolemize then 4 else ~1 val (nondef_ts, def_ts, got_all_mono_user_axioms, no_poly_user_axioms) = t |> unfold_defs_in_term hol_ctxt |> close_form - |> skolemize_term_and_more hol_ctxt skolem_depth + |> skolemize_term_and_more hol_ctxt max_skolem_depth |> specialize_consts_in_term hol_ctxt 0 |> axioms_for_term hol_ctxt val binarize = @@ -1361,14 +1246,12 @@ (binary_ints = SOME true orelse exists should_use_binary_ints (nondef_ts @ def_ts)) val box = exists (not_equal (SOME false) o snd) boxes - val uncurry = uncurry andalso box val table = Termtab.empty - |> uncurry ? fold (add_to_uncurry_table thy) (nondef_ts @ def_ts) - (* bool -> term -> term *) + |> box ? fold (add_to_uncurry_table thy) (nondef_ts @ def_ts) fun do_rest def = binarize ? binarize_nat_and_int_in_term - #> uncurry ? uncurry_term table + #> box ? uncurry_term table #> box ? box_fun_and_pair_in_term hol_ctxt def #> destroy_constrs ? (pull_out_universal_constrs hol_ctxt def #> pull_out_existential_constrs hol_ctxt diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/nitpick_rep.ML --- a/src/HOL/Tools/Nitpick/nitpick_rep.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick_rep.ML Tue May 04 20:30:22 2010 +0200 @@ -77,18 +77,15 @@ exception REP of string * rep list -(* polarity -> string *) fun string_for_polarity Pos = "+" | string_for_polarity Neg = "-" | string_for_polarity Neut = "=" -(* rep -> string *) fun atomic_string_for_rep rep = let val s = string_for_rep rep in if String.isPrefix "[" s orelse not (is_substring_of " " s) then s else "(" ^ s ^ ")" end -(* rep -> string *) and string_for_rep Any = "X" | string_for_rep (Formula polar) = "F" ^ string_for_polarity polar | string_for_rep Unit = "U" @@ -101,7 +98,6 @@ atomic_string_for_rep R1 ^ " => " ^ string_for_rep R2 | string_for_rep (Opt R) = atomic_string_for_rep R ^ "?" 
-(* rep -> bool *) fun is_Func (Func _) = true | is_Func _ = false fun is_Opt (Opt _) = true @@ -110,7 +106,6 @@ | is_opt_rep (Opt _) = true | is_opt_rep _ = false -(* rep -> int *) fun card_of_rep Any = raise REP ("Nitpick_Rep.card_of_rep", [Any]) | card_of_rep (Formula _) = 2 | card_of_rep Unit = 1 @@ -140,7 +135,6 @@ Int.max (min_univ_card_of_rep R1, min_univ_card_of_rep R2) | min_univ_card_of_rep (Opt R) = min_univ_card_of_rep R -(* rep -> bool *) fun is_one_rep Unit = true | is_one_rep (Atom _) = true | is_one_rep (Struct _) = true @@ -149,10 +143,8 @@ fun is_lone_rep (Opt R) = is_one_rep R | is_lone_rep R = is_one_rep R -(* rep -> rep * rep *) fun dest_Func (Func z) = z | dest_Func R = raise REP ("Nitpick_Rep.dest_Func", [R]) -(* int Typtab.table -> typ -> (unit -> int) -> rep -> rep *) fun lazy_range_rep _ _ _ Unit = Unit | lazy_range_rep _ _ _ (Vect (_, R)) = R | lazy_range_rep _ _ _ (Func (_, R2)) = R2 @@ -164,19 +156,15 @@ Atom (ran_card (), offset_of_type ofs T2) | lazy_range_rep _ _ _ R = raise REP ("Nitpick_Rep.lazy_range_rep", [R]) -(* rep -> rep list *) fun binder_reps (Func (R1, R2)) = R1 :: binder_reps R2 | binder_reps _ = [] -(* rep -> rep *) fun body_rep (Func (_, R2)) = body_rep R2 | body_rep R = R -(* rep -> rep *) fun flip_rep_polarity (Formula polar) = Formula (flip_polarity polar) | flip_rep_polarity (Func (R1, R2)) = Func (R1, flip_rep_polarity R2) | flip_rep_polarity R = R -(* int Typtab.table -> rep -> rep *) fun one_rep _ _ Any = raise REP ("Nitpick_Rep.one_rep", [Any]) | one_rep _ _ (Atom x) = Atom x | one_rep _ _ (Struct Rs) = Struct Rs @@ -189,12 +177,10 @@ fun opt_rep ofs (Type (@{type_name fun}, [_, T2])) (Func (R1, R2)) = Func (R1, opt_rep ofs T2 R2) | opt_rep ofs T R = Opt (optable_rep ofs T R) -(* rep -> rep *) fun unopt_rep (Func (R1, R2)) = Func (R1, unopt_rep R2) | unopt_rep (Opt R) = R | unopt_rep R = R -(* polarity -> polarity -> polarity *) fun min_polarity polar1 polar2 = if polar1 = polar2 then polar1 @@ -208,7 +194,6 @@ (* It's important that Func is before Vect, because if the range is Opt we could lose information by converting a Func to a Vect. *) -(* rep -> rep -> rep *) fun min_rep (Opt R1) (Opt R2) = Opt (min_rep R1 R2) | min_rep (Opt R) _ = Opt R | min_rep _ (Opt R) = Opt R @@ -237,7 +222,6 @@ else if k1 > k2 then Vect (k2, R2) else Vect (k1, min_rep R1 R2) | min_rep R1 R2 = raise REP ("Nitpick_Rep.min_rep", [R1, R2]) -(* rep list -> rep list -> rep list *) and min_reps [] _ = [] | min_reps _ [] = [] | min_reps (R1 :: Rs1) (R2 :: Rs2) = @@ -245,7 +229,6 @@ else if min_rep R1 R2 = R1 then R1 :: Rs1 else R2 :: Rs2 -(* int -> rep -> int *) fun card_of_domain_from_rep ran_card R = case R of Unit => 1 @@ -255,14 +238,12 @@ | Opt R => card_of_domain_from_rep ran_card R | _ => raise REP ("Nitpick_Rep.card_of_domain_from_rep", [R]) -(* int Typtab.table -> typ -> rep -> rep *) fun rep_to_binary_rel_rep ofs T R = let val k = exact_root 2 (card_of_domain_from_rep 2 R) val j0 = offset_of_type ofs (fst (HOLogic.dest_prodT (domain_type T))) in Func (Struct [Atom (k, j0), Atom (k, j0)], Formula Neut) end -(* scope -> typ -> rep *) fun best_one_rep_for_type (scope as {card_assigns, ...} : scope) (Type (@{type_name fun}, [T1, T2])) = (case best_one_rep_for_type scope T2 of @@ -283,7 +264,6 @@ (* Datatypes are never represented by Unit, because it would confuse "nfa_transitions_for_ctor". 
*) -(* scope -> typ -> rep *) fun best_opt_set_rep_for_type scope (Type (@{type_name fun}, [T1, T2])) = Func (best_one_rep_for_type scope T1, best_opt_set_rep_for_type scope T2) | best_opt_set_rep_for_type (scope as {ofs, ...}) T = @@ -308,7 +288,6 @@ | best_non_opt_symmetric_reps_for_fun_type _ T = raise TYPE ("Nitpick_Rep.best_non_opt_symmetric_reps_for_fun_type", [T], []) -(* rep -> (int * int) list *) fun atom_schema_of_rep Any = raise REP ("Nitpick_Rep.atom_schema_of_rep", [Any]) | atom_schema_of_rep (Formula _) = [] | atom_schema_of_rep Unit = [] @@ -318,10 +297,8 @@ | atom_schema_of_rep (Func (R1, R2)) = atom_schema_of_rep R1 @ atom_schema_of_rep R2 | atom_schema_of_rep (Opt R) = atom_schema_of_rep R -(* rep list -> (int * int) list *) and atom_schema_of_reps Rs = maps atom_schema_of_rep Rs -(* typ -> rep -> typ list *) fun type_schema_of_rep _ (Formula _) = [] | type_schema_of_rep _ Unit = [] | type_schema_of_rep T (Atom _) = [T] @@ -333,12 +310,9 @@ type_schema_of_rep T1 R1 @ type_schema_of_rep T2 R2 | type_schema_of_rep T (Opt R) = type_schema_of_rep T R | type_schema_of_rep _ R = raise REP ("Nitpick_Rep.type_schema_of_rep", [R]) -(* typ list -> rep list -> typ list *) and type_schema_of_reps Ts Rs = flat (map2 type_schema_of_rep Ts Rs) -(* rep -> int list list *) val all_combinations_for_rep = all_combinations o atom_schema_of_rep -(* rep list -> int list list *) val all_combinations_for_reps = all_combinations o atom_schema_of_reps end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/nitpick_scope.ML --- a/src/HOL/Tools/Nitpick/nitpick_scope.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick_scope.ML Tue May 04 20:30:22 2010 +0200 @@ -10,32 +10,32 @@ type styp = Nitpick_Util.styp type hol_context = Nitpick_HOL.hol_context - type constr_spec = { - const: styp, - delta: int, - epsilon: int, - exclusive: bool, - explicit_max: int, - total: bool} + type constr_spec = + {const: styp, + delta: int, + epsilon: int, + exclusive: bool, + explicit_max: int, + total: bool} - type dtype_spec = { - typ: typ, - card: int, - co: bool, - standard: bool, - complete: bool * bool, - concrete: bool * bool, - deep: bool, - constrs: constr_spec list} + type dtype_spec = + {typ: typ, + card: int, + co: bool, + standard: bool, + complete: bool * bool, + concrete: bool * bool, + deep: bool, + constrs: constr_spec list} - type scope = { - hol_ctxt: hol_context, - binarize: bool, - card_assigns: (typ * int) list, - bits: int, - bisim_depth: int, - datatypes: dtype_spec list, - ofs: int Typtab.table} + type scope = + {hol_ctxt: hol_context, + binarize: bool, + card_assigns: (typ * int) list, + bits: int, + bisim_depth: int, + datatypes: dtype_spec list, + ofs: int Typtab.table} val datatype_spec : dtype_spec list -> typ -> dtype_spec option val constr_spec : dtype_spec list -> styp -> constr_spec @@ -49,7 +49,7 @@ val scopes_equivalent : scope * scope -> bool val scope_less_eq : scope -> scope -> bool val all_scopes : - hol_context -> bool -> int -> (typ option * int list) list + hol_context -> bool -> (typ option * int list) list -> (styp option * int list) list -> (styp option * int list) list -> int list -> int list -> typ list -> typ list -> typ list -> typ list -> int * scope list @@ -61,43 +61,41 @@ open Nitpick_Util open Nitpick_HOL -type constr_spec = { - const: styp, - delta: int, - epsilon: int, - exclusive: bool, - explicit_max: int, - total: bool} +type constr_spec = + {const: styp, + delta: int, + epsilon: int, + exclusive: bool, + explicit_max: int, + total: 
bool} -type dtype_spec = { - typ: typ, - card: int, - co: bool, - standard: bool, - complete: bool * bool, - concrete: bool * bool, - deep: bool, - constrs: constr_spec list} +type dtype_spec = + {typ: typ, + card: int, + co: bool, + standard: bool, + complete: bool * bool, + concrete: bool * bool, + deep: bool, + constrs: constr_spec list} -type scope = { - hol_ctxt: hol_context, - binarize: bool, - card_assigns: (typ * int) list, - bits: int, - bisim_depth: int, - datatypes: dtype_spec list, - ofs: int Typtab.table} +type scope = + {hol_ctxt: hol_context, + binarize: bool, + card_assigns: (typ * int) list, + bits: int, + bisim_depth: int, + datatypes: dtype_spec list, + ofs: int Typtab.table} datatype row_kind = Card of typ | Max of styp type row = row_kind * int list type block = row list -(* dtype_spec list -> typ -> dtype_spec option *) fun datatype_spec (dtypes : dtype_spec list) T = List.find (curry (op =) T o #typ) dtypes -(* dtype_spec list -> styp -> constr_spec *) fun constr_spec [] x = raise TERM ("Nitpick_Scope.constr_spec", [Const x]) | constr_spec ({constrs, ...} :: dtypes : dtype_spec list) (x as (s, T)) = case List.find (curry (op =) (s, body_type T) o (apsnd body_type o #const)) @@ -105,7 +103,6 @@ SOME c => c | NONE => constr_spec dtypes x -(* dtype_spec list -> bool -> typ -> bool *) fun is_complete_type dtypes facto (Type (@{type_name fun}, [T1, T2])) = is_concrete_type dtypes facto T1 andalso is_complete_type dtypes facto T2 | is_complete_type dtypes facto (Type (@{type_name fin_fun}, [T1, T2])) = @@ -128,19 +125,15 @@ and is_exact_type dtypes facto = is_complete_type dtypes facto andf is_concrete_type dtypes facto -(* int Typtab.table -> typ -> int *) fun offset_of_type ofs T = case Typtab.lookup ofs T of SOME j0 => j0 | NONE => Typtab.lookup ofs dummyT |> the_default 0 -(* scope -> typ -> int * int *) fun spec_of_type ({card_assigns, ofs, ...} : scope) T = (card_of_type card_assigns T handle TYPE ("Nitpick_HOL.card_of_type", _, _) => ~1, offset_of_type ofs T) -(* (string -> string) -> scope - -> string list * string list * string list * string list * string list *) fun quintuple_for_scope quote ({hol_ctxt = {thy, ctxt, stds, ...}, card_assigns, bits, bisim_depth, datatypes, ...} : scope) = @@ -180,7 +173,6 @@ maxes (), iters (), miscs ())) () end -(* scope -> bool -> Pretty.T list *) fun pretties_for_scope scope verbose = let val (primary_cards, secondary_cards, maxes, iters, bisim_depths) = @@ -194,14 +186,10 @@ else []) in - if null ss then - [] - else - Sledgehammer_Util.serial_commas "and" ss - |> map Pretty.str |> Pretty.breaks + if null ss then [] + else serial_commas "and" ss |> map Pretty.str |> Pretty.breaks end -(* scope -> string *) fun multiline_string_for_scope scope = let val (primary_cards, secondary_cards, maxes, iters, bisim_depths) = @@ -216,47 +204,35 @@ | lines => space_implode "\n" lines end -(* scope * scope -> bool *) fun scopes_equivalent (s1 : scope, s2 : scope) = #datatypes s1 = #datatypes s2 andalso #card_assigns s1 = #card_assigns s2 fun scope_less_eq (s1 : scope) (s2 : scope) = (s1, s2) |> pairself (map snd o #card_assigns) |> op ~~ |> forall (op <=) -(* row -> int *) fun rank_of_row (_, ks) = length ks -(* block -> int *) fun rank_of_block block = fold Integer.max (map rank_of_row block) 1 -(* int -> typ * int list -> typ * int list *) fun project_row column (y, ks) = (y, [nth ks (Int.min (column, length ks - 1))]) -(* int -> block -> block *) fun project_block (column, block) = map (project_row column) block -(* (''a * ''a -> bool) -> 
(''a option * int list) list -> ''a -> int list *) fun lookup_ints_assign eq assigns key = case triple_lookup eq assigns key of SOME ks => ks | NONE => raise ARG ("Nitpick_Scope.lookup_ints_assign", "") -(* theory -> (typ option * int list) list -> typ -> int list *) fun lookup_type_ints_assign thy assigns T = - map (curry Int.max 1) (lookup_ints_assign (type_match thy) assigns T) + map (Integer.max 1) (lookup_ints_assign (type_match thy) assigns T) handle ARG ("Nitpick_Scope.lookup_ints_assign", _) => raise TYPE ("Nitpick_Scope.lookup_type_ints_assign", [T], []) -(* theory -> (styp option * int list) list -> styp -> int list *) fun lookup_const_ints_assign thy assigns x = lookup_ints_assign (const_match thy) assigns x handle ARG ("Nitpick_Scope.lookup_ints_assign", _) => raise TERM ("Nitpick_Scope.lookup_const_ints_assign", [Const x]) -(* theory -> (styp option * int list) list -> styp -> row option *) fun row_for_constr thy maxes_assigns constr = SOME (Max constr, lookup_const_ints_assign thy maxes_assigns constr) handle TERM ("lookup_const_ints_assign", _) => NONE val max_bits = 31 (* Kodkod limit *) -(* hol_context -> bool -> (typ option * int list) list - -> (styp option * int list) list -> (styp option * int list) list -> int list - -> int list -> typ -> block *) fun block_for_type (hol_ctxt as {thy, ...}) binarize cards_assigns maxes_assigns iters_assigns bitss bisim_depths T = if T = @{typ unsigned_bit} then @@ -279,13 +255,9 @@ [_] => [] | constrs => map_filter (row_for_constr thy maxes_assigns) constrs) -(* hol_context -> bool -> (typ option * int list) list - -> (styp option * int list) list -> (styp option * int list) list -> int list - -> int list -> typ list -> typ list -> block list *) fun blocks_for_types hol_ctxt binarize cards_assigns maxes_assigns iters_assigns bitss bisim_depths mono_Ts nonmono_Ts = let - (* typ -> block *) val block_for = block_for_type hol_ctxt binarize cards_assigns maxes_assigns iters_assigns bitss bisim_depths val mono_block = maps block_for mono_Ts @@ -294,10 +266,8 @@ val sync_threshold = 5 -(* int list -> int list list *) fun all_combinations_ordered_smartly ks = let - (* int list -> int *) fun cost_with_monos [] = 0 | cost_with_monos (k :: ks) = if k < sync_threshold andalso forall (curry (op =) k) ks then @@ -318,16 +288,13 @@ |> sort (int_ord o pairself fst) |> map snd end -(* typ -> bool *) fun is_self_recursive_constr_type T = exists (exists_subtype (curry (op =) (body_type T))) (binder_types T) -(* (styp * int) list -> styp -> int *) fun constr_max maxes x = the_default ~1 (AList.lookup (op =) maxes x) type scope_desc = (typ * int) list * (styp * int) list -(* hol_context -> bool -> scope_desc -> typ * int -> bool *) fun is_surely_inconsistent_card_assign hol_ctxt binarize (card_assigns, max_assigns) (T, k) = case binarized_and_boxed_datatype_constrs hol_ctxt binarize T of @@ -338,22 +305,17 @@ map (Integer.prod o map (bounded_card_of_type k ~1 card_assigns) o binder_types o snd) xs val maxes = map (constr_max max_assigns) xs - (* int -> int -> int *) fun effective_max card ~1 = card | effective_max card max = Int.min (card, max) val max = map2 effective_max dom_cards maxes |> Integer.sum in max < k end -(* hol_context -> bool -> (typ * int) list -> (typ * int) list - -> (styp * int) list -> bool *) fun is_surely_inconsistent_scope_description hol_ctxt binarize seen rest max_assigns = exists (is_surely_inconsistent_card_assign hol_ctxt binarize (seen @ rest, max_assigns)) seen -(* hol_context -> bool -> scope_desc -> (typ * int) list 
option *) fun repair_card_assigns hol_ctxt binarize (card_assigns, max_assigns) = let - (* (typ * int) list -> (typ * int) list -> (typ * int) list option *) fun aux seen [] = SOME seen | aux _ ((_, 0) :: _) = NONE | aux seen ((T, k) :: rest) = @@ -367,7 +329,6 @@ handle SAME () => aux seen ((T, k - 1) :: rest) in aux [] (rev card_assigns) end -(* theory -> (typ * int) list -> typ * int -> typ * int *) fun repair_iterator_assign thy assigns (T as Type (_, Ts), k) = (T, if T = @{typ bisim_iterator} then let @@ -381,15 +342,12 @@ k) | repair_iterator_assign _ _ assign = assign -(* row -> scope_desc -> scope_desc *) fun add_row_to_scope_descriptor (kind, ks) (card_assigns, max_assigns) = case kind of Card T => ((T, the_single ks) :: card_assigns, max_assigns) | Max x => (card_assigns, (x, the_single ks) :: max_assigns) -(* block -> scope_desc *) fun scope_descriptor_from_block block = fold_rev add_row_to_scope_descriptor block ([], []) -(* hol_context -> bool -> block list -> int list -> scope_desc option *) fun scope_descriptor_from_combination (hol_ctxt as {thy, ...}) binarize blocks columns = let @@ -403,11 +361,8 @@ end handle Option.Option => NONE -(* (typ * int) list -> dtype_spec list -> int Typtab.table *) fun offset_table_for_card_assigns assigns dtypes = let - (* int -> (int * int) list -> (typ * int) list -> int Typtab.table - -> int Typtab.table *) fun aux next _ [] = Typtab.update_new (dummyT, next) | aux next reusable ((T, k) :: assigns) = if k = 1 orelse is_iterator_type T orelse is_integer_type T @@ -423,18 +378,14 @@ #> aux (next + k) ((k, next) :: reusable) assigns in aux 0 [] assigns Typtab.empty end -(* int -> (typ * int) list -> typ -> int *) fun domain_card max card_assigns = Integer.prod o map (bounded_card_of_type max max card_assigns) o binder_types -(* scope_desc -> bool -> int -> (int -> int) -> int -> int -> bool * styp - -> constr_spec list -> constr_spec list *) fun add_constr_spec (card_assigns, max_assigns) co card sum_dom_cards num_self_recs num_non_self_recs (self_rec, x as (_, T)) constrs = let val max = constr_max max_assigns x - (* unit -> int *) fun next_delta () = if null constrs then 0 else #epsilon (hd constrs) val {delta, epsilon, exclusive, total} = if max = 0 then @@ -470,7 +421,6 @@ explicit_max = max, total = total} :: constrs end -(* hol_context -> bool -> typ list -> (typ * int) list -> typ -> bool *) fun has_exact_card hol_ctxt facto finitizable_dataTs card_assigns T = let val card = card_of_type card_assigns T in card = bounded_exact_card_of_type hol_ctxt @@ -478,8 +428,6 @@ card_assigns T end -(* hol_context -> bool -> typ list -> typ list -> scope_desc -> typ * int - -> dtype_spec *) fun datatype_spec_from_scope_descriptor (hol_ctxt as {thy, stds, ...}) binarize deep_dataTs finitizable_dataTs (desc as (card_assigns, _)) (T, card) = let @@ -490,7 +438,6 @@ val self_recs = map (is_self_recursive_constr_type o snd) xs val (num_self_recs, num_non_self_recs) = List.partition I self_recs |> pairself length - (* bool -> bool *) fun is_complete facto = has_exact_card hol_ctxt facto finitizable_dataTs card_assigns T fun is_concrete facto = @@ -500,7 +447,6 @@ card_assigns) val complete = pair_from_fun is_complete val concrete = pair_from_fun is_concrete - (* int -> int *) fun sum_dom_cards max = map (domain_card max card_assigns o snd) xs |> Integer.sum val constrs = @@ -512,10 +458,8 @@ concrete = concrete, deep = deep, constrs = constrs} end -(* hol_context -> bool -> int -> typ list -> typ list -> scope_desc -> scope *) -fun scope_from_descriptor 
(hol_ctxt as {thy, stds, ...}) binarize sym_break - deep_dataTs finitizable_dataTs - (desc as (card_assigns, _)) = +fun scope_from_descriptor (hol_ctxt as {thy, stds, ...}) binarize deep_dataTs + finitizable_dataTs (desc as (card_assigns, _)) = let val datatypes = map (datatype_spec_from_scope_descriptor hol_ctxt binarize deep_dataTs @@ -529,12 +473,9 @@ in {hol_ctxt = hol_ctxt, binarize = binarize, card_assigns = card_assigns, datatypes = datatypes, bits = bits, bisim_depth = bisim_depth, - ofs = if sym_break <= 0 then Typtab.empty - else offset_table_for_card_assigns card_assigns datatypes} + ofs = offset_table_for_card_assigns card_assigns datatypes} end -(* theory -> typ list -> (typ option * int list) list - -> (typ option * int list) list *) fun repair_cards_assigns_wrt_boxing_etc _ _ [] = [] | repair_cards_assigns_wrt_boxing_etc thy Ts ((SOME T, ks) :: cards_assigns) = (if is_fun_type T orelse is_pair_type T then @@ -548,12 +489,9 @@ val max_scopes = 4096 val distinct_threshold = 512 -(* hol_context -> bool -> int -> (typ option * int list) list - -> (styp option * int list) list -> (styp option * int list) list -> int list - -> typ list -> typ list -> typ list ->typ list -> int * scope list *) -fun all_scopes (hol_ctxt as {thy, ...}) binarize sym_break cards_assigns - maxes_assigns iters_assigns bitss bisim_depths mono_Ts nonmono_Ts - deep_dataTs finitizable_dataTs = +fun all_scopes (hol_ctxt as {thy, ...}) binarize cards_assigns maxes_assigns + iters_assigns bitss bisim_depths mono_Ts nonmono_Ts deep_dataTs + finitizable_dataTs = let val cards_assigns = repair_cards_assigns_wrt_boxing_etc thy mono_Ts cards_assigns @@ -569,8 +507,8 @@ in (length all - length head, descs |> length descs <= distinct_threshold ? distinct (op =) - |> map (scope_from_descriptor hol_ctxt binarize sym_break - deep_dataTs finitizable_dataTs)) + |> map (scope_from_descriptor hol_ctxt binarize deep_dataTs + finitizable_dataTs)) end end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/nitpick_tests.ML --- a/src/HOL/Tools/Nitpick/nitpick_tests.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick_tests.ML Tue May 04 20:30:22 2010 +0200 @@ -292,7 +292,6 @@ *) ] -(* Proof.context -> string * nut -> Kodkod.problem *) fun problem_for_nut ctxt (name, u) = let val debug = false @@ -319,11 +318,10 @@ formula = formula} end -(* unit -> unit *) fun run_all_tests () = case Kodkod.solve_any_problem false NONE 0 1 (map (problem_for_nut @{context}) tests) of Kodkod.Normal ([], _, _) => () - | _ => error "Tests failed" + | _ => error "Tests failed." 
end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Nitpick/nitpick_util.ML --- a/src/HOL/Tools/Nitpick/nitpick_util.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick_util.ML Tue May 04 20:30:22 2010 +0200 @@ -48,11 +48,17 @@ val is_substring_of : string -> string -> bool val plural_s : int -> string val plural_s_for_list : 'a list -> string + val serial_commas : string -> string list -> string list + val timestamp : unit -> string + val parse_bool_option : bool -> string -> string -> bool option + val parse_time_option : string -> string -> Time.time option val flip_polarity : polarity -> polarity val prop_T : typ val bool_T : typ val nat_T : typ val int_T : typ + val monomorphic_term : Type.tyenv -> term -> term + val specialize_type : theory -> (string * typ) -> term -> term val nat_subscript : int -> string val time_limit : Time.time option -> ('a -> 'b) -> 'a -> 'b val DETERM_TIMEOUT : Time.time option -> tactic -> tactic @@ -61,6 +67,8 @@ val pstrs : string -> Pretty.T list val unyxml : string -> string val maybe_quote : string -> string + val hashw : word * word -> word + val hashw_string : string * word -> word end; structure Nitpick_Util : NITPICK_UTIL = @@ -79,25 +87,18 @@ val nitpick_prefix = "Nitpick." -(* ('a * 'b * 'c -> 'd) -> 'a -> 'b -> 'c -> 'd *) fun curry3 f = fn x => fn y => fn z => f (x, y, z) -(* ('a -> 'b) -> ('a -> 'c) -> 'a -> 'b * 'c *) fun pairf f g x = (f x, g x) -(* (bool -> 'a) -> 'a * 'a *) fun pair_from_fun f = (f false, f true) -(* 'a * 'a -> bool -> 'a *) fun fun_from_pair (f, t) b = if b then t else f -(* bool -> int *) fun int_from_bool b = if b then 1 else 0 -(* int -> int -> int *) fun nat_minus i j = if i > j then i - j else 0 val max_exponent = 16384 -(* int -> int -> int *) fun reasonable_power _ 0 = 1 | reasonable_power a 1 = a | reasonable_power 0 _ = 0 @@ -114,7 +115,6 @@ c * c * reasonable_power a (b mod 2) end -(* int -> int -> int *) fun exact_log m n = let val r = Math.ln (Real.fromInt n) / Math.ln (Real.fromInt m) |> Real.round @@ -126,7 +126,6 @@ commas (map signed_string_of_int [m, n])) end -(* int -> int -> int *) fun exact_root m n = let val r = Math.pow (Real.fromInt n, 1.0 / (Real.fromInt m)) |> Real.round in if reasonable_power r m = n then @@ -136,22 +135,16 @@ commas (map signed_string_of_int [m, n])) end -(* ('a -> 'a -> 'a) -> 'a list -> 'a *) fun fold1 f = foldl1 (uncurry f) -(* int -> 'a list -> 'a list *) fun replicate_list 0 _ = [] | replicate_list n xs = xs @ replicate_list (n - 1) xs -(* int list -> int list *) fun offset_list ns = rev (tl (fold (fn x => fn xs => (x + hd xs) :: xs) ns [0])) -(* int -> int -> int list *) fun index_seq j0 n = if j0 < 0 then j0 downto j0 - n + 1 else j0 upto j0 + n - 1 -(* int list -> 'a list -> 'a list *) fun filter_indices js xs = let - (* int -> int list -> 'a list -> 'a list *) fun aux _ [] _ = [] | aux i (j :: js) (x :: xs) = if i = j then x :: aux (i + 1) js xs else aux (i + 1) (j :: js) xs @@ -160,7 +153,6 @@ in aux 0 js xs end fun filter_out_indices js xs = let - (* int -> int list -> 'a list -> 'a list *) fun aux _ [] xs = xs | aux i (j :: js) (x :: xs) = if i = j then aux (i + 1) js xs else x :: aux (i + 1) (j :: js) xs @@ -168,76 +160,66 @@ "indices unordered or out of range") in aux 0 js xs end -(* 'a list -> 'a list list -> 'a list list *) fun cartesian_product [] _ = [] | cartesian_product (x :: xs) yss = map (cons x) yss @ cartesian_product xs yss -(* 'a list list -> 'a list list *) fun n_fold_cartesian_product xss = fold_rev cartesian_product xss [[]] 
-(* ''a list -> (''a * ''a) list *) fun all_distinct_unordered_pairs_of [] = [] | all_distinct_unordered_pairs_of (x :: xs) = map (pair x) xs @ all_distinct_unordered_pairs_of xs -(* (int * int) list -> int -> int list *) val nth_combination = let - (* (int * int) list -> int -> int list * int *) fun aux [] n = ([], n) | aux ((k, j0) :: xs) n = let val (js, n) = aux xs n in ((n mod k) + j0 :: js, n div k) end in fst oo aux end -(* (int * int) list -> int list list *) val all_combinations = n_fold_cartesian_product o map (uncurry index_seq o swap) -(* 'a list -> 'a list list *) fun all_permutations [] = [[]] | all_permutations xs = maps (fn j => map (cons (nth xs j)) (all_permutations (nth_drop j xs))) (index_seq 0 (length xs)) -(* int -> 'a list -> 'a list list *) fun batch_list _ [] = [] | batch_list k xs = if length xs <= k then [xs] else List.take (xs, k) :: batch_list k (List.drop (xs, k)) -(* int list -> 'a list -> 'a list list *) fun chunk_list_unevenly _ [] = [] | chunk_list_unevenly [] ys = map single ys | chunk_list_unevenly (k :: ks) ys = let val (ys1, ys2) = chop k ys in ys1 :: chunk_list_unevenly ks ys2 end -(* ('a -> 'b -> 'c -> 'd) -> 'a list -> 'b list -> 'c list -> 'd list *) fun map3 _ [] [] [] = [] | map3 f (x :: xs) (y :: ys) (z :: zs) = f x y z :: map3 f xs ys zs | map3 _ _ _ _ = raise UnequalLengths -(* ('a * 'a -> bool) -> ('a option * 'b) list -> 'a -> 'b option *) fun double_lookup eq ps key = case AList.lookup (fn (SOME x, SOME y) => eq (x, y) | _ => false) ps (SOME key) of SOME z => SOME z | NONE => ps |> find_first (is_none o fst) |> Option.map snd -(* (''a * ''a -> bool) -> (''a option * 'b) list -> ''a -> 'b option *) fun triple_lookup _ [(NONE, z)] _ = SOME z | triple_lookup eq ps key = case AList.lookup (op =) ps (SOME key) of SOME z => SOME z | NONE => double_lookup eq ps key -(* string -> string -> bool *) fun is_substring_of needle stack = not (Substring.isEmpty (snd (Substring.position needle (Substring.full stack)))) -(* int -> string *) -fun plural_s n = if n = 1 then "" else "s" -(* 'a list -> string *) +val plural_s = Sledgehammer_Util.plural_s fun plural_s_for_list xs = plural_s (length xs) -(* polarity -> polarity *) +val serial_commas = Sledgehammer_Util.serial_commas + +val timestamp = Sledgehammer_Util.timestamp +val parse_bool_option = Sledgehammer_Util.parse_bool_option +val parse_time_option = Sledgehammer_Util.parse_time_option + fun flip_polarity Pos = Neg | flip_polarity Neg = Pos | flip_polarity Neut = Neut @@ -247,47 +229,38 @@ val nat_T = @{typ nat} val int_T = @{typ int} -(* string -> string *) +val monomorphic_term = Sledgehammer_Util.monomorphic_term +val specialize_type = Sledgehammer_Util.specialize_type + val subscript = implode o map (prefix "\<^isub>") o explode -(* int -> string *) fun nat_subscript n = (* cheap trick to ensure proper alphanumeric ordering for one- and two-digit numbers *) if n <= 9 then "\<^bsub>" ^ signed_string_of_int n ^ "\<^esub>" else subscript (string_of_int n) -(* Time.time option -> ('a -> 'b) -> 'a -> 'b *) fun time_limit NONE = I | time_limit (SOME delay) = TimeLimit.timeLimit delay -(* Time.time option -> tactic -> tactic *) fun DETERM_TIMEOUT delay tac st = Seq.of_list (the_list (time_limit delay (fn () => SINGLE tac st) ())) -(* ('a -> 'b) -> 'a -> 'b *) fun setmp_show_all_types f = setmp_CRITICAL show_all_types (! show_types orelse ! show_sorts orelse ! 
show_all_types) f val indent_size = 2 -(* string -> Pretty.T list *) val pstrs = Pretty.breaks o map Pretty.str o space_explode " " -(* XML.tree -> string *) -fun plain_string_from_xml_tree t = - Buffer.empty |> XML.add_content t |> Buffer.content -(* string -> string *) -val unyxml = plain_string_from_xml_tree o YXML.parse +val unyxml = Sledgehammer_Util.unyxml +val maybe_quote = Sledgehammer_Util.maybe_quote -(* string -> bool *) -val is_long_identifier = forall Syntax.is_identifier o space_explode "." -(* string -> string *) -fun maybe_quote y = - let val s = unyxml y in - y |> ((not (is_long_identifier (perhaps (try (unprefix "'")) s)) andalso - not (is_long_identifier (perhaps (try (unprefix "?")) s))) orelse - OuterKeyword.is_keyword s) ? quote - end +(* This hash function is recommended in Compilers: Principles, Techniques, and + Tools, by Aho, Sethi, and Ullman. The "hashpjw" function, which they + particularly recommend, triggers a bug in versions of Poly/ML up to 4.2.0. *) +fun hashw (u, w) = Word.+ (u, Word.* (0w65599, w)) +fun hashw_char (c, w) = hashw (Word.fromInt (Char.ord c), w) +fun hashw_string (s:string, w) = CharVector.foldl hashw_char w s end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Predicate_Compile/predicate_compile_aux.ML --- a/src/HOL/Tools/Predicate_Compile/predicate_compile_aux.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Predicate_Compile/predicate_compile_aux.ML Tue May 04 20:30:22 2010 +0200 @@ -778,7 +778,7 @@ let val (_, args) = strip_comb atom in rewrite_args args end - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy val (((T_insts, t_insts), [intro']), ctxt1) = Variable.import false [intro] ctxt val intro_t = prop_of intro' val concl = Logic.strip_imp_concl intro_t @@ -860,7 +860,8 @@ fun peephole_optimisation thy intro = let - val process = MetaSimplifier.rewrite_rule (Predicate_Compile_Simps.get (ProofContext.init thy)) + val process = + MetaSimplifier.rewrite_rule (Predicate_Compile_Simps.get (ProofContext.init_global thy)) fun process_False intro_t = if member (op =) (Logic.strip_imp_prems intro_t) @{prop "False"} then NONE else SOME intro_t fun process_True intro_t = diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Predicate_Compile/predicate_compile_core.ML --- a/src/HOL/Tools/Predicate_Compile/predicate_compile_core.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Predicate_Compile/predicate_compile_core.ML Tue May 04 20:30:22 2010 +0200 @@ -529,16 +529,19 @@ fun instantiate i n {context = ctxt, params = p, prems = prems, asms = a, concl = cl, schematics = s} = let + fun term_pair_of (ix, (ty,t)) = (Var (ix,ty), t) + fun inst_of_matches tts = fold (Pattern.match thy) tts (Vartab.empty, Vartab.empty) + |> snd |> Vartab.dest |> map (pairself (cterm_of thy) o term_pair_of) val (cases, (eqs, prems)) = apsnd (chop (nargs - nparams)) (chop n prems) val case_th = MetaSimplifier.simplify true - (@{thm Predicate.eq_is_eq} :: map meta_eq_of eqs) - (nth cases (i - 1)) + (@{thm Predicate.eq_is_eq} :: map meta_eq_of eqs) (nth cases (i - 1)) val prems' = maps (dest_conjunct_prem o MetaSimplifier.simplify true tuple_rew_rules) prems val pats = map (swap o HOLogic.dest_eq o HOLogic.dest_Trueprop) (take nargs (prems_of case_th)) - val (_, tenv) = fold (Pattern.match thy) pats (Vartab.empty, Vartab.empty) - fun term_pair_of (ix, (ty,t)) = (Var (ix,ty), t) - val inst = map (pairself (cterm_of thy) o term_pair_of) (Vartab.dest tenv) - val thesis = Thm.instantiate ([], inst) case_th OF (replicate nargs @{thm refl}) 
OF prems' + val case_th' = Thm.instantiate ([], inst_of_matches pats) case_th + OF (replicate nargs @{thm refl}) + val thesis = + Thm.instantiate ([], inst_of_matches (prems_of case_th' ~~ map prop_of prems')) case_th' + OF prems' in (rtac thesis 1) end @@ -577,7 +580,7 @@ fun replace_eqs (Const ("Trueprop", _) $ (Const ("op =", T) $ lhs $ rhs)) = HOLogic.mk_Trueprop (Const (@{const_name Predicate.eq}, T) $ lhs $ rhs) | replace_eqs t = t - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy val ((_, [elimrule]), ctxt') = Variable.import false [elimrule] ctxt val prems = Thm.prems_of elimrule val nargs = length (snd (strip_comb (HOLogic.dest_Trueprop (hd prems)))) @@ -605,7 +608,7 @@ val no_compilation = ([], ([], [])) fun fetch_pred_data thy name = - case try (Inductive.the_inductive (ProofContext.init thy)) name of + case try (Inductive.the_inductive (ProofContext.init_global thy)) name of SOME (info as (_, result)) => let fun is_intro_of intro = @@ -618,7 +621,7 @@ val pre_elim = nth (#elims result) index val pred = nth (#preds result) index val nparams = length (Inductive.params_of (#raw_induct result)) - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy val elim_t = mk_casesrule ctxt pred intros val elim = prove_casesrule ctxt (pred, (pre_elim, nparams)) elim_t @@ -633,7 +636,7 @@ in PredData.map (Graph.map_node name (map_pred_data add)) end fun is_inductive_predicate thy name = - is_some (try (Inductive.the_inductive (ProofContext.init thy)) name) + is_some (try (Inductive.the_inductive (ProofContext.init_global thy)) name) fun depending_preds_of thy (key, value) = let @@ -685,7 +688,7 @@ val pred = Const (constname, T) val pre_elim = (Drule.export_without_context o Skip_Proof.make_thm thy) - (mk_casesrule (ProofContext.init thy) pred pre_intros) + (mk_casesrule (ProofContext.init_global thy) pred pre_intros) in register_predicate (constname, pre_intros, pre_elim) thy end fun defined_function_of compilation pred = @@ -1157,7 +1160,7 @@ fun is_possible_output thy vs t = forall (fn t => is_eqT (fastype_of t) andalso forall (member (op =) vs) (term_vs t)) - (non_invertible_subterms (ProofContext.init thy) t) + (non_invertible_subterms (ProofContext.init_global thy) t) andalso (forall (is_eqT o snd) (inter (fn ((f', _), f) => f = f') vs (Term.add_frees t []))) @@ -1364,7 +1367,7 @@ val modes = map (fn (s, ms) => (s, map (fn ((p, m), r) => m) ms)) modes' in (modes, modes) end val (in_ts, out_ts) = split_mode mode ts - val in_vs = maps (vars_of_destructable_term (ProofContext.init thy)) in_ts + val in_vs = maps (vars_of_destructable_term (ProofContext.init_global thy)) in_ts val out_vs = terms_vs out_ts fun known_vs_after p vs = (case p of Prem t => union (op =) vs (term_vs t) @@ -1936,7 +1939,7 @@ fun compile_pred options compilation_modifiers thy all_vs param_vs s T (pol, mode) moded_cls = let - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy val compilation_modifiers = if pol then compilation_modifiers else negative_comp_modifiers_of compilation_modifiers val additional_arguments = Comp_Mod.additional_arguments compilation_modifiers @@ -2069,11 +2072,11 @@ val simprules = [defthm, @{thm eval_pred}, @{thm "split_beta"}, @{thm "fst_conv"}, @{thm "snd_conv"}, @{thm pair_collapse}] val unfolddef_tac = Simplifier.asm_full_simp_tac (HOL_basic_ss addsimps simprules) 1 - val introthm = Goal.prove (ProofContext.init thy) + val introthm = Goal.prove (ProofContext.init_global thy) (argnames @ hoarg_names' @ ["y"]) [] 
introtrm (fn _ => unfolddef_tac) val P = HOLogic.mk_Trueprop (Free ("P", HOLogic.boolT)); val elimtrm = Logic.list_implies ([funpropE, Logic.mk_implies (predpropE, P)], P) - val elimthm = Goal.prove (ProofContext.init thy) + val elimthm = Goal.prove (ProofContext.init_global thy) (argnames @ ["y", "P"]) [] elimtrm (fn _ => unfolddef_tac) val opt_neg_introthm = if is_all_input mode then @@ -2087,7 +2090,7 @@ Simplifier.asm_full_simp_tac (HOL_basic_ss addsimps (@{thm if_False} :: @{thm Predicate.not_pred_eq} :: simprules)) 1 THEN rtac @{thm Predicate.singleI} 1 - in SOME (Goal.prove (ProofContext.init thy) (argnames @ hoarg_names') [] + in SOME (Goal.prove (ProofContext.init_global thy) (argnames @ hoarg_names') [] neg_introtrm (fn _ => tac)) end else NONE @@ -2601,7 +2604,7 @@ fun prove_pred options thy clauses preds pred (pol, mode) (moded_clauses, compiled_term) = let - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy val clauses = case AList.lookup (op =) clauses pred of SOME rs => rs | NONE => [] in Goal.prove ctxt (Term.add_free_names compiled_term []) [] compiled_term @@ -2664,7 +2667,7 @@ val preds = map (fn c => Const (c, Sign.the_const_type thy c)) prednames val (preds, intrs) = unify_consts thy preds intrs val ([preds, intrs], _) = fold_burrow (Variable.import_terms false) [preds, intrs] - (ProofContext.init thy) + (ProofContext.init_global thy) val preds = map dest_Const preds val all_vs = terms_vs intrs val all_modes = @@ -2741,7 +2744,7 @@ val nparams = nparams_of thy predname val elim' = (Drule.export_without_context o Skip_Proof.make_thm thy) - (mk_casesrule (ProofContext.init thy) nparams intros) + (mk_casesrule (ProofContext.init_global thy) nparams intros) in if not (Thm.equiv_thm (elim, elim')) then error "Introduction and elimination rules do not match!" 
@@ -2754,7 +2757,7 @@ fun add_code_equations thy preds result_thmss = let - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy fun add_code_equation (predname, T) (pred, result_thms) = let val full_mode = fold_rev (curry Fun) (map (K Input) (binder_types T)) Bool @@ -3044,7 +3047,7 @@ fun after_qed thms goal_ctxt = let val global_thms = ProofContext.export goal_ctxt - (ProofContext.init (ProofContext.theory_of goal_ctxt)) (map the_single thms) + (ProofContext.init_global (ProofContext.theory_of goal_ctxt)) (map the_single thms) in goal_ctxt |> Local_Theory.theory (fold set_elim global_thms #> ((case compilation options of @@ -3059,7 +3062,7 @@ ) options [const])) end in - Proof.theorem_i NONE after_qed (map (single o (rpair [])) cases_rules) lthy'' + Proof.theorem NONE after_qed (map (single o (rpair [])) cases_rules) lthy'' end; val code_pred = generic_code_pred (K I); @@ -3161,7 +3164,7 @@ | DSeq => dseq_comp_modifiers | Pos_Random_DSeq => pos_random_dseq_comp_modifiers | New_Pos_Random_DSeq => new_pos_random_dseq_comp_modifiers - val t_pred = compile_expr comp_modifiers (ProofContext.init thy) + val t_pred = compile_expr comp_modifiers (ProofContext.init_global thy) (body, deriv) additional_arguments; val T_pred = dest_predT compfuns (fastype_of t_pred) val arrange = split_lambda (HOLogic.mk_tuple outargs) output_tuple @@ -3229,14 +3232,14 @@ (Code_Eval.eval NONE ("Predicate_Compile_Core.new_random_dseq_stats_eval_ref", new_random_dseq_stats_eval_ref) (fn proc => fn g => fn nrandom => fn size => fn s => fn depth => g nrandom size s depth - |> Lazy_Sequence.map (apfst proc)) + |> Lazy_Sequence.mapa (apfst proc)) thy t' [] nrandom size seed depth)))) else rpair NONE (fst (Lazy_Sequence.yieldn k (Code_Eval.eval NONE ("Predicate_Compile_Core.new_random_dseq_eval_ref", new_random_dseq_eval_ref) (fn proc => fn g => fn nrandom => fn size => fn s => fn depth => g nrandom size s depth - |> Lazy_Sequence.map proc) + |> Lazy_Sequence.mapa proc) thy t' [] nrandom size seed depth))) end | _ => diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Predicate_Compile/predicate_compile_data.ML --- a/src/HOL/Tools/Predicate_Compile/predicate_compile_data.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Predicate_Compile/predicate_compile_data.ML Tue May 04 20:30:22 2010 +0200 @@ -129,7 +129,7 @@ fun split_all_pairs thy th = let - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy val ((_, [th']), ctxt') = Variable.import true [th] ctxt val t = prop_of th' val frees = Term.add_frees t [] @@ -153,7 +153,7 @@ fun inline_equations thy th = let - val inline_defs = Predicate_Compile_Inline_Defs.get (ProofContext.init thy) + val inline_defs = Predicate_Compile_Inline_Defs.get (ProofContext.init_global thy) val th' = (Simplifier.full_simplify (HOL_basic_ss addsimps inline_defs)) th (*val _ = print_step options ("Inlining " ^ (Syntax.string_of_term_global thy (prop_of th)) @@ -188,7 +188,7 @@ tracing ("getting specification of " ^ Syntax.string_of_term_global thy t ^ " with type " ^ Syntax.string_of_typ_global thy (fastype_of t)) else () - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy fun filtering th = if is_equationlike th andalso defining_const_of_equation (normalize_equation thy th) = fst (dest_Const t) then diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Predicate_Compile/predicate_compile_fun.ML --- a/src/HOL/Tools/Predicate_Compile/predicate_compile_fun.ML Tue May 04 19:57:55 2010 +0200 +++ 
b/src/HOL/Tools/Predicate_Compile/predicate_compile_fun.ML Tue May 04 20:30:22 2010 +0200 @@ -195,7 +195,7 @@ SOME raw_split_thm => let val (f, args) = strip_comb t - val split_thm = prepare_split_thm (ProofContext.init thy) raw_split_thm + val split_thm = prepare_split_thm (ProofContext.init_global thy) raw_split_thm val (assms, concl) = Logic.strip_horn (prop_of split_thm) val (P, [split_t]) = strip_comb (HOLogic.dest_Trueprop concl) val subst = Pattern.match thy (split_t, t) (Vartab.empty, Vartab.empty) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Predicate_Compile/predicate_compile_pred.ML --- a/src/HOL/Tools/Predicate_Compile/predicate_compile_pred.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Predicate_Compile/predicate_compile_pred.ML Tue May 04 20:30:22 2010 +0200 @@ -64,7 +64,7 @@ fun instantiated_case_rewrites thy Tcon = let val rew_ths = case_rewrites thy Tcon - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy fun instantiate th = let val f = (fst (strip_comb (fst (HOLogic.dest_eq (HOLogic.dest_Trueprop (prop_of th)))))) @@ -128,9 +128,10 @@ | SOME raw_split_thm => let val (f, args) = strip_comb atom - val split_thm = prepare_split_thm (ProofContext.init thy) raw_split_thm + val split_thm = prepare_split_thm (ProofContext.init_global thy) raw_split_thm (* TODO: contextify things - this line is to unvarify the split_thm *) - (*val ((_, [isplit_thm]), _) = Variable.import true [split_thm] (ProofContext.init thy)*) + (*val ((_, [isplit_thm]), _) = + Variable.import true [split_thm] (ProofContext.init_global thy)*) val (assms, concl) = Logic.strip_horn (prop_of split_thm) val (P, [split_t]) = strip_comb (HOLogic.dest_Trueprop concl) val Tcons = datatype_names_of_case_name thy (fst (dest_Const f)) @@ -188,7 +189,7 @@ fun flatten_intros constname intros thy = let - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy val ((_, intros), ctxt') = Variable.import true intros ctxt val (intros', (local_defs, thy')) = (fold_map o fold_map_atoms) (flatten constname) (map prop_of intros) ([], thy) @@ -206,7 +207,7 @@ fun introrulify thy ths = let - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy val ((_, ths'), ctxt') = Variable.import true ths ctxt fun introrulify' th = let @@ -277,7 +278,7 @@ SOME specss => (specss, thy) | NONE =>*) let - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy val intros = if forall is_pred_equation specs then map (map_term thy (maps_premises (split_conjs thy))) (introrulify thy (map rewrite specs)) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Predicate_Compile/predicate_compile_quickcheck.ML --- a/src/HOL/Tools/Predicate_Compile/predicate_compile_quickcheck.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Predicate_Compile/predicate_compile_quickcheck.ML Tue May 04 20:30:22 2010 +0200 @@ -200,7 +200,7 @@ (map HOLogic.mk_Trueprop (prems @ [HOLogic.mk_not concl]), HOLogic.mk_Trueprop (list_comb (Const (full_constname, constT), map Free vs'))) val tac = fn _ => Skip_Proof.cheat_tac thy1 - val intro = Goal.prove (ProofContext.init thy1) (map fst vs') [] t tac + val intro = Goal.prove (ProofContext.init_global thy1) (map fst vs') [] t tac val thy2 = Context.theory_map (Predicate_Compile_Alternative_Defs.add_thm intro) thy1 val (thy3, preproc_time) = cpu_time "predicate preprocessing" (fn () => Predicate_Compile.preprocess options const thy2) @@ -267,7 +267,7 @@ Code_Eval.eval (SOME target) ("Predicate_Compile_Quickcheck.new_test_ref", 
new_test_ref) (fn proc => fn g => fn nrandom => fn size => fn s => fn depth => - g nrandom size s depth |> (Lazy_Sequence.map o map) proc) + g nrandom size s depth |> (Lazy_Sequence.mapa o map) proc) thy4 qc_term [] in fn size => fn nrandom => fn depth => Option.map fst (Lazy_Sequence.yield diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Predicate_Compile/predicate_compile_specialisation.ML --- a/src/HOL/Tools/Predicate_Compile/predicate_compile_specialisation.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Predicate_Compile/predicate_compile_specialisation.ML Tue May 04 20:30:22 2010 +0200 @@ -65,7 +65,7 @@ fun specialise_intros black_list (pred, intros) pats thy = let - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy val maxidx = fold (Term.maxidx_term o prop_of) intros ~1 val pats = map (Logic.incr_indexes ([], maxidx + 1)) pats val (((pred, intros), pats), ctxt') = import (pred, intros) pats ctxt diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Qelim/cooper.ML --- a/src/HOL/Tools/Qelim/cooper.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Qelim/cooper.ML Tue May 04 20:30:22 2010 +0200 @@ -536,7 +536,7 @@ structure Coopereif = struct -open GeneratedCooper; +open Generated_Cooper; fun cooper s = raise Cooper.COOPER ("Cooper oracle failed", ERROR s); fun i_of_term vs t = case t diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Qelim/generated_cooper.ML --- a/src/HOL/Tools/Qelim/generated_cooper.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Qelim/generated_cooper.ML Tue May 04 20:30:22 2010 +0200 @@ -1,49 +1,263 @@ -(* Title: HOL/Tools/Qelim/generated_cooper.ML +(* Generated from Cooper.thy; DO NOT EDIT! *) -This file is generated from HOL/Decision_Procs/Cooper.thy. DO NOT EDIT. -*) - -structure GeneratedCooper = -struct +structure Generated_Cooper : sig + type 'a eq + val eq : 'a eq -> 'a -> 'a -> bool + val eqa : 'a eq -> 'a -> 'a -> bool + val leta : 'a -> ('a -> 'b) -> 'b + val suc : IntInf.int -> IntInf.int + datatype num = C of IntInf.int | Bound of IntInf.int | + Cn of IntInf.int * IntInf.int * num | Neg of num | Add of num * num | + Sub of num * num | Mul of IntInf.int * num + datatype fm = T | F | Lt of num | Le of num | Gt of num | Ge of num | + Eq of num | NEq of num | Dvd of IntInf.int * num | NDvd of IntInf.int * num + | Not of fm | And of fm * fm | Or of fm * fm | Imp of fm * fm | + Iff of fm * fm | E of fm | A of fm | Closed of IntInf.int | + NClosed of IntInf.int + val map : ('a -> 'b) -> 'a list -> 'b list + val append : 'a list -> 'a list -> 'a list + val disjuncts : fm -> fm list + val fm_case : + 'a -> 'a -> (num -> 'a) -> + (num -> 'a) -> + (num -> 'a) -> + (num -> 'a) -> + (num -> 'a) -> + (num -> 'a) -> + (IntInf.int -> num -> 'a) -> + (IntInf.int -> num -> 'a) -> + (fm -> 'a) -> + (fm -> fm -> 'a) -> + (fm -> fm -> 'a) -> + (fm -> fm -> 'a) -> +(fm -> fm -> 'a) -> + (fm -> 'a) -> + (fm -> 'a) -> (IntInf.int -> 'a) -> (IntInf.int -> 'a) -> fm -> 'a + val eq_num : num -> num -> bool + val eq_fm : fm -> fm -> bool + val djf : ('a -> fm) -> 'a -> fm -> fm + val foldr : ('a -> 'b -> 'b) -> 'a list -> 'b -> 'b + val evaldjf : ('a -> fm) -> 'a list -> fm + val dj : (fm -> fm) -> fm -> fm + val disj : fm -> fm -> fm + val minus_nat : IntInf.int -> IntInf.int -> IntInf.int + val decrnum : num -> num + val decr : fm -> fm + val concat_map : ('a -> 'b list) -> 'a list -> 'b list + val numsubst0 : num -> num -> num + val subst0 : num -> fm -> fm + val minusinf : fm -> fm + val eq_int : IntInf.int eq + val zero_int : 
IntInf.int + type 'a zero + val zero : 'a zero -> 'a + val zero_inta : IntInf.int zero + type 'a times + val times : 'a times -> 'a -> 'a -> 'a + type 'a no_zero_divisors + val times_no_zero_divisors : 'a no_zero_divisors -> 'a times + val zero_no_zero_divisors : 'a no_zero_divisors -> 'a zero + val times_int : IntInf.int times + val no_zero_divisors_int : IntInf.int no_zero_divisors + type 'a one + val one : 'a one -> 'a + type 'a zero_neq_one + val one_zero_neq_one : 'a zero_neq_one -> 'a one + val zero_zero_neq_one : 'a zero_neq_one -> 'a zero + type 'a semigroup_mult + val times_semigroup_mult : 'a semigroup_mult -> 'a times + type 'a plus + val plus : 'a plus -> 'a -> 'a -> 'a + type 'a semigroup_add + val plus_semigroup_add : 'a semigroup_add -> 'a plus + type 'a ab_semigroup_add + val semigroup_add_ab_semigroup_add : 'a ab_semigroup_add -> 'a semigroup_add + type 'a semiring + val ab_semigroup_add_semiring : 'a semiring -> 'a ab_semigroup_add + val semigroup_mult_semiring : 'a semiring -> 'a semigroup_mult + type 'a mult_zero + val times_mult_zero : 'a mult_zero -> 'a times + val zero_mult_zero : 'a mult_zero -> 'a zero + type 'a monoid_add + val semigroup_add_monoid_add : 'a monoid_add -> 'a semigroup_add + val zero_monoid_add : 'a monoid_add -> 'a zero + type 'a comm_monoid_add + val ab_semigroup_add_comm_monoid_add : + 'a comm_monoid_add -> 'a ab_semigroup_add + val monoid_add_comm_monoid_add : 'a comm_monoid_add -> 'a monoid_add + type 'a semiring_0 + val comm_monoid_add_semiring_0 : 'a semiring_0 -> 'a comm_monoid_add + val mult_zero_semiring_0 : 'a semiring_0 -> 'a mult_zero + val semiring_semiring_0 : 'a semiring_0 -> 'a semiring + type 'a power + val one_power : 'a power -> 'a one + val times_power : 'a power -> 'a times + type 'a monoid_mult + val semigroup_mult_monoid_mult : 'a monoid_mult -> 'a semigroup_mult + val power_monoid_mult : 'a monoid_mult -> 'a power + type 'a semiring_1 + val monoid_mult_semiring_1 : 'a semiring_1 -> 'a monoid_mult + val semiring_0_semiring_1 : 'a semiring_1 -> 'a semiring_0 + val zero_neq_one_semiring_1 : 'a semiring_1 -> 'a zero_neq_one + type 'a cancel_semigroup_add + val semigroup_add_cancel_semigroup_add : + 'a cancel_semigroup_add -> 'a semigroup_add + type 'a cancel_ab_semigroup_add + val ab_semigroup_add_cancel_ab_semigroup_add : + 'a cancel_ab_semigroup_add -> 'a ab_semigroup_add + val cancel_semigroup_add_cancel_ab_semigroup_add : + 'a cancel_ab_semigroup_add -> 'a cancel_semigroup_add + type 'a cancel_comm_monoid_add + val cancel_ab_semigroup_add_cancel_comm_monoid_add : + 'a cancel_comm_monoid_add -> 'a cancel_ab_semigroup_add + val comm_monoid_add_cancel_comm_monoid_add : + 'a cancel_comm_monoid_add -> 'a comm_monoid_add + type 'a semiring_0_cancel + val cancel_comm_monoid_add_semiring_0_cancel : + 'a semiring_0_cancel -> 'a cancel_comm_monoid_add + val semiring_0_semiring_0_cancel : 'a semiring_0_cancel -> 'a semiring_0 + type 'a semiring_1_cancel + val semiring_0_cancel_semiring_1_cancel : + 'a semiring_1_cancel -> 'a semiring_0_cancel + val semiring_1_semiring_1_cancel : 'a semiring_1_cancel -> 'a semiring_1 + type 'a dvd + val times_dvd : 'a dvd -> 'a times + type 'a ab_semigroup_mult + val semigroup_mult_ab_semigroup_mult : + 'a ab_semigroup_mult -> 'a semigroup_mult + type 'a comm_semiring + val ab_semigroup_mult_comm_semiring : 'a comm_semiring -> 'a ab_semigroup_mult + val semiring_comm_semiring : 'a comm_semiring -> 'a semiring + type 'a comm_semiring_0 + val comm_semiring_comm_semiring_0 : 'a comm_semiring_0 -> 'a 
comm_semiring + val semiring_0_comm_semiring_0 : 'a comm_semiring_0 -> 'a semiring_0 + type 'a comm_monoid_mult + val ab_semigroup_mult_comm_monoid_mult : + 'a comm_monoid_mult -> 'a ab_semigroup_mult + val monoid_mult_comm_monoid_mult : 'a comm_monoid_mult -> 'a monoid_mult + type 'a comm_semiring_1 + val comm_monoid_mult_comm_semiring_1 : + 'a comm_semiring_1 -> 'a comm_monoid_mult + val comm_semiring_0_comm_semiring_1 : 'a comm_semiring_1 -> 'a comm_semiring_0 + val dvd_comm_semiring_1 : 'a comm_semiring_1 -> 'a dvd + val semiring_1_comm_semiring_1 : 'a comm_semiring_1 -> 'a semiring_1 + type 'a comm_semiring_0_cancel + val comm_semiring_0_comm_semiring_0_cancel : + 'a comm_semiring_0_cancel -> 'a comm_semiring_0 + val semiring_0_cancel_comm_semiring_0_cancel : + 'a comm_semiring_0_cancel -> 'a semiring_0_cancel + type 'a comm_semiring_1_cancel + val comm_semiring_0_cancel_comm_semiring_1_cancel : + 'a comm_semiring_1_cancel -> 'a comm_semiring_0_cancel + val comm_semiring_1_comm_semiring_1_cancel : + 'a comm_semiring_1_cancel -> 'a comm_semiring_1 + val semiring_1_cancel_comm_semiring_1_cancel : + 'a comm_semiring_1_cancel -> 'a semiring_1_cancel + type 'a diva + val dvd_div : 'a diva -> 'a dvd + val diva : 'a diva -> 'a -> 'a -> 'a + val moda : 'a diva -> 'a -> 'a -> 'a + type 'a semiring_div + val div_semiring_div : 'a semiring_div -> 'a diva + val comm_semiring_1_cancel_semiring_div : + 'a semiring_div -> 'a comm_semiring_1_cancel + val no_zero_divisors_semiring_div : 'a semiring_div -> 'a no_zero_divisors + val one_int : IntInf.int + val one_inta : IntInf.int one + val zero_neq_one_int : IntInf.int zero_neq_one + val semigroup_mult_int : IntInf.int semigroup_mult + val plus_int : IntInf.int plus + val semigroup_add_int : IntInf.int semigroup_add + val ab_semigroup_add_int : IntInf.int ab_semigroup_add + val semiring_int : IntInf.int semiring + val mult_zero_int : IntInf.int mult_zero + val monoid_add_int : IntInf.int monoid_add + val comm_monoid_add_int : IntInf.int comm_monoid_add + val semiring_0_int : IntInf.int semiring_0 + val power_int : IntInf.int power + val monoid_mult_int : IntInf.int monoid_mult + val semiring_1_int : IntInf.int semiring_1 + val cancel_semigroup_add_int : IntInf.int cancel_semigroup_add + val cancel_ab_semigroup_add_int : IntInf.int cancel_ab_semigroup_add + val cancel_comm_monoid_add_int : IntInf.int cancel_comm_monoid_add + val semiring_0_cancel_int : IntInf.int semiring_0_cancel + val semiring_1_cancel_int : IntInf.int semiring_1_cancel + val dvd_int : IntInf.int dvd + val ab_semigroup_mult_int : IntInf.int ab_semigroup_mult + val comm_semiring_int : IntInf.int comm_semiring + val comm_semiring_0_int : IntInf.int comm_semiring_0 + val comm_monoid_mult_int : IntInf.int comm_monoid_mult + val comm_semiring_1_int : IntInf.int comm_semiring_1 + val comm_semiring_0_cancel_int : IntInf.int comm_semiring_0_cancel + val comm_semiring_1_cancel_int : IntInf.int comm_semiring_1_cancel + val abs_int : IntInf.int -> IntInf.int + val split : ('a -> 'b -> 'c) -> 'a * 'b -> 'c + val sgn_int : IntInf.int -> IntInf.int + val apsnd : ('a -> 'b) -> 'c * 'a -> 'c * 'b + val divmod_int : IntInf.int -> IntInf.int -> IntInf.int * IntInf.int + val snd : 'a * 'b -> 'b + val mod_int : IntInf.int -> IntInf.int -> IntInf.int + val fst : 'a * 'b -> 'a + val div_int : IntInf.int -> IntInf.int -> IntInf.int + val div_inta : IntInf.int diva + val semiring_div_int : IntInf.int semiring_div + val dvd : 'a semiring_div * 'a eq -> 'a -> 'a -> bool + val num_case : + (IntInf.int -> 'a) -> 
+ (IntInf.int -> 'a) -> + (IntInf.int -> IntInf.int -> num -> 'a) -> + (num -> 'a) -> + (num -> num -> 'a) -> + (num -> num -> 'a) -> (IntInf.int -> num -> 'a) -> num -> 'a + val nummul : IntInf.int -> num -> num + val numneg : num -> num + val numadd : num * num -> num + val numsub : num -> num -> num + val simpnum : num -> num + val nota : fm -> fm + val iffa : fm -> fm -> fm + val impa : fm -> fm -> fm + val conj : fm -> fm -> fm + val simpfm : fm -> fm + val iupt : IntInf.int -> IntInf.int -> IntInf.int list + val mirror : fm -> fm + val size_list : 'a list -> IntInf.int + val alpha : fm -> num list + val beta : fm -> num list + val eq_numa : num eq + val member : 'a eq -> 'a -> 'a list -> bool + val remdups : 'a eq -> 'a list -> 'a list + val gcd_int : IntInf.int -> IntInf.int -> IntInf.int + val lcm_int : IntInf.int -> IntInf.int -> IntInf.int + val delta : fm -> IntInf.int + val a_beta : fm -> IntInf.int -> fm + val zeta : fm -> IntInf.int + val zsplit0 : num -> IntInf.int * num + val zlfm : fm -> fm + val unita : fm -> fm * (num list * IntInf.int) + val cooper : fm -> fm + val prep : fm -> fm + val qelim : fm -> (fm -> fm) -> fm + val pa : fm -> fm +end = struct type 'a eq = {eq : 'a -> 'a -> bool}; -fun eq (A_:'a eq) = #eq A_; - -val eq_nat = {eq = (fn a => fn b => ((a : IntInf.int) = b))} : IntInf.int eq; - -fun eqop A_ a b = eq A_ a b; - -fun divmod n m = (if eqop eq_nat m 0 then (0, n) else IntInf.divMod (n, m)); - -fun snd (a, b) = b; +val eq = #eq : 'a eq -> 'a -> 'a -> bool; -fun mod_nat m n = snd (divmod m n); - -fun gcd m n = (if eqop eq_nat n 0 then m else gcd n (mod_nat m n)); - -fun fst (a, b) = a; - -fun div_nat m n = fst (divmod m n); - -fun lcm m n = div_nat (IntInf.* (m, n)) (gcd m n); +fun eqa A_ a b = eq A_ a b; fun leta s f = f s; -fun suc n = IntInf.+ (n, 1); - -datatype num = Mul of IntInf.int * num | Sub of num * num | Add of num * num | - Neg of num | Cn of IntInf.int * IntInf.int * num | Bound of IntInf.int | - C of IntInf.int; +fun suc n = IntInf.+ (n, (1 : IntInf.int)); -datatype fm = NClosed of IntInf.int | Closed of IntInf.int | A of fm | E of fm | - Iff of fm * fm | Imp of fm * fm | Or of fm * fm | And of fm * fm | Not of fm | - NDvd of IntInf.int * num | Dvd of IntInf.int * num | NEq of num | Eq of num | - Ge of num | Gt of num | Le of num | Lt of num | F | T; +datatype num = C of IntInf.int | Bound of IntInf.int | + Cn of IntInf.int * IntInf.int * num | Neg of num | Add of num * num | + Sub of num * num | Mul of IntInf.int * num; -fun abs_int i = (if IntInf.< (i, (0 : IntInf.int)) then IntInf.~ i else i); - -fun zlcm i j = - (lcm (IntInf.max (0, (abs_int i))) (IntInf.max (0, (abs_int j)))); +datatype fm = T | F | Lt of num | Le of num | Gt of num | Ge of num | Eq of num + | NEq of num | Dvd of IntInf.int * num | NDvd of IntInf.int * num | Not of fm + | And of fm * fm | Or of fm * fm | Imp of fm * fm | Iff of fm * fm | E of fm | + A of fm | Closed of IntInf.int | NClosed of IntInf.int; fun map f [] = [] | map f (x :: xs) = f x :: map f xs; @@ -110,449 +324,441 @@ | fm_case f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 f16 f17 f18 f19 T = f1; -fun eq_num (Mul (c, d)) (Sub (a, b)) = false - | eq_num (Mul (c, d)) (Add (a, b)) = false - | eq_num (Sub (c, d)) (Add (a, b)) = false - | eq_num (Mul (b, c)) (Neg a) = false - | eq_num (Sub (b, c)) (Neg a) = false - | eq_num (Add (b, c)) (Neg a) = false - | eq_num (Mul (d, e)) (Cn (a, b, c)) = false - | eq_num (Sub (d, e)) (Cn (a, b, c)) = false - | eq_num (Add (d, e)) (Cn (a, b, c)) = false - | eq_num (Neg 
d) (Cn (a, b, c)) = false - | eq_num (Mul (b, c)) (Bound a) = false - | eq_num (Sub (b, c)) (Bound a) = false - | eq_num (Add (b, c)) (Bound a) = false - | eq_num (Neg b) (Bound a) = false - | eq_num (Cn (b, c, d)) (Bound a) = false - | eq_num (Mul (b, c)) (C a) = false - | eq_num (Sub (b, c)) (C a) = false - | eq_num (Add (b, c)) (C a) = false - | eq_num (Neg b) (C a) = false - | eq_num (Cn (b, c, d)) (C a) = false - | eq_num (Bound b) (C a) = false - | eq_num (Sub (a, b)) (Mul (c, d)) = false - | eq_num (Add (a, b)) (Mul (c, d)) = false - | eq_num (Add (a, b)) (Sub (c, d)) = false - | eq_num (Neg a) (Mul (b, c)) = false - | eq_num (Neg a) (Sub (b, c)) = false - | eq_num (Neg a) (Add (b, c)) = false - | eq_num (Cn (a, b, c)) (Mul (d, e)) = false - | eq_num (Cn (a, b, c)) (Sub (d, e)) = false - | eq_num (Cn (a, b, c)) (Add (d, e)) = false - | eq_num (Cn (a, b, c)) (Neg d) = false - | eq_num (Bound a) (Mul (b, c)) = false - | eq_num (Bound a) (Sub (b, c)) = false - | eq_num (Bound a) (Add (b, c)) = false - | eq_num (Bound a) (Neg b) = false - | eq_num (Bound a) (Cn (b, c, d)) = false - | eq_num (C a) (Mul (b, c)) = false - | eq_num (C a) (Sub (b, c)) = false - | eq_num (C a) (Add (b, c)) = false - | eq_num (C a) (Neg b) = false - | eq_num (C a) (Cn (b, c, d)) = false - | eq_num (C a) (Bound b) = false - | eq_num (Mul (inta, num)) (Mul (int', num')) = - ((inta : IntInf.int) = int') andalso eq_num num num' - | eq_num (Sub (num1, num2)) (Sub (num1', num2')) = - eq_num num1 num1' andalso eq_num num2 num2' - | eq_num (Add (num1, num2)) (Add (num1', num2')) = - eq_num num1 num1' andalso eq_num num2 num2' - | eq_num (Neg num) (Neg num') = eq_num num num' - | eq_num (Cn (nat, inta, num)) (Cn (nat', int', num')) = - ((nat : IntInf.int) = nat') andalso - (((inta : IntInf.int) = int') andalso eq_num num num') - | eq_num (Bound nat) (Bound nat') = ((nat : IntInf.int) = nat') - | eq_num (C inta) (C int') = ((inta : IntInf.int) = int'); +fun eq_num (C intaa) (C inta) = ((intaa : IntInf.int) = inta) + | eq_num (Bound nata) (Bound nat) = ((nata : IntInf.int) = nat) + | eq_num (Cn (nata, intaa, numa)) (Cn (nat, inta, num)) = + ((nata : IntInf.int) = nat) andalso + (((intaa : IntInf.int) = inta) andalso eq_num numa num) + | eq_num (Neg numa) (Neg num) = eq_num numa num + | eq_num (Add (num1a, num2a)) (Add (num1, num2)) = + eq_num num1a num1 andalso eq_num num2a num2 + | eq_num (Sub (num1a, num2a)) (Sub (num1, num2)) = + eq_num num1a num1 andalso eq_num num2a num2 + | eq_num (Mul (intaa, numa)) (Mul (inta, num)) = + ((intaa : IntInf.int) = inta) andalso eq_num numa num + | eq_num (C inta) (Bound nat) = false + | eq_num (Bound nat) (C inta) = false + | eq_num (C intaa) (Cn (nat, inta, num)) = false + | eq_num (Cn (nat, intaa, num)) (C inta) = false + | eq_num (C inta) (Neg num) = false + | eq_num (Neg num) (C inta) = false + | eq_num (C inta) (Add (num1, num2)) = false + | eq_num (Add (num1, num2)) (C inta) = false + | eq_num (C inta) (Sub (num1, num2)) = false + | eq_num (Sub (num1, num2)) (C inta) = false + | eq_num (C intaa) (Mul (inta, num)) = false + | eq_num (Mul (intaa, num)) (C inta) = false + | eq_num (Bound nata) (Cn (nat, inta, num)) = false + | eq_num (Cn (nata, inta, num)) (Bound nat) = false + | eq_num (Bound nat) (Neg num) = false + | eq_num (Neg num) (Bound nat) = false + | eq_num (Bound nat) (Add (num1, num2)) = false + | eq_num (Add (num1, num2)) (Bound nat) = false + | eq_num (Bound nat) (Sub (num1, num2)) = false + | eq_num (Sub (num1, num2)) (Bound nat) = false + | eq_num (Bound nat) (Mul 
(inta, num)) = false + | eq_num (Mul (inta, num)) (Bound nat) = false + | eq_num (Cn (nat, inta, numa)) (Neg num) = false + | eq_num (Neg numa) (Cn (nat, inta, num)) = false + | eq_num (Cn (nat, inta, num)) (Add (num1, num2)) = false + | eq_num (Add (num1, num2)) (Cn (nat, inta, num)) = false + | eq_num (Cn (nat, inta, num)) (Sub (num1, num2)) = false + | eq_num (Sub (num1, num2)) (Cn (nat, inta, num)) = false + | eq_num (Cn (nat, intaa, numa)) (Mul (inta, num)) = false + | eq_num (Mul (intaa, numa)) (Cn (nat, inta, num)) = false + | eq_num (Neg num) (Add (num1, num2)) = false + | eq_num (Add (num1, num2)) (Neg num) = false + | eq_num (Neg num) (Sub (num1, num2)) = false + | eq_num (Sub (num1, num2)) (Neg num) = false + | eq_num (Neg numa) (Mul (inta, num)) = false + | eq_num (Mul (inta, numa)) (Neg num) = false + | eq_num (Add (num1a, num2a)) (Sub (num1, num2)) = false + | eq_num (Sub (num1a, num2a)) (Add (num1, num2)) = false + | eq_num (Add (num1, num2)) (Mul (inta, num)) = false + | eq_num (Mul (inta, num)) (Add (num1, num2)) = false + | eq_num (Sub (num1, num2)) (Mul (inta, num)) = false + | eq_num (Mul (inta, num)) (Sub (num1, num2)) = false; -fun eq_fm (NClosed b) (Closed a) = false - | eq_fm (NClosed b) (A a) = false - | eq_fm (Closed b) (A a) = false - | eq_fm (NClosed b) (E a) = false - | eq_fm (Closed b) (E a) = false - | eq_fm (A b) (E a) = false - | eq_fm (NClosed c) (Iff (a, b)) = false - | eq_fm (Closed c) (Iff (a, b)) = false - | eq_fm (A c) (Iff (a, b)) = false - | eq_fm (E c) (Iff (a, b)) = false - | eq_fm (NClosed c) (Imp (a, b)) = false - | eq_fm (Closed c) (Imp (a, b)) = false - | eq_fm (A c) (Imp (a, b)) = false - | eq_fm (E c) (Imp (a, b)) = false - | eq_fm (Iff (c, d)) (Imp (a, b)) = false - | eq_fm (NClosed c) (Or (a, b)) = false - | eq_fm (Closed c) (Or (a, b)) = false - | eq_fm (A c) (Or (a, b)) = false - | eq_fm (E c) (Or (a, b)) = false - | eq_fm (Iff (c, d)) (Or (a, b)) = false - | eq_fm (Imp (c, d)) (Or (a, b)) = false - | eq_fm (NClosed c) (And (a, b)) = false - | eq_fm (Closed c) (And (a, b)) = false - | eq_fm (A c) (And (a, b)) = false - | eq_fm (E c) (And (a, b)) = false - | eq_fm (Iff (c, d)) (And (a, b)) = false - | eq_fm (Imp (c, d)) (And (a, b)) = false - | eq_fm (Or (c, d)) (And (a, b)) = false - | eq_fm (NClosed b) (Not a) = false - | eq_fm (Closed b) (Not a) = false - | eq_fm (A b) (Not a) = false - | eq_fm (E b) (Not a) = false - | eq_fm (Iff (b, c)) (Not a) = false - | eq_fm (Imp (b, c)) (Not a) = false - | eq_fm (Or (b, c)) (Not a) = false - | eq_fm (And (b, c)) (Not a) = false - | eq_fm (NClosed c) (NDvd (a, b)) = false - | eq_fm (Closed c) (NDvd (a, b)) = false - | eq_fm (A c) (NDvd (a, b)) = false - | eq_fm (E c) (NDvd (a, b)) = false - | eq_fm (Iff (c, d)) (NDvd (a, b)) = false - | eq_fm (Imp (c, d)) (NDvd (a, b)) = false - | eq_fm (Or (c, d)) (NDvd (a, b)) = false - | eq_fm (And (c, d)) (NDvd (a, b)) = false - | eq_fm (Not c) (NDvd (a, b)) = false - | eq_fm (NClosed c) (Dvd (a, b)) = false - | eq_fm (Closed c) (Dvd (a, b)) = false - | eq_fm (A c) (Dvd (a, b)) = false - | eq_fm (E c) (Dvd (a, b)) = false - | eq_fm (Iff (c, d)) (Dvd (a, b)) = false - | eq_fm (Imp (c, d)) (Dvd (a, b)) = false - | eq_fm (Or (c, d)) (Dvd (a, b)) = false - | eq_fm (And (c, d)) (Dvd (a, b)) = false - | eq_fm (Not c) (Dvd (a, b)) = false - | eq_fm (NDvd (c, d)) (Dvd (a, b)) = false - | eq_fm (NClosed b) (NEq a) = false - | eq_fm (Closed b) (NEq a) = false - | eq_fm (A b) (NEq a) = false - | eq_fm (E b) (NEq a) = false - | eq_fm (Iff (b, c)) (NEq a) = false - | 
eq_fm (Imp (b, c)) (NEq a) = false - | eq_fm (Or (b, c)) (NEq a) = false - | eq_fm (And (b, c)) (NEq a) = false - | eq_fm (Not b) (NEq a) = false - | eq_fm (NDvd (b, c)) (NEq a) = false - | eq_fm (Dvd (b, c)) (NEq a) = false - | eq_fm (NClosed b) (Eq a) = false - | eq_fm (Closed b) (Eq a) = false - | eq_fm (A b) (Eq a) = false - | eq_fm (E b) (Eq a) = false - | eq_fm (Iff (b, c)) (Eq a) = false - | eq_fm (Imp (b, c)) (Eq a) = false - | eq_fm (Or (b, c)) (Eq a) = false - | eq_fm (And (b, c)) (Eq a) = false - | eq_fm (Not b) (Eq a) = false - | eq_fm (NDvd (b, c)) (Eq a) = false - | eq_fm (Dvd (b, c)) (Eq a) = false - | eq_fm (NEq b) (Eq a) = false - | eq_fm (NClosed b) (Ge a) = false - | eq_fm (Closed b) (Ge a) = false - | eq_fm (A b) (Ge a) = false - | eq_fm (E b) (Ge a) = false - | eq_fm (Iff (b, c)) (Ge a) = false - | eq_fm (Imp (b, c)) (Ge a) = false - | eq_fm (Or (b, c)) (Ge a) = false - | eq_fm (And (b, c)) (Ge a) = false - | eq_fm (Not b) (Ge a) = false - | eq_fm (NDvd (b, c)) (Ge a) = false - | eq_fm (Dvd (b, c)) (Ge a) = false - | eq_fm (NEq b) (Ge a) = false - | eq_fm (Eq b) (Ge a) = false - | eq_fm (NClosed b) (Gt a) = false - | eq_fm (Closed b) (Gt a) = false - | eq_fm (A b) (Gt a) = false - | eq_fm (E b) (Gt a) = false - | eq_fm (Iff (b, c)) (Gt a) = false - | eq_fm (Imp (b, c)) (Gt a) = false - | eq_fm (Or (b, c)) (Gt a) = false - | eq_fm (And (b, c)) (Gt a) = false - | eq_fm (Not b) (Gt a) = false - | eq_fm (NDvd (b, c)) (Gt a) = false - | eq_fm (Dvd (b, c)) (Gt a) = false - | eq_fm (NEq b) (Gt a) = false - | eq_fm (Eq b) (Gt a) = false - | eq_fm (Ge b) (Gt a) = false - | eq_fm (NClosed b) (Le a) = false - | eq_fm (Closed b) (Le a) = false - | eq_fm (A b) (Le a) = false - | eq_fm (E b) (Le a) = false - | eq_fm (Iff (b, c)) (Le a) = false - | eq_fm (Imp (b, c)) (Le a) = false - | eq_fm (Or (b, c)) (Le a) = false - | eq_fm (And (b, c)) (Le a) = false - | eq_fm (Not b) (Le a) = false - | eq_fm (NDvd (b, c)) (Le a) = false - | eq_fm (Dvd (b, c)) (Le a) = false - | eq_fm (NEq b) (Le a) = false - | eq_fm (Eq b) (Le a) = false - | eq_fm (Ge b) (Le a) = false - | eq_fm (Gt b) (Le a) = false - | eq_fm (NClosed b) (Lt a) = false - | eq_fm (Closed b) (Lt a) = false - | eq_fm (A b) (Lt a) = false - | eq_fm (E b) (Lt a) = false - | eq_fm (Iff (b, c)) (Lt a) = false - | eq_fm (Imp (b, c)) (Lt a) = false - | eq_fm (Or (b, c)) (Lt a) = false - | eq_fm (And (b, c)) (Lt a) = false - | eq_fm (Not b) (Lt a) = false - | eq_fm (NDvd (b, c)) (Lt a) = false - | eq_fm (Dvd (b, c)) (Lt a) = false - | eq_fm (NEq b) (Lt a) = false - | eq_fm (Eq b) (Lt a) = false - | eq_fm (Ge b) (Lt a) = false - | eq_fm (Gt b) (Lt a) = false - | eq_fm (Le b) (Lt a) = false - | eq_fm (NClosed a) F = false - | eq_fm (Closed a) F = false - | eq_fm (A a) F = false - | eq_fm (E a) F = false - | eq_fm (Iff (a, b)) F = false - | eq_fm (Imp (a, b)) F = false - | eq_fm (Or (a, b)) F = false - | eq_fm (And (a, b)) F = false - | eq_fm (Not a) F = false - | eq_fm (NDvd (a, b)) F = false - | eq_fm (Dvd (a, b)) F = false - | eq_fm (NEq a) F = false - | eq_fm (Eq a) F = false - | eq_fm (Ge a) F = false - | eq_fm (Gt a) F = false - | eq_fm (Le a) F = false - | eq_fm (Lt a) F = false - | eq_fm (NClosed a) T = false - | eq_fm (Closed a) T = false - | eq_fm (A a) T = false - | eq_fm (E a) T = false - | eq_fm (Iff (a, b)) T = false - | eq_fm (Imp (a, b)) T = false - | eq_fm (Or (a, b)) T = false - | eq_fm (And (a, b)) T = false - | eq_fm (Not a) T = false - | eq_fm (NDvd (a, b)) T = false - | eq_fm (Dvd (a, b)) T = false - | eq_fm (NEq a) T 
= false - | eq_fm (Eq a) T = false - | eq_fm (Ge a) T = false - | eq_fm (Gt a) T = false - | eq_fm (Le a) T = false - | eq_fm (Lt a) T = false +fun eq_fm T T = true + | eq_fm F F = true + | eq_fm (Lt numa) (Lt num) = eq_num numa num + | eq_fm (Le numa) (Le num) = eq_num numa num + | eq_fm (Gt numa) (Gt num) = eq_num numa num + | eq_fm (Ge numa) (Ge num) = eq_num numa num + | eq_fm (Eq numa) (Eq num) = eq_num numa num + | eq_fm (NEq numa) (NEq num) = eq_num numa num + | eq_fm (Dvd (intaa, numa)) (Dvd (inta, num)) = + ((intaa : IntInf.int) = inta) andalso eq_num numa num + | eq_fm (NDvd (intaa, numa)) (NDvd (inta, num)) = + ((intaa : IntInf.int) = inta) andalso eq_num numa num + | eq_fm (Not fma) (Not fm) = eq_fm fma fm + | eq_fm (And (fm1a, fm2a)) (And (fm1, fm2)) = + eq_fm fm1a fm1 andalso eq_fm fm2a fm2 + | eq_fm (Or (fm1a, fm2a)) (Or (fm1, fm2)) = + eq_fm fm1a fm1 andalso eq_fm fm2a fm2 + | eq_fm (Imp (fm1a, fm2a)) (Imp (fm1, fm2)) = + eq_fm fm1a fm1 andalso eq_fm fm2a fm2 + | eq_fm (Iff (fm1a, fm2a)) (Iff (fm1, fm2)) = + eq_fm fm1a fm1 andalso eq_fm fm2a fm2 + | eq_fm (E fma) (E fm) = eq_fm fma fm + | eq_fm (A fma) (A fm) = eq_fm fma fm + | eq_fm (Closed nata) (Closed nat) = ((nata : IntInf.int) = nat) + | eq_fm (NClosed nata) (NClosed nat) = ((nata : IntInf.int) = nat) + | eq_fm T F = false | eq_fm F T = false - | eq_fm (Closed a) (NClosed b) = false - | eq_fm (A a) (NClosed b) = false - | eq_fm (A a) (Closed b) = false - | eq_fm (E a) (NClosed b) = false - | eq_fm (E a) (Closed b) = false - | eq_fm (E a) (A b) = false - | eq_fm (Iff (a, b)) (NClosed c) = false - | eq_fm (Iff (a, b)) (Closed c) = false - | eq_fm (Iff (a, b)) (A c) = false - | eq_fm (Iff (a, b)) (E c) = false - | eq_fm (Imp (a, b)) (NClosed c) = false - | eq_fm (Imp (a, b)) (Closed c) = false - | eq_fm (Imp (a, b)) (A c) = false - | eq_fm (Imp (a, b)) (E c) = false - | eq_fm (Imp (a, b)) (Iff (c, d)) = false - | eq_fm (Or (a, b)) (NClosed c) = false - | eq_fm (Or (a, b)) (Closed c) = false - | eq_fm (Or (a, b)) (A c) = false - | eq_fm (Or (a, b)) (E c) = false - | eq_fm (Or (a, b)) (Iff (c, d)) = false - | eq_fm (Or (a, b)) (Imp (c, d)) = false - | eq_fm (And (a, b)) (NClosed c) = false - | eq_fm (And (a, b)) (Closed c) = false - | eq_fm (And (a, b)) (A c) = false - | eq_fm (And (a, b)) (E c) = false - | eq_fm (And (a, b)) (Iff (c, d)) = false - | eq_fm (And (a, b)) (Imp (c, d)) = false - | eq_fm (And (a, b)) (Or (c, d)) = false - | eq_fm (Not a) (NClosed b) = false - | eq_fm (Not a) (Closed b) = false - | eq_fm (Not a) (A b) = false - | eq_fm (Not a) (E b) = false - | eq_fm (Not a) (Iff (b, c)) = false - | eq_fm (Not a) (Imp (b, c)) = false - | eq_fm (Not a) (Or (b, c)) = false - | eq_fm (Not a) (And (b, c)) = false - | eq_fm (NDvd (a, b)) (NClosed c) = false - | eq_fm (NDvd (a, b)) (Closed c) = false - | eq_fm (NDvd (a, b)) (A c) = false - | eq_fm (NDvd (a, b)) (E c) = false - | eq_fm (NDvd (a, b)) (Iff (c, d)) = false - | eq_fm (NDvd (a, b)) (Imp (c, d)) = false - | eq_fm (NDvd (a, b)) (Or (c, d)) = false - | eq_fm (NDvd (a, b)) (And (c, d)) = false - | eq_fm (NDvd (a, b)) (Not c) = false - | eq_fm (Dvd (a, b)) (NClosed c) = false - | eq_fm (Dvd (a, b)) (Closed c) = false - | eq_fm (Dvd (a, b)) (A c) = false - | eq_fm (Dvd (a, b)) (E c) = false - | eq_fm (Dvd (a, b)) (Iff (c, d)) = false - | eq_fm (Dvd (a, b)) (Imp (c, d)) = false - | eq_fm (Dvd (a, b)) (Or (c, d)) = false - | eq_fm (Dvd (a, b)) (And (c, d)) = false - | eq_fm (Dvd (a, b)) (Not c) = false - | eq_fm (Dvd (a, b)) (NDvd (c, d)) = false - | eq_fm (NEq a) 
(NClosed b) = false - | eq_fm (NEq a) (Closed b) = false - | eq_fm (NEq a) (A b) = false - | eq_fm (NEq a) (E b) = false - | eq_fm (NEq a) (Iff (b, c)) = false - | eq_fm (NEq a) (Imp (b, c)) = false - | eq_fm (NEq a) (Or (b, c)) = false - | eq_fm (NEq a) (And (b, c)) = false - | eq_fm (NEq a) (Not b) = false - | eq_fm (NEq a) (NDvd (b, c)) = false - | eq_fm (NEq a) (Dvd (b, c)) = false - | eq_fm (Eq a) (NClosed b) = false - | eq_fm (Eq a) (Closed b) = false - | eq_fm (Eq a) (A b) = false - | eq_fm (Eq a) (E b) = false - | eq_fm (Eq a) (Iff (b, c)) = false - | eq_fm (Eq a) (Imp (b, c)) = false - | eq_fm (Eq a) (Or (b, c)) = false - | eq_fm (Eq a) (And (b, c)) = false - | eq_fm (Eq a) (Not b) = false - | eq_fm (Eq a) (NDvd (b, c)) = false - | eq_fm (Eq a) (Dvd (b, c)) = false - | eq_fm (Eq a) (NEq b) = false - | eq_fm (Ge a) (NClosed b) = false - | eq_fm (Ge a) (Closed b) = false - | eq_fm (Ge a) (A b) = false - | eq_fm (Ge a) (E b) = false - | eq_fm (Ge a) (Iff (b, c)) = false - | eq_fm (Ge a) (Imp (b, c)) = false - | eq_fm (Ge a) (Or (b, c)) = false - | eq_fm (Ge a) (And (b, c)) = false - | eq_fm (Ge a) (Not b) = false - | eq_fm (Ge a) (NDvd (b, c)) = false - | eq_fm (Ge a) (Dvd (b, c)) = false - | eq_fm (Ge a) (NEq b) = false - | eq_fm (Ge a) (Eq b) = false - | eq_fm (Gt a) (NClosed b) = false - | eq_fm (Gt a) (Closed b) = false - | eq_fm (Gt a) (A b) = false - | eq_fm (Gt a) (E b) = false - | eq_fm (Gt a) (Iff (b, c)) = false - | eq_fm (Gt a) (Imp (b, c)) = false - | eq_fm (Gt a) (Or (b, c)) = false - | eq_fm (Gt a) (And (b, c)) = false - | eq_fm (Gt a) (Not b) = false - | eq_fm (Gt a) (NDvd (b, c)) = false - | eq_fm (Gt a) (Dvd (b, c)) = false - | eq_fm (Gt a) (NEq b) = false - | eq_fm (Gt a) (Eq b) = false - | eq_fm (Gt a) (Ge b) = false - | eq_fm (Le a) (NClosed b) = false - | eq_fm (Le a) (Closed b) = false - | eq_fm (Le a) (A b) = false - | eq_fm (Le a) (E b) = false - | eq_fm (Le a) (Iff (b, c)) = false - | eq_fm (Le a) (Imp (b, c)) = false - | eq_fm (Le a) (Or (b, c)) = false - | eq_fm (Le a) (And (b, c)) = false - | eq_fm (Le a) (Not b) = false - | eq_fm (Le a) (NDvd (b, c)) = false - | eq_fm (Le a) (Dvd (b, c)) = false - | eq_fm (Le a) (NEq b) = false - | eq_fm (Le a) (Eq b) = false - | eq_fm (Le a) (Ge b) = false - | eq_fm (Le a) (Gt b) = false - | eq_fm (Lt a) (NClosed b) = false - | eq_fm (Lt a) (Closed b) = false - | eq_fm (Lt a) (A b) = false - | eq_fm (Lt a) (E b) = false - | eq_fm (Lt a) (Iff (b, c)) = false - | eq_fm (Lt a) (Imp (b, c)) = false - | eq_fm (Lt a) (Or (b, c)) = false - | eq_fm (Lt a) (And (b, c)) = false - | eq_fm (Lt a) (Not b) = false - | eq_fm (Lt a) (NDvd (b, c)) = false - | eq_fm (Lt a) (Dvd (b, c)) = false - | eq_fm (Lt a) (NEq b) = false - | eq_fm (Lt a) (Eq b) = false - | eq_fm (Lt a) (Ge b) = false - | eq_fm (Lt a) (Gt b) = false - | eq_fm (Lt a) (Le b) = false - | eq_fm F (NClosed a) = false - | eq_fm F (Closed a) = false - | eq_fm F (A a) = false - | eq_fm F (E a) = false - | eq_fm F (Iff (a, b)) = false - | eq_fm F (Imp (a, b)) = false - | eq_fm F (Or (a, b)) = false - | eq_fm F (And (a, b)) = false - | eq_fm F (Not a) = false - | eq_fm F (NDvd (a, b)) = false - | eq_fm F (Dvd (a, b)) = false - | eq_fm F (NEq a) = false - | eq_fm F (Eq a) = false - | eq_fm F (Ge a) = false - | eq_fm F (Gt a) = false - | eq_fm F (Le a) = false - | eq_fm F (Lt a) = false - | eq_fm T (NClosed a) = false - | eq_fm T (Closed a) = false - | eq_fm T (A a) = false - | eq_fm T (E a) = false - | eq_fm T (Iff (a, b)) = false - | eq_fm T (Imp (a, b)) = false - | eq_fm T (Or 
(a, b)) = false - | eq_fm T (And (a, b)) = false - | eq_fm T (Not a) = false - | eq_fm T (NDvd (a, b)) = false - | eq_fm T (Dvd (a, b)) = false - | eq_fm T (NEq a) = false - | eq_fm T (Eq a) = false - | eq_fm T (Ge a) = false - | eq_fm T (Gt a) = false - | eq_fm T (Le a) = false - | eq_fm T (Lt a) = false - | eq_fm T F = false - | eq_fm (NClosed nat) (NClosed nat') = ((nat : IntInf.int) = nat') - | eq_fm (Closed nat) (Closed nat') = ((nat : IntInf.int) = nat') - | eq_fm (A fm) (A fm') = eq_fm fm fm' - | eq_fm (E fm) (E fm') = eq_fm fm fm' - | eq_fm (Iff (fm1, fm2)) (Iff (fm1', fm2')) = - eq_fm fm1 fm1' andalso eq_fm fm2 fm2' - | eq_fm (Imp (fm1, fm2)) (Imp (fm1', fm2')) = - eq_fm fm1 fm1' andalso eq_fm fm2 fm2' - | eq_fm (Or (fm1, fm2)) (Or (fm1', fm2')) = - eq_fm fm1 fm1' andalso eq_fm fm2 fm2' - | eq_fm (And (fm1, fm2)) (And (fm1', fm2')) = - eq_fm fm1 fm1' andalso eq_fm fm2 fm2' - | eq_fm (Not fm) (Not fm') = eq_fm fm fm' - | eq_fm (NDvd (inta, num)) (NDvd (int', num')) = - ((inta : IntInf.int) = int') andalso eq_num num num' - | eq_fm (Dvd (inta, num)) (Dvd (int', num')) = - ((inta : IntInf.int) = int') andalso eq_num num num' - | eq_fm (NEq num) (NEq num') = eq_num num num' - | eq_fm (Eq num) (Eq num') = eq_num num num' - | eq_fm (Ge num) (Ge num') = eq_num num num' - | eq_fm (Gt num) (Gt num') = eq_num num num' - | eq_fm (Le num) (Le num') = eq_num num num' - | eq_fm (Lt num) (Lt num') = eq_num num num' - | eq_fm F F = true - | eq_fm T T = true; - -val eq_fma = {eq = eq_fm} : fm eq; + | eq_fm T (Lt num) = false + | eq_fm (Lt num) T = false + | eq_fm T (Le num) = false + | eq_fm (Le num) T = false + | eq_fm T (Gt num) = false + | eq_fm (Gt num) T = false + | eq_fm T (Ge num) = false + | eq_fm (Ge num) T = false + | eq_fm T (Eq num) = false + | eq_fm (Eq num) T = false + | eq_fm T (NEq num) = false + | eq_fm (NEq num) T = false + | eq_fm T (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, num)) T = false + | eq_fm T (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, num)) T = false + | eq_fm T (Not fm) = false + | eq_fm (Not fm) T = false + | eq_fm T (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) T = false + | eq_fm T (Or (fm1, fm2)) = false + | eq_fm (Or (fm1, fm2)) T = false + | eq_fm T (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, fm2)) T = false + | eq_fm T (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) T = false + | eq_fm T (E fm) = false + | eq_fm (E fm) T = false + | eq_fm T (A fm) = false + | eq_fm (A fm) T = false + | eq_fm T (Closed nat) = false + | eq_fm (Closed nat) T = false + | eq_fm T (NClosed nat) = false + | eq_fm (NClosed nat) T = false + | eq_fm F (Lt num) = false + | eq_fm (Lt num) F = false + | eq_fm F (Le num) = false + | eq_fm (Le num) F = false + | eq_fm F (Gt num) = false + | eq_fm (Gt num) F = false + | eq_fm F (Ge num) = false + | eq_fm (Ge num) F = false + | eq_fm F (Eq num) = false + | eq_fm (Eq num) F = false + | eq_fm F (NEq num) = false + | eq_fm (NEq num) F = false + | eq_fm F (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, num)) F = false + | eq_fm F (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, num)) F = false + | eq_fm F (Not fm) = false + | eq_fm (Not fm) F = false + | eq_fm F (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) F = false + | eq_fm F (Or (fm1, fm2)) = false + | eq_fm (Or (fm1, fm2)) F = false + | eq_fm F (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, fm2)) F = false + | eq_fm F (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) F = false + | eq_fm F (E fm) = false + | eq_fm (E fm) F = false + | eq_fm F (A fm) = false + | 
eq_fm (A fm) F = false + | eq_fm F (Closed nat) = false + | eq_fm (Closed nat) F = false + | eq_fm F (NClosed nat) = false + | eq_fm (NClosed nat) F = false + | eq_fm (Lt numa) (Le num) = false + | eq_fm (Le numa) (Lt num) = false + | eq_fm (Lt numa) (Gt num) = false + | eq_fm (Gt numa) (Lt num) = false + | eq_fm (Lt numa) (Ge num) = false + | eq_fm (Ge numa) (Lt num) = false + | eq_fm (Lt numa) (Eq num) = false + | eq_fm (Eq numa) (Lt num) = false + | eq_fm (Lt numa) (NEq num) = false + | eq_fm (NEq numa) (Lt num) = false + | eq_fm (Lt numa) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, numa)) (Lt num) = false + | eq_fm (Lt numa) (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, numa)) (Lt num) = false + | eq_fm (Lt num) (Not fm) = false + | eq_fm (Not fm) (Lt num) = false + | eq_fm (Lt num) (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) (Lt num) = false + | eq_fm (Lt num) (Or (fm1, fm2)) = false + | eq_fm (Or (fm1, fm2)) (Lt num) = false + | eq_fm (Lt num) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, fm2)) (Lt num) = false + | eq_fm (Lt num) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) (Lt num) = false + | eq_fm (Lt num) (E fm) = false + | eq_fm (E fm) (Lt num) = false + | eq_fm (Lt num) (A fm) = false + | eq_fm (A fm) (Lt num) = false + | eq_fm (Lt num) (Closed nat) = false + | eq_fm (Closed nat) (Lt num) = false + | eq_fm (Lt num) (NClosed nat) = false + | eq_fm (NClosed nat) (Lt num) = false + | eq_fm (Le numa) (Gt num) = false + | eq_fm (Gt numa) (Le num) = false + | eq_fm (Le numa) (Ge num) = false + | eq_fm (Ge numa) (Le num) = false + | eq_fm (Le numa) (Eq num) = false + | eq_fm (Eq numa) (Le num) = false + | eq_fm (Le numa) (NEq num) = false + | eq_fm (NEq numa) (Le num) = false + | eq_fm (Le numa) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, numa)) (Le num) = false + | eq_fm (Le numa) (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, numa)) (Le num) = false + | eq_fm (Le num) (Not fm) = false + | eq_fm (Not fm) (Le num) = false + | eq_fm (Le num) (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) (Le num) = false + | eq_fm (Le num) (Or (fm1, fm2)) = false + | eq_fm (Or (fm1, fm2)) (Le num) = false + | eq_fm (Le num) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, fm2)) (Le num) = false + | eq_fm (Le num) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) (Le num) = false + | eq_fm (Le num) (E fm) = false + | eq_fm (E fm) (Le num) = false + | eq_fm (Le num) (A fm) = false + | eq_fm (A fm) (Le num) = false + | eq_fm (Le num) (Closed nat) = false + | eq_fm (Closed nat) (Le num) = false + | eq_fm (Le num) (NClosed nat) = false + | eq_fm (NClosed nat) (Le num) = false + | eq_fm (Gt numa) (Ge num) = false + | eq_fm (Ge numa) (Gt num) = false + | eq_fm (Gt numa) (Eq num) = false + | eq_fm (Eq numa) (Gt num) = false + | eq_fm (Gt numa) (NEq num) = false + | eq_fm (NEq numa) (Gt num) = false + | eq_fm (Gt numa) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, numa)) (Gt num) = false + | eq_fm (Gt numa) (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, numa)) (Gt num) = false + | eq_fm (Gt num) (Not fm) = false + | eq_fm (Not fm) (Gt num) = false + | eq_fm (Gt num) (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) (Gt num) = false + | eq_fm (Gt num) (Or (fm1, fm2)) = false + | eq_fm (Or (fm1, fm2)) (Gt num) = false + | eq_fm (Gt num) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, fm2)) (Gt num) = false + | eq_fm (Gt num) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) (Gt num) = false + | eq_fm (Gt num) (E fm) = false + | eq_fm (E fm) (Gt num) = false + | eq_fm (Gt 
num) (A fm) = false + | eq_fm (A fm) (Gt num) = false + | eq_fm (Gt num) (Closed nat) = false + | eq_fm (Closed nat) (Gt num) = false + | eq_fm (Gt num) (NClosed nat) = false + | eq_fm (NClosed nat) (Gt num) = false + | eq_fm (Ge numa) (Eq num) = false + | eq_fm (Eq numa) (Ge num) = false + | eq_fm (Ge numa) (NEq num) = false + | eq_fm (NEq numa) (Ge num) = false + | eq_fm (Ge numa) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, numa)) (Ge num) = false + | eq_fm (Ge numa) (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, numa)) (Ge num) = false + | eq_fm (Ge num) (Not fm) = false + | eq_fm (Not fm) (Ge num) = false + | eq_fm (Ge num) (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) (Ge num) = false + | eq_fm (Ge num) (Or (fm1, fm2)) = false + | eq_fm (Or (fm1, fm2)) (Ge num) = false + | eq_fm (Ge num) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, fm2)) (Ge num) = false + | eq_fm (Ge num) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) (Ge num) = false + | eq_fm (Ge num) (E fm) = false + | eq_fm (E fm) (Ge num) = false + | eq_fm (Ge num) (A fm) = false + | eq_fm (A fm) (Ge num) = false + | eq_fm (Ge num) (Closed nat) = false + | eq_fm (Closed nat) (Ge num) = false + | eq_fm (Ge num) (NClosed nat) = false + | eq_fm (NClosed nat) (Ge num) = false + | eq_fm (Eq numa) (NEq num) = false + | eq_fm (NEq numa) (Eq num) = false + | eq_fm (Eq numa) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, numa)) (Eq num) = false + | eq_fm (Eq numa) (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, numa)) (Eq num) = false + | eq_fm (Eq num) (Not fm) = false + | eq_fm (Not fm) (Eq num) = false + | eq_fm (Eq num) (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) (Eq num) = false + | eq_fm (Eq num) (Or (fm1, fm2)) = false + | eq_fm (Or (fm1, fm2)) (Eq num) = false + | eq_fm (Eq num) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, fm2)) (Eq num) = false + | eq_fm (Eq num) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) (Eq num) = false + | eq_fm (Eq num) (E fm) = false + | eq_fm (E fm) (Eq num) = false + | eq_fm (Eq num) (A fm) = false + | eq_fm (A fm) (Eq num) = false + | eq_fm (Eq num) (Closed nat) = false + | eq_fm (Closed nat) (Eq num) = false + | eq_fm (Eq num) (NClosed nat) = false + | eq_fm (NClosed nat) (Eq num) = false + | eq_fm (NEq numa) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, numa)) (NEq num) = false + | eq_fm (NEq numa) (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, numa)) (NEq num) = false + | eq_fm (NEq num) (Not fm) = false + | eq_fm (Not fm) (NEq num) = false + | eq_fm (NEq num) (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) (NEq num) = false + | eq_fm (NEq num) (Or (fm1, fm2)) = false + | eq_fm (Or (fm1, fm2)) (NEq num) = false + | eq_fm (NEq num) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, fm2)) (NEq num) = false + | eq_fm (NEq num) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) (NEq num) = false + | eq_fm (NEq num) (E fm) = false + | eq_fm (E fm) (NEq num) = false + | eq_fm (NEq num) (A fm) = false + | eq_fm (A fm) (NEq num) = false + | eq_fm (NEq num) (Closed nat) = false + | eq_fm (Closed nat) (NEq num) = false + | eq_fm (NEq num) (NClosed nat) = false + | eq_fm (NClosed nat) (NEq num) = false + | eq_fm (Dvd (intaa, numa)) (NDvd (inta, num)) = false + | eq_fm (NDvd (intaa, numa)) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, num)) (Not fm) = false + | eq_fm (Not fm) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, num)) (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, num)) (Or (fm1, fm2)) = false + | eq_fm 
(Or (fm1, fm2)) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, num)) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, fm2)) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, num)) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, num)) (E fm) = false + | eq_fm (E fm) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, num)) (A fm) = false + | eq_fm (A fm) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, num)) (Closed nat) = false + | eq_fm (Closed nat) (Dvd (inta, num)) = false + | eq_fm (Dvd (inta, num)) (NClosed nat) = false + | eq_fm (NClosed nat) (Dvd (inta, num)) = false + | eq_fm (NDvd (inta, num)) (Not fm) = false + | eq_fm (Not fm) (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, num)) (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, num)) (Or (fm1, fm2)) = false + | eq_fm (Or (fm1, fm2)) (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, num)) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, fm2)) (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, num)) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, num)) (E fm) = false + | eq_fm (E fm) (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, num)) (A fm) = false + | eq_fm (A fm) (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, num)) (Closed nat) = false + | eq_fm (Closed nat) (NDvd (inta, num)) = false + | eq_fm (NDvd (inta, num)) (NClosed nat) = false + | eq_fm (NClosed nat) (NDvd (inta, num)) = false + | eq_fm (Not fm) (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) (Not fm) = false + | eq_fm (Not fm) (Or (fm1, fm2)) = false + | eq_fm (Or (fm1, fm2)) (Not fm) = false + | eq_fm (Not fm) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, fm2)) (Not fm) = false + | eq_fm (Not fm) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) (Not fm) = false + | eq_fm (Not fma) (E fm) = false + | eq_fm (E fma) (Not fm) = false + | eq_fm (Not fma) (A fm) = false + | eq_fm (A fma) (Not fm) = false + | eq_fm (Not fm) (Closed nat) = false + | eq_fm (Closed nat) (Not fm) = false + | eq_fm (Not fm) (NClosed nat) = false + | eq_fm (NClosed nat) (Not fm) = false + | eq_fm (And (fm1a, fm2a)) (Or (fm1, fm2)) = false + | eq_fm (Or (fm1a, fm2a)) (And (fm1, fm2)) = false + | eq_fm (And (fm1a, fm2a)) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1a, fm2a)) (And (fm1, fm2)) = false + | eq_fm (And (fm1a, fm2a)) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1a, fm2a)) (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) (E fm) = false + | eq_fm (E fm) (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) (A fm) = false + | eq_fm (A fm) (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) (Closed nat) = false + | eq_fm (Closed nat) (And (fm1, fm2)) = false + | eq_fm (And (fm1, fm2)) (NClosed nat) = false + | eq_fm (NClosed nat) (And (fm1, fm2)) = false + | eq_fm (Or (fm1a, fm2a)) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1a, fm2a)) (Or (fm1, fm2)) = false + | eq_fm (Or (fm1a, fm2a)) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1a, fm2a)) (Or (fm1, fm2)) = false + | eq_fm (Or (fm1, fm2)) (E fm) = false + | eq_fm (E fm) (Or (fm1, fm2)) = false + | eq_fm (Or (fm1, fm2)) (A fm) = false + | eq_fm (A fm) (Or (fm1, fm2)) = false + | eq_fm (Or (fm1, fm2)) (Closed nat) = false + | eq_fm (Closed nat) (Or (fm1, fm2)) = false + | eq_fm (Or (fm1, fm2)) (NClosed nat) = false + | eq_fm (NClosed nat) (Or (fm1, fm2)) = false + | eq_fm (Imp (fm1a, fm2a)) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1a, fm2a)) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, 
fm2)) (E fm) = false + | eq_fm (E fm) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, fm2)) (A fm) = false + | eq_fm (A fm) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, fm2)) (Closed nat) = false + | eq_fm (Closed nat) (Imp (fm1, fm2)) = false + | eq_fm (Imp (fm1, fm2)) (NClosed nat) = false + | eq_fm (NClosed nat) (Imp (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) (E fm) = false + | eq_fm (E fm) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) (A fm) = false + | eq_fm (A fm) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) (Closed nat) = false + | eq_fm (Closed nat) (Iff (fm1, fm2)) = false + | eq_fm (Iff (fm1, fm2)) (NClosed nat) = false + | eq_fm (NClosed nat) (Iff (fm1, fm2)) = false + | eq_fm (E fma) (A fm) = false + | eq_fm (A fma) (E fm) = false + | eq_fm (E fm) (Closed nat) = false + | eq_fm (Closed nat) (E fm) = false + | eq_fm (E fm) (NClosed nat) = false + | eq_fm (NClosed nat) (E fm) = false + | eq_fm (A fm) (Closed nat) = false + | eq_fm (Closed nat) (A fm) = false + | eq_fm (A fm) (NClosed nat) = false + | eq_fm (NClosed nat) (A fm) = false + | eq_fm (Closed nata) (NClosed nat) = false + | eq_fm (NClosed nata) (Closed nat) = false; fun djf f p q = - (if eqop eq_fma q T then T - else (if eqop eq_fma q F then f p - else let - val a = f p; - in - (case a of T => T | F => q | Lt num => Or (f p, q) - | Le num => Or (f p, q) | Gt num => Or (f p, q) - | Ge num => Or (f p, q) | Eq num => Or (f p, q) - | NEq num => Or (f p, q) | Dvd (inta, num) => Or (f p, q) - | NDvd (inta, num) => Or (f p, q) | Not fm => Or (f p, q) - | And (fm1, fm2) => Or (f p, q) - | Or (fm1, fm2) => Or (f p, q) - | Imp (fm1, fm2) => Or (f p, q) - | Iff (fm1, fm2) => Or (f p, q) | E fm => Or (f p, q) - | A fm => Or (f p, q) | Closed nat => Or (f p, q) - | NClosed nat => Or (f p, q)) - end)); + (if eq_fm q T then T + else (if eq_fm q F then f p + else (case f p of T => T | F => q | Lt _ => Or (f p, q) + | Le _ => Or (f p, q) | Gt _ => Or (f p, q) + | Ge _ => Or (f p, q) | Eq _ => Or (f p, q) + | NEq _ => Or (f p, q) | Dvd (_, _) => Or (f p, q) + | NDvd (_, _) => Or (f p, q) | Not _ => Or (f p, q) + | And (_, _) => Or (f p, q) | Or (_, _) => Or (f p, q) + | Imp (_, _) => Or (f p, q) | Iff (_, _) => Or (f p, q) + | E _ => Or (f p, q) | A _ => Or (f p, q) + | Closed _ => Or (f p, q) | NClosed _ => Or (f p, q)))); fun foldr f [] a = a | foldr f (x :: xs) a = f x (foldr f xs a); @@ -562,18 +768,17 @@ fun dj f p = evaldjf f (disjuncts p); fun disj p q = - (if eqop eq_fma p T orelse eqop eq_fma q T then T - else (if eqop eq_fma p F then q - else (if eqop eq_fma q F then p else Or (p, q)))); + (if eq_fm p T orelse eq_fm q T then T + else (if eq_fm p F then q else (if eq_fm q F then p else Or (p, q)))); fun minus_nat n m = IntInf.max (0, (IntInf.- (n, m))); -fun decrnum (Bound n) = Bound (minus_nat n 1) +fun decrnum (Bound n) = Bound (minus_nat n (1 : IntInf.int)) | decrnum (Neg a) = Neg (decrnum a) | decrnum (Add (a, b)) = Add (decrnum a, decrnum b) | decrnum (Sub (a, b)) = Sub (decrnum a, decrnum b) | decrnum (Mul (c, a)) = Mul (c, decrnum a) - | decrnum (Cn (n, i, a)) = Cn (minus_nat n 1, i, decrnum a) + | decrnum (Cn (n, i, a)) = Cn (minus_nat n (1 : IntInf.int), i, decrnum a) | decrnum (C u) = C u; fun decr (Lt a) = Lt (decrnum a) @@ -596,20 +801,20 @@ | decr (Closed aq) = Closed aq | decr (NClosed ar) = NClosed ar; -fun concat [] = [] - | concat (x :: xs) = append x (concat xs); - -fun split f (a, b) = f a b; +fun concat_map f [] = [] + | concat_map f (x :: xs) = append (f x) (concat_map f xs); fun numsubst0 t 
(C c) = C c - | numsubst0 t (Bound n) = (if eqop eq_nat n 0 then t else Bound n) + | numsubst0 t (Bound n) = + (if ((n : IntInf.int) = (0 : IntInf.int)) then t else Bound n) | numsubst0 t (Neg a) = Neg (numsubst0 t a) | numsubst0 t (Add (a, b)) = Add (numsubst0 t a, numsubst0 t b) | numsubst0 t (Sub (a, b)) = Sub (numsubst0 t a, numsubst0 t b) | numsubst0 t (Mul (i, a)) = Mul (i, numsubst0 t a) | numsubst0 t (Cn (v, i, a)) = - (if eqop eq_nat v 0 then Add (Mul (i, t), numsubst0 t a) - else Cn (suc (minus_nat v 1), i, numsubst0 t a)); + (if ((v : IntInf.int) = (0 : IntInf.int)) + then Add (Mul (i, t), numsubst0 t a) + else Cn (suc (minus_nat v (1 : IntInf.int)), i, numsubst0 t a)); fun subst0 t T = T | subst0 t F = F @@ -679,49 +884,417 @@ | minusinf (Closed ap) = Closed ap | minusinf (NClosed aq) = NClosed aq | minusinf (Lt (Cn (cm, c, e))) = - (if eqop eq_nat cm 0 then T else Lt (Cn (suc (minus_nat cm 1), c, e))) + (if ((cm : IntInf.int) = (0 : IntInf.int)) then T + else Lt (Cn (suc (minus_nat cm (1 : IntInf.int)), c, e))) | minusinf (Le (Cn (dm, c, e))) = - (if eqop eq_nat dm 0 then T else Le (Cn (suc (minus_nat dm 1), c, e))) + (if ((dm : IntInf.int) = (0 : IntInf.int)) then T + else Le (Cn (suc (minus_nat dm (1 : IntInf.int)), c, e))) | minusinf (Gt (Cn (em, c, e))) = - (if eqop eq_nat em 0 then F else Gt (Cn (suc (minus_nat em 1), c, e))) + (if ((em : IntInf.int) = (0 : IntInf.int)) then F + else Gt (Cn (suc (minus_nat em (1 : IntInf.int)), c, e))) | minusinf (Ge (Cn (fm, c, e))) = - (if eqop eq_nat fm 0 then F else Ge (Cn (suc (minus_nat fm 1), c, e))) + (if ((fm : IntInf.int) = (0 : IntInf.int)) then F + else Ge (Cn (suc (minus_nat fm (1 : IntInf.int)), c, e))) | minusinf (Eq (Cn (gm, c, e))) = - (if eqop eq_nat gm 0 then F else Eq (Cn (suc (minus_nat gm 1), c, e))) + (if ((gm : IntInf.int) = (0 : IntInf.int)) then F + else Eq (Cn (suc (minus_nat gm (1 : IntInf.int)), c, e))) | minusinf (NEq (Cn (hm, c, e))) = - (if eqop eq_nat hm 0 then T else NEq (Cn (suc (minus_nat hm 1), c, e))); + (if ((hm : IntInf.int) = (0 : IntInf.int)) then T + else NEq (Cn (suc (minus_nat hm (1 : IntInf.int)), c, e))); val eq_int = {eq = (fn a => fn b => ((a : IntInf.int) = b))} : IntInf.int eq; +val zero_int : IntInf.int = (0 : IntInf.int); + +type 'a zero = {zero : 'a}; +val zero = #zero : 'a zero -> 'a; + +val zero_inta = {zero = zero_int} : IntInf.int zero; + +type 'a times = {times : 'a -> 'a -> 'a}; +val times = #times : 'a times -> 'a -> 'a -> 'a; + +type 'a no_zero_divisors = + {times_no_zero_divisors : 'a times, zero_no_zero_divisors : 'a zero}; +val times_no_zero_divisors = #times_no_zero_divisors : + 'a no_zero_divisors -> 'a times; +val zero_no_zero_divisors = #zero_no_zero_divisors : + 'a no_zero_divisors -> 'a zero; + +val times_int = {times = (fn a => fn b => IntInf.* (a, b))} : IntInf.int times; + +val no_zero_divisors_int = + {times_no_zero_divisors = times_int, zero_no_zero_divisors = zero_inta} : + IntInf.int no_zero_divisors; + +type 'a one = {one : 'a}; +val one = #one : 'a one -> 'a; + +type 'a zero_neq_one = {one_zero_neq_one : 'a one, zero_zero_neq_one : 'a zero}; +val one_zero_neq_one = #one_zero_neq_one : 'a zero_neq_one -> 'a one; +val zero_zero_neq_one = #zero_zero_neq_one : 'a zero_neq_one -> 'a zero; + +type 'a semigroup_mult = {times_semigroup_mult : 'a times}; +val times_semigroup_mult = #times_semigroup_mult : + 'a semigroup_mult -> 'a times; + +type 'a plus = {plus : 'a -> 'a -> 'a}; +val plus = #plus : 'a plus -> 'a -> 'a -> 'a; + +type 'a semigroup_add = 
{plus_semigroup_add : 'a plus}; +val plus_semigroup_add = #plus_semigroup_add : 'a semigroup_add -> 'a plus; + +type 'a ab_semigroup_add = {semigroup_add_ab_semigroup_add : 'a semigroup_add}; +val semigroup_add_ab_semigroup_add = #semigroup_add_ab_semigroup_add : + 'a ab_semigroup_add -> 'a semigroup_add; + +type 'a semiring = + {ab_semigroup_add_semiring : 'a ab_semigroup_add, + semigroup_mult_semiring : 'a semigroup_mult}; +val ab_semigroup_add_semiring = #ab_semigroup_add_semiring : + 'a semiring -> 'a ab_semigroup_add; +val semigroup_mult_semiring = #semigroup_mult_semiring : + 'a semiring -> 'a semigroup_mult; + +type 'a mult_zero = {times_mult_zero : 'a times, zero_mult_zero : 'a zero}; +val times_mult_zero = #times_mult_zero : 'a mult_zero -> 'a times; +val zero_mult_zero = #zero_mult_zero : 'a mult_zero -> 'a zero; + +type 'a monoid_add = + {semigroup_add_monoid_add : 'a semigroup_add, zero_monoid_add : 'a zero}; +val semigroup_add_monoid_add = #semigroup_add_monoid_add : + 'a monoid_add -> 'a semigroup_add; +val zero_monoid_add = #zero_monoid_add : 'a monoid_add -> 'a zero; + +type 'a comm_monoid_add = + {ab_semigroup_add_comm_monoid_add : 'a ab_semigroup_add, + monoid_add_comm_monoid_add : 'a monoid_add}; +val ab_semigroup_add_comm_monoid_add = #ab_semigroup_add_comm_monoid_add : + 'a comm_monoid_add -> 'a ab_semigroup_add; +val monoid_add_comm_monoid_add = #monoid_add_comm_monoid_add : + 'a comm_monoid_add -> 'a monoid_add; + +type 'a semiring_0 = + {comm_monoid_add_semiring_0 : 'a comm_monoid_add, + mult_zero_semiring_0 : 'a mult_zero, semiring_semiring_0 : 'a semiring}; +val comm_monoid_add_semiring_0 = #comm_monoid_add_semiring_0 : + 'a semiring_0 -> 'a comm_monoid_add; +val mult_zero_semiring_0 = #mult_zero_semiring_0 : + 'a semiring_0 -> 'a mult_zero; +val semiring_semiring_0 = #semiring_semiring_0 : 'a semiring_0 -> 'a semiring; + +type 'a power = {one_power : 'a one, times_power : 'a times}; +val one_power = #one_power : 'a power -> 'a one; +val times_power = #times_power : 'a power -> 'a times; + +type 'a monoid_mult = + {semigroup_mult_monoid_mult : 'a semigroup_mult, + power_monoid_mult : 'a power}; +val semigroup_mult_monoid_mult = #semigroup_mult_monoid_mult : + 'a monoid_mult -> 'a semigroup_mult; +val power_monoid_mult = #power_monoid_mult : 'a monoid_mult -> 'a power; + +type 'a semiring_1 = + {monoid_mult_semiring_1 : 'a monoid_mult, + semiring_0_semiring_1 : 'a semiring_0, + zero_neq_one_semiring_1 : 'a zero_neq_one}; +val monoid_mult_semiring_1 = #monoid_mult_semiring_1 : + 'a semiring_1 -> 'a monoid_mult; +val semiring_0_semiring_1 = #semiring_0_semiring_1 : + 'a semiring_1 -> 'a semiring_0; +val zero_neq_one_semiring_1 = #zero_neq_one_semiring_1 : + 'a semiring_1 -> 'a zero_neq_one; + +type 'a cancel_semigroup_add = + {semigroup_add_cancel_semigroup_add : 'a semigroup_add}; +val semigroup_add_cancel_semigroup_add = #semigroup_add_cancel_semigroup_add : + 'a cancel_semigroup_add -> 'a semigroup_add; + +type 'a cancel_ab_semigroup_add = + {ab_semigroup_add_cancel_ab_semigroup_add : 'a ab_semigroup_add, + cancel_semigroup_add_cancel_ab_semigroup_add : 'a cancel_semigroup_add}; +val ab_semigroup_add_cancel_ab_semigroup_add = + #ab_semigroup_add_cancel_ab_semigroup_add : + 'a cancel_ab_semigroup_add -> 'a ab_semigroup_add; +val cancel_semigroup_add_cancel_ab_semigroup_add = + #cancel_semigroup_add_cancel_ab_semigroup_add : + 'a cancel_ab_semigroup_add -> 'a cancel_semigroup_add; + +type 'a cancel_comm_monoid_add = + {cancel_ab_semigroup_add_cancel_comm_monoid_add 
: 'a cancel_ab_semigroup_add, + comm_monoid_add_cancel_comm_monoid_add : 'a comm_monoid_add}; +val cancel_ab_semigroup_add_cancel_comm_monoid_add = + #cancel_ab_semigroup_add_cancel_comm_monoid_add : + 'a cancel_comm_monoid_add -> 'a cancel_ab_semigroup_add; +val comm_monoid_add_cancel_comm_monoid_add = + #comm_monoid_add_cancel_comm_monoid_add : + 'a cancel_comm_monoid_add -> 'a comm_monoid_add; + +type 'a semiring_0_cancel = + {cancel_comm_monoid_add_semiring_0_cancel : 'a cancel_comm_monoid_add, + semiring_0_semiring_0_cancel : 'a semiring_0}; +val cancel_comm_monoid_add_semiring_0_cancel = + #cancel_comm_monoid_add_semiring_0_cancel : + 'a semiring_0_cancel -> 'a cancel_comm_monoid_add; +val semiring_0_semiring_0_cancel = #semiring_0_semiring_0_cancel : + 'a semiring_0_cancel -> 'a semiring_0; + +type 'a semiring_1_cancel = + {semiring_0_cancel_semiring_1_cancel : 'a semiring_0_cancel, + semiring_1_semiring_1_cancel : 'a semiring_1}; +val semiring_0_cancel_semiring_1_cancel = #semiring_0_cancel_semiring_1_cancel : + 'a semiring_1_cancel -> 'a semiring_0_cancel; +val semiring_1_semiring_1_cancel = #semiring_1_semiring_1_cancel : + 'a semiring_1_cancel -> 'a semiring_1; + +type 'a dvd = {times_dvd : 'a times}; +val times_dvd = #times_dvd : 'a dvd -> 'a times; + +type 'a ab_semigroup_mult = + {semigroup_mult_ab_semigroup_mult : 'a semigroup_mult}; +val semigroup_mult_ab_semigroup_mult = #semigroup_mult_ab_semigroup_mult : + 'a ab_semigroup_mult -> 'a semigroup_mult; + +type 'a comm_semiring = + {ab_semigroup_mult_comm_semiring : 'a ab_semigroup_mult, + semiring_comm_semiring : 'a semiring}; +val ab_semigroup_mult_comm_semiring = #ab_semigroup_mult_comm_semiring : + 'a comm_semiring -> 'a ab_semigroup_mult; +val semiring_comm_semiring = #semiring_comm_semiring : + 'a comm_semiring -> 'a semiring; + +type 'a comm_semiring_0 = + {comm_semiring_comm_semiring_0 : 'a comm_semiring, + semiring_0_comm_semiring_0 : 'a semiring_0}; +val comm_semiring_comm_semiring_0 = #comm_semiring_comm_semiring_0 : + 'a comm_semiring_0 -> 'a comm_semiring; +val semiring_0_comm_semiring_0 = #semiring_0_comm_semiring_0 : + 'a comm_semiring_0 -> 'a semiring_0; + +type 'a comm_monoid_mult = + {ab_semigroup_mult_comm_monoid_mult : 'a ab_semigroup_mult, + monoid_mult_comm_monoid_mult : 'a monoid_mult}; +val ab_semigroup_mult_comm_monoid_mult = #ab_semigroup_mult_comm_monoid_mult : + 'a comm_monoid_mult -> 'a ab_semigroup_mult; +val monoid_mult_comm_monoid_mult = #monoid_mult_comm_monoid_mult : + 'a comm_monoid_mult -> 'a monoid_mult; + +type 'a comm_semiring_1 = + {comm_monoid_mult_comm_semiring_1 : 'a comm_monoid_mult, + comm_semiring_0_comm_semiring_1 : 'a comm_semiring_0, + dvd_comm_semiring_1 : 'a dvd, semiring_1_comm_semiring_1 : 'a semiring_1}; +val comm_monoid_mult_comm_semiring_1 = #comm_monoid_mult_comm_semiring_1 : + 'a comm_semiring_1 -> 'a comm_monoid_mult; +val comm_semiring_0_comm_semiring_1 = #comm_semiring_0_comm_semiring_1 : + 'a comm_semiring_1 -> 'a comm_semiring_0; +val dvd_comm_semiring_1 = #dvd_comm_semiring_1 : 'a comm_semiring_1 -> 'a dvd; +val semiring_1_comm_semiring_1 = #semiring_1_comm_semiring_1 : + 'a comm_semiring_1 -> 'a semiring_1; + +type 'a comm_semiring_0_cancel = + {comm_semiring_0_comm_semiring_0_cancel : 'a comm_semiring_0, + semiring_0_cancel_comm_semiring_0_cancel : 'a semiring_0_cancel}; +val comm_semiring_0_comm_semiring_0_cancel = + #comm_semiring_0_comm_semiring_0_cancel : + 'a comm_semiring_0_cancel -> 'a comm_semiring_0; +val semiring_0_cancel_comm_semiring_0_cancel = + 
#semiring_0_cancel_comm_semiring_0_cancel : + 'a comm_semiring_0_cancel -> 'a semiring_0_cancel; + +type 'a comm_semiring_1_cancel = + {comm_semiring_0_cancel_comm_semiring_1_cancel : 'a comm_semiring_0_cancel, + comm_semiring_1_comm_semiring_1_cancel : 'a comm_semiring_1, + semiring_1_cancel_comm_semiring_1_cancel : 'a semiring_1_cancel}; +val comm_semiring_0_cancel_comm_semiring_1_cancel = + #comm_semiring_0_cancel_comm_semiring_1_cancel : + 'a comm_semiring_1_cancel -> 'a comm_semiring_0_cancel; +val comm_semiring_1_comm_semiring_1_cancel = + #comm_semiring_1_comm_semiring_1_cancel : + 'a comm_semiring_1_cancel -> 'a comm_semiring_1; +val semiring_1_cancel_comm_semiring_1_cancel = + #semiring_1_cancel_comm_semiring_1_cancel : + 'a comm_semiring_1_cancel -> 'a semiring_1_cancel; + +type 'a diva = {dvd_div : 'a dvd, diva : 'a -> 'a -> 'a, moda : 'a -> 'a -> 'a}; +val dvd_div = #dvd_div : 'a diva -> 'a dvd; +val diva = #diva : 'a diva -> 'a -> 'a -> 'a; +val moda = #moda : 'a diva -> 'a -> 'a -> 'a; + +type 'a semiring_div = + {div_semiring_div : 'a diva, + comm_semiring_1_cancel_semiring_div : 'a comm_semiring_1_cancel, + no_zero_divisors_semiring_div : 'a no_zero_divisors}; +val div_semiring_div = #div_semiring_div : 'a semiring_div -> 'a diva; +val comm_semiring_1_cancel_semiring_div = #comm_semiring_1_cancel_semiring_div : + 'a semiring_div -> 'a comm_semiring_1_cancel; +val no_zero_divisors_semiring_div = #no_zero_divisors_semiring_div : + 'a semiring_div -> 'a no_zero_divisors; + +val one_int : IntInf.int = (1 : IntInf.int); + +val one_inta = {one = one_int} : IntInf.int one; + +val zero_neq_one_int = + {one_zero_neq_one = one_inta, zero_zero_neq_one = zero_inta} : + IntInf.int zero_neq_one; + +val semigroup_mult_int = {times_semigroup_mult = times_int} : + IntInf.int semigroup_mult; + +val plus_int = {plus = (fn a => fn b => IntInf.+ (a, b))} : IntInf.int plus; + +val semigroup_add_int = {plus_semigroup_add = plus_int} : + IntInf.int semigroup_add; + +val ab_semigroup_add_int = {semigroup_add_ab_semigroup_add = semigroup_add_int} + : IntInf.int ab_semigroup_add; + +val semiring_int = + {ab_semigroup_add_semiring = ab_semigroup_add_int, + semigroup_mult_semiring = semigroup_mult_int} + : IntInf.int semiring; + +val mult_zero_int = {times_mult_zero = times_int, zero_mult_zero = zero_inta} : + IntInf.int mult_zero; + +val monoid_add_int = + {semigroup_add_monoid_add = semigroup_add_int, zero_monoid_add = zero_inta} : + IntInf.int monoid_add; + +val comm_monoid_add_int = + {ab_semigroup_add_comm_monoid_add = ab_semigroup_add_int, + monoid_add_comm_monoid_add = monoid_add_int} + : IntInf.int comm_monoid_add; + +val semiring_0_int = + {comm_monoid_add_semiring_0 = comm_monoid_add_int, + mult_zero_semiring_0 = mult_zero_int, semiring_semiring_0 = semiring_int} + : IntInf.int semiring_0; + +val power_int = {one_power = one_inta, times_power = times_int} : + IntInf.int power; + +val monoid_mult_int = + {semigroup_mult_monoid_mult = semigroup_mult_int, + power_monoid_mult = power_int} + : IntInf.int monoid_mult; + +val semiring_1_int = + {monoid_mult_semiring_1 = monoid_mult_int, + semiring_0_semiring_1 = semiring_0_int, + zero_neq_one_semiring_1 = zero_neq_one_int} + : IntInf.int semiring_1; + +val cancel_semigroup_add_int = + {semigroup_add_cancel_semigroup_add = semigroup_add_int} : + IntInf.int cancel_semigroup_add; + +val cancel_ab_semigroup_add_int = + {ab_semigroup_add_cancel_ab_semigroup_add = ab_semigroup_add_int, + cancel_semigroup_add_cancel_ab_semigroup_add = 
cancel_semigroup_add_int} + : IntInf.int cancel_ab_semigroup_add; + +val cancel_comm_monoid_add_int = + {cancel_ab_semigroup_add_cancel_comm_monoid_add = cancel_ab_semigroup_add_int, + comm_monoid_add_cancel_comm_monoid_add = comm_monoid_add_int} + : IntInf.int cancel_comm_monoid_add; + +val semiring_0_cancel_int = + {cancel_comm_monoid_add_semiring_0_cancel = cancel_comm_monoid_add_int, + semiring_0_semiring_0_cancel = semiring_0_int} + : IntInf.int semiring_0_cancel; + +val semiring_1_cancel_int = + {semiring_0_cancel_semiring_1_cancel = semiring_0_cancel_int, + semiring_1_semiring_1_cancel = semiring_1_int} + : IntInf.int semiring_1_cancel; + +val dvd_int = {times_dvd = times_int} : IntInf.int dvd; + +val ab_semigroup_mult_int = + {semigroup_mult_ab_semigroup_mult = semigroup_mult_int} : + IntInf.int ab_semigroup_mult; + +val comm_semiring_int = + {ab_semigroup_mult_comm_semiring = ab_semigroup_mult_int, + semiring_comm_semiring = semiring_int} + : IntInf.int comm_semiring; + +val comm_semiring_0_int = + {comm_semiring_comm_semiring_0 = comm_semiring_int, + semiring_0_comm_semiring_0 = semiring_0_int} + : IntInf.int comm_semiring_0; + +val comm_monoid_mult_int = + {ab_semigroup_mult_comm_monoid_mult = ab_semigroup_mult_int, + monoid_mult_comm_monoid_mult = monoid_mult_int} + : IntInf.int comm_monoid_mult; + +val comm_semiring_1_int = + {comm_monoid_mult_comm_semiring_1 = comm_monoid_mult_int, + comm_semiring_0_comm_semiring_1 = comm_semiring_0_int, + dvd_comm_semiring_1 = dvd_int, semiring_1_comm_semiring_1 = semiring_1_int} + : IntInf.int comm_semiring_1; + +val comm_semiring_0_cancel_int = + {comm_semiring_0_comm_semiring_0_cancel = comm_semiring_0_int, + semiring_0_cancel_comm_semiring_0_cancel = semiring_0_cancel_int} + : IntInf.int comm_semiring_0_cancel; + +val comm_semiring_1_cancel_int = + {comm_semiring_0_cancel_comm_semiring_1_cancel = comm_semiring_0_cancel_int, + comm_semiring_1_comm_semiring_1_cancel = comm_semiring_1_int, + semiring_1_cancel_comm_semiring_1_cancel = semiring_1_cancel_int} + : IntInf.int comm_semiring_1_cancel; + +fun abs_int i = (if IntInf.< (i, (0 : IntInf.int)) then IntInf.~ i else i); + +fun split f (a, b) = f a b; + fun sgn_int i = - (if eqop eq_int i (0 : IntInf.int) then (0 : IntInf.int) + (if ((i : IntInf.int) = (0 : IntInf.int)) then (0 : IntInf.int) else (if IntInf.< ((0 : IntInf.int), i) then (1 : IntInf.int) else IntInf.~ (1 : IntInf.int))); fun apsnd f (x, y) = (x, f y); -fun divmoda k l = - (if eqop eq_int k (0 : IntInf.int) then ((0 : IntInf.int), (0 : IntInf.int)) - else (if eqop eq_int l (0 : IntInf.int) then ((0 : IntInf.int), k) +fun divmod_int k l = + (if ((k : IntInf.int) = (0 : IntInf.int)) + then ((0 : IntInf.int), (0 : IntInf.int)) + else (if ((l : IntInf.int) = (0 : IntInf.int)) then ((0 : IntInf.int), k) else apsnd (fn a => IntInf.* (sgn_int l, a)) - (if eqop eq_int (sgn_int k) (sgn_int l) - then (fn k => fn l => IntInf.divMod (IntInf.abs k, - IntInf.abs l)) - k l + (if (((sgn_int k) : IntInf.int) = (sgn_int l)) + then IntInf.divMod (IntInf.abs k, IntInf.abs l) else let - val a = - (fn k => fn l => IntInf.divMod (IntInf.abs k, - IntInf.abs l)) - k l; - val (r, s) = a; + val (r, s) = + IntInf.divMod (IntInf.abs k, IntInf.abs l); in - (if eqop eq_int s (0 : IntInf.int) + (if ((s : IntInf.int) = (0 : IntInf.int)) then (IntInf.~ r, (0 : IntInf.int)) else (IntInf.- (IntInf.~ r, (1 : IntInf.int)), IntInf.- (abs_int l, s))) end))); -fun mod_int a b = snd (divmoda a b); +fun snd (a, b) = b; + +fun mod_int a b = snd (divmod_int a b); + +fun 
fst (a, b) = a; + +fun div_int a b = fst (divmod_int a b); + +val div_inta = {dvd_div = dvd_int, diva = div_int, moda = mod_int} : + IntInf.int diva; + +val semiring_div_int = + {div_semiring_div = div_inta, + comm_semiring_1_cancel_semiring_div = comm_semiring_1_cancel_int, + no_zero_divisors_semiring_div = no_zero_divisors_int} + : IntInf.int semiring_div; + +fun dvd (A1_, A2_) a b = + eqa A2_ (moda (div_semiring_div A1_) b a) + (zero ((zero_no_zero_divisors o no_zero_divisors_semiring_div) A1_)); fun num_case f1 f2 f3 f4 f5 f6 f7 (Mul (inta, num)) = f7 inta num | num_case f1 f2 f3 f4 f5 f6 f7 (Sub (num1, num2)) = f6 num1 num2 @@ -742,11 +1315,11 @@ fun numneg t = nummul (IntInf.~ (1 : IntInf.int)) t; fun numadd (Cn (n1, c1, r1), Cn (n2, c2, r2)) = - (if eqop eq_nat n1 n2 + (if ((n1 : IntInf.int) = n2) then let val c = IntInf.+ (c1, c2); in - (if eqop eq_int c (0 : IntInf.int) then numadd (r1, r2) + (if ((c : IntInf.int) = (0 : IntInf.int)) then numadd (r1, r2) else Cn (n1, c, numadd (r1, r2))) end else (if IntInf.<= (n1, n2) @@ -807,10 +1380,8 @@ | numadd (Mul (at, au), Sub (hp, hq)) = Add (Mul (at, au), Sub (hp, hq)) | numadd (Mul (at, au), Mul (hr, hs)) = Add (Mul (at, au), Mul (hr, hs)); -val eq_numa = {eq = eq_num} : num eq; - fun numsub s t = - (if eqop eq_numa s t then C (0 : IntInf.int) else numadd (s, numneg t)); + (if eq_num s t then C (0 : IntInf.int) else numadd (s, numneg t)); fun simpnum (C j) = C j | simpnum (Bound n) = Cn (n, (1 : IntInf.int), C (0 : IntInf.int)) @@ -818,7 +1389,7 @@ | simpnum (Add (t, s)) = numadd (simpnum t, simpnum s) | simpnum (Sub (t, s)) = numsub (simpnum t) (simpnum s) | simpnum (Mul (i, t)) = - (if eqop eq_int i (0 : IntInf.int) then C (0 : IntInf.int) + (if ((i : IntInf.int) = (0 : IntInf.int)) then C (0 : IntInf.int) else nummul i (simpnum t)) | simpnum (Cn (v, va, vb)) = Cn (v, va, vb); @@ -843,23 +1414,20 @@ | nota (NClosed v) = Not (NClosed v); fun iffa p q = - (if eqop eq_fma p q then T - else (if eqop eq_fma p (nota q) orelse eqop eq_fma (nota p) q then F - else (if eqop eq_fma p F then nota q - else (if eqop eq_fma q F then nota p - else (if eqop eq_fma p T then q - else (if eqop eq_fma q T then p - else Iff (p, q))))))); + (if eq_fm p q then T + else (if eq_fm p (nota q) orelse eq_fm (nota p) q then F + else (if eq_fm p F then nota q + else (if eq_fm q F then nota p + else (if eq_fm p T then q + else (if eq_fm q T then p else Iff (p, q))))))); fun impa p q = - (if eqop eq_fma p F orelse eqop eq_fma q T then T - else (if eqop eq_fma p T then q - else (if eqop eq_fma q F then nota p else Imp (p, q)))); + (if eq_fm p F orelse eq_fm q T then T + else (if eq_fm p T then q else (if eq_fm q F then nota p else Imp (p, q)))); fun conj p q = - (if eqop eq_fma p F orelse eqop eq_fma q F then F - else (if eqop eq_fma p T then q - else (if eqop eq_fma q T then p else And (p, q)))); + (if eq_fm p F orelse eq_fm q F then F + else (if eq_fm p T then q else (if eq_fm q T then p else And (p, q)))); fun simpfm (And (p, q)) = conj (simpfm p) (simpfm q) | simpfm (Or (p, q)) = disj (simpfm p) (simpfm q) @@ -868,91 +1436,80 @@ | simpfm (Not p) = nota (simpfm p) | simpfm (Lt a) = let - val a' = simpnum a; + val aa = simpnum a; in - (case a' of C v => (if IntInf.< (v, (0 : IntInf.int)) then T else F) - | Bound nat => Lt a' | Cn (nat, inta, num) => Lt a' | Neg num => Lt a' - | Add (num1, num2) => Lt a' | Sub (num1, num2) => Lt a' - | Mul (inta, num) => Lt a') + (case aa of C v => (if IntInf.< (v, (0 : IntInf.int)) then T else F) + | Bound _ => Lt aa | Cn (_, _, _) 
=> Lt aa | Neg _ => Lt aa + | Add (_, _) => Lt aa | Sub (_, _) => Lt aa | Mul (_, _) => Lt aa) end | simpfm (Le a) = let - val a' = simpnum a; + val aa = simpnum a; in - (case a' of C v => (if IntInf.<= (v, (0 : IntInf.int)) then T else F) - | Bound nat => Le a' | Cn (nat, inta, num) => Le a' | Neg num => Le a' - | Add (num1, num2) => Le a' | Sub (num1, num2) => Le a' - | Mul (inta, num) => Le a') + (case aa of C v => (if IntInf.<= (v, (0 : IntInf.int)) then T else F) + | Bound _ => Le aa | Cn (_, _, _) => Le aa | Neg _ => Le aa + | Add (_, _) => Le aa | Sub (_, _) => Le aa | Mul (_, _) => Le aa) end | simpfm (Gt a) = let - val a' = simpnum a; + val aa = simpnum a; in - (case a' of C v => (if IntInf.< ((0 : IntInf.int), v) then T else F) - | Bound nat => Gt a' | Cn (nat, inta, num) => Gt a' | Neg num => Gt a' - | Add (num1, num2) => Gt a' | Sub (num1, num2) => Gt a' - | Mul (inta, num) => Gt a') + (case aa of C v => (if IntInf.< ((0 : IntInf.int), v) then T else F) + | Bound _ => Gt aa | Cn (_, _, _) => Gt aa | Neg _ => Gt aa + | Add (_, _) => Gt aa | Sub (_, _) => Gt aa | Mul (_, _) => Gt aa) end | simpfm (Ge a) = let - val a' = simpnum a; + val aa = simpnum a; in - (case a' of C v => (if IntInf.<= ((0 : IntInf.int), v) then T else F) - | Bound nat => Ge a' | Cn (nat, inta, num) => Ge a' | Neg num => Ge a' - | Add (num1, num2) => Ge a' | Sub (num1, num2) => Ge a' - | Mul (inta, num) => Ge a') + (case aa of C v => (if IntInf.<= ((0 : IntInf.int), v) then T else F) + | Bound _ => Ge aa | Cn (_, _, _) => Ge aa | Neg _ => Ge aa + | Add (_, _) => Ge aa | Sub (_, _) => Ge aa | Mul (_, _) => Ge aa) end | simpfm (Eq a) = let - val a' = simpnum a; + val aa = simpnum a; in - (case a' of C v => (if eqop eq_int v (0 : IntInf.int) then T else F) - | Bound nat => Eq a' | Cn (nat, inta, num) => Eq a' | Neg num => Eq a' - | Add (num1, num2) => Eq a' | Sub (num1, num2) => Eq a' - | Mul (inta, num) => Eq a') + (case aa + of C v => (if ((v : IntInf.int) = (0 : IntInf.int)) then T else F) + | Bound _ => Eq aa | Cn (_, _, _) => Eq aa | Neg _ => Eq aa + | Add (_, _) => Eq aa | Sub (_, _) => Eq aa | Mul (_, _) => Eq aa) end | simpfm (NEq a) = let - val a' = simpnum a; + val aa = simpnum a; in - (case a' of C v => (if not (eqop eq_int v (0 : IntInf.int)) then T else F) - | Bound nat => NEq a' | Cn (nat, inta, num) => NEq a' - | Neg num => NEq a' | Add (num1, num2) => NEq a' - | Sub (num1, num2) => NEq a' | Mul (inta, num) => NEq a') + (case aa + of C v => (if not ((v : IntInf.int) = (0 : IntInf.int)) then T else F) + | Bound _ => NEq aa | Cn (_, _, _) => NEq aa | Neg _ => NEq aa + | Add (_, _) => NEq aa | Sub (_, _) => NEq aa | Mul (_, _) => NEq aa) end | simpfm (Dvd (i, a)) = - (if eqop eq_int i (0 : IntInf.int) then simpfm (Eq a) - else (if eqop eq_int (abs_int i) (1 : IntInf.int) then T + (if ((i : IntInf.int) = (0 : IntInf.int)) then simpfm (Eq a) + else (if (((abs_int i) : IntInf.int) = (1 : IntInf.int)) then T else let - val a' = simpnum a; + val aa = simpnum a; in - (case a' - of C v => - (if eqop eq_int (mod_int v i) (0 : IntInf.int) then T - else F) - | Bound nat => Dvd (i, a') - | Cn (nat, inta, num) => Dvd (i, a') - | Neg num => Dvd (i, a') - | Add (num1, num2) => Dvd (i, a') - | Sub (num1, num2) => Dvd (i, a') - | Mul (inta, num) => Dvd (i, a')) + (case aa + of C v => + (if dvd (semiring_div_int, eq_int) i v then T else F) + | Bound _ => Dvd (i, aa) | Cn (_, _, _) => Dvd (i, aa) + | Neg _ => Dvd (i, aa) | Add (_, _) => Dvd (i, aa) + | Sub (_, _) => Dvd (i, aa) | Mul (_, _) => Dvd (i, aa)) end)) | 
simpfm (NDvd (i, a)) = - (if eqop eq_int i (0 : IntInf.int) then simpfm (NEq a) - else (if eqop eq_int (abs_int i) (1 : IntInf.int) then F + (if ((i : IntInf.int) = (0 : IntInf.int)) then simpfm (NEq a) + else (if (((abs_int i) : IntInf.int) = (1 : IntInf.int)) then F else let - val a' = simpnum a; + val aa = simpnum a; in - (case a' - of C v => - (if not (eqop eq_int (mod_int v i) (0 : IntInf.int)) - then T else F) - | Bound nat => NDvd (i, a') - | Cn (nat, inta, num) => NDvd (i, a') - | Neg num => NDvd (i, a') - | Add (num1, num2) => NDvd (i, a') - | Sub (num1, num2) => NDvd (i, a') - | Mul (inta, num) => NDvd (i, a')) + (case aa + of C v => + (if not (dvd (semiring_div_int, eq_int) i v) then T + else F) + | Bound _ => NDvd (i, aa) | Cn (_, _, _) => NDvd (i, aa) + | Neg _ => NDvd (i, aa) | Add (_, _) => NDvd (i, aa) + | Sub (_, _) => NDvd (i, aa) | Mul (_, _) => NDvd (i, aa)) end)) | simpfm T = T | simpfm F = F @@ -1025,32 +1582,40 @@ | mirror (Closed ap) = Closed ap | mirror (NClosed aq) = NClosed aq | mirror (Lt (Cn (cm, c, e))) = - (if eqop eq_nat cm 0 then Gt (Cn (0, c, Neg e)) - else Lt (Cn (suc (minus_nat cm 1), c, e))) + (if ((cm : IntInf.int) = (0 : IntInf.int)) + then Gt (Cn ((0 : IntInf.int), c, Neg e)) + else Lt (Cn (suc (minus_nat cm (1 : IntInf.int)), c, e))) | mirror (Le (Cn (dm, c, e))) = - (if eqop eq_nat dm 0 then Ge (Cn (0, c, Neg e)) - else Le (Cn (suc (minus_nat dm 1), c, e))) + (if ((dm : IntInf.int) = (0 : IntInf.int)) + then Ge (Cn ((0 : IntInf.int), c, Neg e)) + else Le (Cn (suc (minus_nat dm (1 : IntInf.int)), c, e))) | mirror (Gt (Cn (em, c, e))) = - (if eqop eq_nat em 0 then Lt (Cn (0, c, Neg e)) - else Gt (Cn (suc (minus_nat em 1), c, e))) + (if ((em : IntInf.int) = (0 : IntInf.int)) + then Lt (Cn ((0 : IntInf.int), c, Neg e)) + else Gt (Cn (suc (minus_nat em (1 : IntInf.int)), c, e))) | mirror (Ge (Cn (fm, c, e))) = - (if eqop eq_nat fm 0 then Le (Cn (0, c, Neg e)) - else Ge (Cn (suc (minus_nat fm 1), c, e))) + (if ((fm : IntInf.int) = (0 : IntInf.int)) + then Le (Cn ((0 : IntInf.int), c, Neg e)) + else Ge (Cn (suc (minus_nat fm (1 : IntInf.int)), c, e))) | mirror (Eq (Cn (gm, c, e))) = - (if eqop eq_nat gm 0 then Eq (Cn (0, c, Neg e)) - else Eq (Cn (suc (minus_nat gm 1), c, e))) + (if ((gm : IntInf.int) = (0 : IntInf.int)) + then Eq (Cn ((0 : IntInf.int), c, Neg e)) + else Eq (Cn (suc (minus_nat gm (1 : IntInf.int)), c, e))) | mirror (NEq (Cn (hm, c, e))) = - (if eqop eq_nat hm 0 then NEq (Cn (0, c, Neg e)) - else NEq (Cn (suc (minus_nat hm 1), c, e))) + (if ((hm : IntInf.int) = (0 : IntInf.int)) + then NEq (Cn ((0 : IntInf.int), c, Neg e)) + else NEq (Cn (suc (minus_nat hm (1 : IntInf.int)), c, e))) | mirror (Dvd (i, Cn (im, c, e))) = - (if eqop eq_nat im 0 then Dvd (i, Cn (0, c, Neg e)) - else Dvd (i, Cn (suc (minus_nat im 1), c, e))) + (if ((im : IntInf.int) = (0 : IntInf.int)) + then Dvd (i, Cn ((0 : IntInf.int), c, Neg e)) + else Dvd (i, Cn (suc (minus_nat im (1 : IntInf.int)), c, e))) | mirror (NDvd (i, Cn (jm, c, e))) = - (if eqop eq_nat jm 0 then NDvd (i, Cn (0, c, Neg e)) - else NDvd (i, Cn (suc (minus_nat jm 1), c, e))); + (if ((jm : IntInf.int) = (0 : IntInf.int)) + then NDvd (i, Cn ((0 : IntInf.int), c, Neg e)) + else NDvd (i, Cn (suc (minus_nat jm (1 : IntInf.int)), c, e))); -fun size_list [] = 0 - | size_list (a :: lista) = IntInf.+ (size_list lista, suc 0); +fun size_list [] = (0 : IntInf.int) + | size_list (a :: lista) = IntInf.+ (size_list lista, suc (0 : IntInf.int)); fun alpha (And (p, q)) = append (alpha p) (alpha q) | alpha (Or (p, q)) = 
append (alpha p) (alpha q) @@ -1101,14 +1666,20 @@ | alpha (A ao) = [] | alpha (Closed ap) = [] | alpha (NClosed aq) = [] - | alpha (Lt (Cn (cm, c, e))) = (if eqop eq_nat cm 0 then [e] else []) + | alpha (Lt (Cn (cm, c, e))) = + (if ((cm : IntInf.int) = (0 : IntInf.int)) then [e] else []) | alpha (Le (Cn (dm, c, e))) = - (if eqop eq_nat dm 0 then [Add (C (~1 : IntInf.int), e)] else []) - | alpha (Gt (Cn (em, c, e))) = (if eqop eq_nat em 0 then [] else []) - | alpha (Ge (Cn (fm, c, e))) = (if eqop eq_nat fm 0 then [] else []) + (if ((dm : IntInf.int) = (0 : IntInf.int)) + then [Add (C (~1 : IntInf.int), e)] else []) + | alpha (Gt (Cn (em, c, e))) = + (if ((em : IntInf.int) = (0 : IntInf.int)) then [] else []) + | alpha (Ge (Cn (fm, c, e))) = + (if ((fm : IntInf.int) = (0 : IntInf.int)) then [] else []) | alpha (Eq (Cn (gm, c, e))) = - (if eqop eq_nat gm 0 then [Add (C (~1 : IntInf.int), e)] else []) - | alpha (NEq (Cn (hm, c, e))) = (if eqop eq_nat hm 0 then [e] else []); + (if ((gm : IntInf.int) = (0 : IntInf.int)) + then [Add (C (~1 : IntInf.int), e)] else []) + | alpha (NEq (Cn (hm, c, e))) = + (if ((hm : IntInf.int) = (0 : IntInf.int)) then [e] else []); fun beta (And (p, q)) = append (beta p) (beta q) | beta (Or (p, q)) = append (beta p) (beta q) @@ -1159,24 +1730,39 @@ | beta (A ao) = [] | beta (Closed ap) = [] | beta (NClosed aq) = [] - | beta (Lt (Cn (cm, c, e))) = (if eqop eq_nat cm 0 then [] else []) - | beta (Le (Cn (dm, c, e))) = (if eqop eq_nat dm 0 then [] else []) - | beta (Gt (Cn (em, c, e))) = (if eqop eq_nat em 0 then [Neg e] else []) + | beta (Lt (Cn (cm, c, e))) = + (if ((cm : IntInf.int) = (0 : IntInf.int)) then [] else []) + | beta (Le (Cn (dm, c, e))) = + (if ((dm : IntInf.int) = (0 : IntInf.int)) then [] else []) + | beta (Gt (Cn (em, c, e))) = + (if ((em : IntInf.int) = (0 : IntInf.int)) then [Neg e] else []) | beta (Ge (Cn (fm, c, e))) = - (if eqop eq_nat fm 0 then [Sub (C (~1 : IntInf.int), e)] else []) + (if ((fm : IntInf.int) = (0 : IntInf.int)) + then [Sub (C (~1 : IntInf.int), e)] else []) | beta (Eq (Cn (gm, c, e))) = - (if eqop eq_nat gm 0 then [Sub (C (~1 : IntInf.int), e)] else []) - | beta (NEq (Cn (hm, c, e))) = (if eqop eq_nat hm 0 then [Neg e] else []); + (if ((gm : IntInf.int) = (0 : IntInf.int)) + then [Sub (C (~1 : IntInf.int), e)] else []) + | beta (NEq (Cn (hm, c, e))) = + (if ((hm : IntInf.int) = (0 : IntInf.int)) then [Neg e] else []); + +val eq_numa = {eq = eq_num} : num eq; fun member A_ x [] = false - | member A_ x (y :: ys) = eqop A_ x y orelse member A_ x ys; + | member A_ x (y :: ys) = eqa A_ x y orelse member A_ x ys; fun remdups A_ [] = [] | remdups A_ (x :: xs) = (if member A_ x xs then remdups A_ xs else x :: remdups A_ xs); -fun delta (And (p, q)) = zlcm (delta p) (delta q) - | delta (Or (p, q)) = zlcm (delta p) (delta q) +fun gcd_int k l = + abs_int + (if ((l : IntInf.int) = (0 : IntInf.int)) then k + else gcd_int l (mod_int (abs_int k) (abs_int l))); + +fun lcm_int a b = div_int (IntInf.* (abs_int a, abs_int b)) (gcd_int a b); + +fun delta (And (p, q)) = lcm_int (delta p) (delta q) + | delta (Or (p, q)) = lcm_int (delta p) (delta q) | delta T = (1 : IntInf.int) | delta F = (1 : IntInf.int) | delta (Lt u) = (1 : IntInf.int) @@ -1205,110 +1791,117 @@ | delta (Closed ap) = (1 : IntInf.int) | delta (NClosed aq) = (1 : IntInf.int) | delta (Dvd (i, Cn (cm, c, e))) = - (if eqop eq_nat cm 0 then i else (1 : IntInf.int)) + (if ((cm : IntInf.int) = (0 : IntInf.int)) then i else (1 : IntInf.int)) | delta (NDvd (i, Cn (dm, c, e))) = - (if eqop 
eq_nat dm 0 then i else (1 : IntInf.int)); - -fun div_int a b = fst (divmoda a b); + (if ((dm : IntInf.int) = (0 : IntInf.int)) then i else (1 : IntInf.int)); fun a_beta (And (p, q)) = (fn k => And (a_beta p k, a_beta q k)) | a_beta (Or (p, q)) = (fn k => Or (a_beta p k, a_beta q k)) - | a_beta T = (fn k => T) - | a_beta F = (fn k => F) - | a_beta (Lt (C bo)) = (fn k => Lt (C bo)) - | a_beta (Lt (Bound bp)) = (fn k => Lt (Bound bp)) - | a_beta (Lt (Neg bt)) = (fn k => Lt (Neg bt)) - | a_beta (Lt (Add (bu, bv))) = (fn k => Lt (Add (bu, bv))) - | a_beta (Lt (Sub (bw, bx))) = (fn k => Lt (Sub (bw, bx))) - | a_beta (Lt (Mul (by, bz))) = (fn k => Lt (Mul (by, bz))) - | a_beta (Le (C co)) = (fn k => Le (C co)) - | a_beta (Le (Bound cp)) = (fn k => Le (Bound cp)) - | a_beta (Le (Neg ct)) = (fn k => Le (Neg ct)) - | a_beta (Le (Add (cu, cv))) = (fn k => Le (Add (cu, cv))) - | a_beta (Le (Sub (cw, cx))) = (fn k => Le (Sub (cw, cx))) - | a_beta (Le (Mul (cy, cz))) = (fn k => Le (Mul (cy, cz))) - | a_beta (Gt (C doa)) = (fn k => Gt (C doa)) - | a_beta (Gt (Bound dp)) = (fn k => Gt (Bound dp)) - | a_beta (Gt (Neg dt)) = (fn k => Gt (Neg dt)) - | a_beta (Gt (Add (du, dv))) = (fn k => Gt (Add (du, dv))) - | a_beta (Gt (Sub (dw, dx))) = (fn k => Gt (Sub (dw, dx))) - | a_beta (Gt (Mul (dy, dz))) = (fn k => Gt (Mul (dy, dz))) - | a_beta (Ge (C eo)) = (fn k => Ge (C eo)) - | a_beta (Ge (Bound ep)) = (fn k => Ge (Bound ep)) - | a_beta (Ge (Neg et)) = (fn k => Ge (Neg et)) - | a_beta (Ge (Add (eu, ev))) = (fn k => Ge (Add (eu, ev))) - | a_beta (Ge (Sub (ew, ex))) = (fn k => Ge (Sub (ew, ex))) - | a_beta (Ge (Mul (ey, ez))) = (fn k => Ge (Mul (ey, ez))) - | a_beta (Eq (C fo)) = (fn k => Eq (C fo)) - | a_beta (Eq (Bound fp)) = (fn k => Eq (Bound fp)) - | a_beta (Eq (Neg ft)) = (fn k => Eq (Neg ft)) - | a_beta (Eq (Add (fu, fv))) = (fn k => Eq (Add (fu, fv))) - | a_beta (Eq (Sub (fw, fx))) = (fn k => Eq (Sub (fw, fx))) - | a_beta (Eq (Mul (fy, fz))) = (fn k => Eq (Mul (fy, fz))) - | a_beta (NEq (C go)) = (fn k => NEq (C go)) - | a_beta (NEq (Bound gp)) = (fn k => NEq (Bound gp)) - | a_beta (NEq (Neg gt)) = (fn k => NEq (Neg gt)) - | a_beta (NEq (Add (gu, gv))) = (fn k => NEq (Add (gu, gv))) - | a_beta (NEq (Sub (gw, gx))) = (fn k => NEq (Sub (gw, gx))) - | a_beta (NEq (Mul (gy, gz))) = (fn k => NEq (Mul (gy, gz))) - | a_beta (Dvd (aa, C ho)) = (fn k => Dvd (aa, C ho)) - | a_beta (Dvd (aa, Bound hp)) = (fn k => Dvd (aa, Bound hp)) - | a_beta (Dvd (aa, Neg ht)) = (fn k => Dvd (aa, Neg ht)) - | a_beta (Dvd (aa, Add (hu, hv))) = (fn k => Dvd (aa, Add (hu, hv))) - | a_beta (Dvd (aa, Sub (hw, hx))) = (fn k => Dvd (aa, Sub (hw, hx))) - | a_beta (Dvd (aa, Mul (hy, hz))) = (fn k => Dvd (aa, Mul (hy, hz))) - | a_beta (NDvd (ac, C io)) = (fn k => NDvd (ac, C io)) - | a_beta (NDvd (ac, Bound ip)) = (fn k => NDvd (ac, Bound ip)) - | a_beta (NDvd (ac, Neg it)) = (fn k => NDvd (ac, Neg it)) - | a_beta (NDvd (ac, Add (iu, iv))) = (fn k => NDvd (ac, Add (iu, iv))) - | a_beta (NDvd (ac, Sub (iw, ix))) = (fn k => NDvd (ac, Sub (iw, ix))) - | a_beta (NDvd (ac, Mul (iy, iz))) = (fn k => NDvd (ac, Mul (iy, iz))) - | a_beta (Not ae) = (fn k => Not ae) - | a_beta (Imp (aj, ak)) = (fn k => Imp (aj, ak)) - | a_beta (Iff (al, am)) = (fn k => Iff (al, am)) - | a_beta (E an) = (fn k => E an) - | a_beta (A ao) = (fn k => A ao) - | a_beta (Closed ap) = (fn k => Closed ap) - | a_beta (NClosed aq) = (fn k => NClosed aq) + | a_beta T = (fn _ => T) + | a_beta F = (fn _ => F) + | a_beta (Lt (C bo)) = (fn _ => Lt (C bo)) + | a_beta (Lt (Bound bp)) 
= (fn _ => Lt (Bound bp)) + | a_beta (Lt (Neg bt)) = (fn _ => Lt (Neg bt)) + | a_beta (Lt (Add (bu, bv))) = (fn _ => Lt (Add (bu, bv))) + | a_beta (Lt (Sub (bw, bx))) = (fn _ => Lt (Sub (bw, bx))) + | a_beta (Lt (Mul (by, bz))) = (fn _ => Lt (Mul (by, bz))) + | a_beta (Le (C co)) = (fn _ => Le (C co)) + | a_beta (Le (Bound cp)) = (fn _ => Le (Bound cp)) + | a_beta (Le (Neg ct)) = (fn _ => Le (Neg ct)) + | a_beta (Le (Add (cu, cv))) = (fn _ => Le (Add (cu, cv))) + | a_beta (Le (Sub (cw, cx))) = (fn _ => Le (Sub (cw, cx))) + | a_beta (Le (Mul (cy, cz))) = (fn _ => Le (Mul (cy, cz))) + | a_beta (Gt (C doa)) = (fn _ => Gt (C doa)) + | a_beta (Gt (Bound dp)) = (fn _ => Gt (Bound dp)) + | a_beta (Gt (Neg dt)) = (fn _ => Gt (Neg dt)) + | a_beta (Gt (Add (du, dv))) = (fn _ => Gt (Add (du, dv))) + | a_beta (Gt (Sub (dw, dx))) = (fn _ => Gt (Sub (dw, dx))) + | a_beta (Gt (Mul (dy, dz))) = (fn _ => Gt (Mul (dy, dz))) + | a_beta (Ge (C eo)) = (fn _ => Ge (C eo)) + | a_beta (Ge (Bound ep)) = (fn _ => Ge (Bound ep)) + | a_beta (Ge (Neg et)) = (fn _ => Ge (Neg et)) + | a_beta (Ge (Add (eu, ev))) = (fn _ => Ge (Add (eu, ev))) + | a_beta (Ge (Sub (ew, ex))) = (fn _ => Ge (Sub (ew, ex))) + | a_beta (Ge (Mul (ey, ez))) = (fn _ => Ge (Mul (ey, ez))) + | a_beta (Eq (C fo)) = (fn _ => Eq (C fo)) + | a_beta (Eq (Bound fp)) = (fn _ => Eq (Bound fp)) + | a_beta (Eq (Neg ft)) = (fn _ => Eq (Neg ft)) + | a_beta (Eq (Add (fu, fv))) = (fn _ => Eq (Add (fu, fv))) + | a_beta (Eq (Sub (fw, fx))) = (fn _ => Eq (Sub (fw, fx))) + | a_beta (Eq (Mul (fy, fz))) = (fn _ => Eq (Mul (fy, fz))) + | a_beta (NEq (C go)) = (fn _ => NEq (C go)) + | a_beta (NEq (Bound gp)) = (fn _ => NEq (Bound gp)) + | a_beta (NEq (Neg gt)) = (fn _ => NEq (Neg gt)) + | a_beta (NEq (Add (gu, gv))) = (fn _ => NEq (Add (gu, gv))) + | a_beta (NEq (Sub (gw, gx))) = (fn _ => NEq (Sub (gw, gx))) + | a_beta (NEq (Mul (gy, gz))) = (fn _ => NEq (Mul (gy, gz))) + | a_beta (Dvd (aa, C ho)) = (fn _ => Dvd (aa, C ho)) + | a_beta (Dvd (aa, Bound hp)) = (fn _ => Dvd (aa, Bound hp)) + | a_beta (Dvd (aa, Neg ht)) = (fn _ => Dvd (aa, Neg ht)) + | a_beta (Dvd (aa, Add (hu, hv))) = (fn _ => Dvd (aa, Add (hu, hv))) + | a_beta (Dvd (aa, Sub (hw, hx))) = (fn _ => Dvd (aa, Sub (hw, hx))) + | a_beta (Dvd (aa, Mul (hy, hz))) = (fn _ => Dvd (aa, Mul (hy, hz))) + | a_beta (NDvd (ac, C io)) = (fn _ => NDvd (ac, C io)) + | a_beta (NDvd (ac, Bound ip)) = (fn _ => NDvd (ac, Bound ip)) + | a_beta (NDvd (ac, Neg it)) = (fn _ => NDvd (ac, Neg it)) + | a_beta (NDvd (ac, Add (iu, iv))) = (fn _ => NDvd (ac, Add (iu, iv))) + | a_beta (NDvd (ac, Sub (iw, ix))) = (fn _ => NDvd (ac, Sub (iw, ix))) + | a_beta (NDvd (ac, Mul (iy, iz))) = (fn _ => NDvd (ac, Mul (iy, iz))) + | a_beta (Not ae) = (fn _ => Not ae) + | a_beta (Imp (aj, ak)) = (fn _ => Imp (aj, ak)) + | a_beta (Iff (al, am)) = (fn _ => Iff (al, am)) + | a_beta (E an) = (fn _ => E an) + | a_beta (A ao) = (fn _ => A ao) + | a_beta (Closed ap) = (fn _ => Closed ap) + | a_beta (NClosed aq) = (fn _ => NClosed aq) | a_beta (Lt (Cn (cm, c, e))) = - (if eqop eq_nat cm 0 - then (fn k => Lt (Cn (0, (1 : IntInf.int), Mul (div_int k c, e)))) - else (fn k => Lt (Cn (suc (minus_nat cm 1), c, e)))) + (if ((cm : IntInf.int) = (0 : IntInf.int)) + then (fn k => + Lt (Cn ((0 : IntInf.int), (1 : IntInf.int), Mul (div_int k c, e)))) + else (fn _ => Lt (Cn (suc (minus_nat cm (1 : IntInf.int)), c, e)))) | a_beta (Le (Cn (dm, c, e))) = - (if eqop eq_nat dm 0 - then (fn k => Le (Cn (0, (1 : IntInf.int), Mul (div_int k c, e)))) - else (fn k => Le (Cn (suc 
(minus_nat dm 1), c, e)))) + (if ((dm : IntInf.int) = (0 : IntInf.int)) + then (fn k => + Le (Cn ((0 : IntInf.int), (1 : IntInf.int), Mul (div_int k c, e)))) + else (fn _ => Le (Cn (suc (minus_nat dm (1 : IntInf.int)), c, e)))) | a_beta (Gt (Cn (em, c, e))) = - (if eqop eq_nat em 0 - then (fn k => Gt (Cn (0, (1 : IntInf.int), Mul (div_int k c, e)))) - else (fn k => Gt (Cn (suc (minus_nat em 1), c, e)))) + (if ((em : IntInf.int) = (0 : IntInf.int)) + then (fn k => + Gt (Cn ((0 : IntInf.int), (1 : IntInf.int), Mul (div_int k c, e)))) + else (fn _ => Gt (Cn (suc (minus_nat em (1 : IntInf.int)), c, e)))) | a_beta (Ge (Cn (fm, c, e))) = - (if eqop eq_nat fm 0 - then (fn k => Ge (Cn (0, (1 : IntInf.int), Mul (div_int k c, e)))) - else (fn k => Ge (Cn (suc (minus_nat fm 1), c, e)))) + (if ((fm : IntInf.int) = (0 : IntInf.int)) + then (fn k => + Ge (Cn ((0 : IntInf.int), (1 : IntInf.int), Mul (div_int k c, e)))) + else (fn _ => Ge (Cn (suc (minus_nat fm (1 : IntInf.int)), c, e)))) | a_beta (Eq (Cn (gm, c, e))) = - (if eqop eq_nat gm 0 - then (fn k => Eq (Cn (0, (1 : IntInf.int), Mul (div_int k c, e)))) - else (fn k => Eq (Cn (suc (minus_nat gm 1), c, e)))) + (if ((gm : IntInf.int) = (0 : IntInf.int)) + then (fn k => + Eq (Cn ((0 : IntInf.int), (1 : IntInf.int), Mul (div_int k c, e)))) + else (fn _ => Eq (Cn (suc (minus_nat gm (1 : IntInf.int)), c, e)))) | a_beta (NEq (Cn (hm, c, e))) = - (if eqop eq_nat hm 0 - then (fn k => NEq (Cn (0, (1 : IntInf.int), Mul (div_int k c, e)))) - else (fn k => NEq (Cn (suc (minus_nat hm 1), c, e)))) + (if ((hm : IntInf.int) = (0 : IntInf.int)) + then (fn k => + NEq (Cn ((0 : IntInf.int), (1 : IntInf.int), + Mul (div_int k c, e)))) + else (fn _ => NEq (Cn (suc (minus_nat hm (1 : IntInf.int)), c, e)))) | a_beta (Dvd (i, Cn (im, c, e))) = - (if eqop eq_nat im 0 + (if ((im : IntInf.int) = (0 : IntInf.int)) then (fn k => Dvd (IntInf.* (div_int k c, i), - Cn (0, (1 : IntInf.int), Mul (div_int k c, e)))) - else (fn k => Dvd (i, Cn (suc (minus_nat im 1), c, e)))) + Cn ((0 : IntInf.int), (1 : IntInf.int), + Mul (div_int k c, e)))) + else (fn _ => Dvd (i, Cn (suc (minus_nat im (1 : IntInf.int)), c, e)))) | a_beta (NDvd (i, Cn (jm, c, e))) = - (if eqop eq_nat jm 0 + (if ((jm : IntInf.int) = (0 : IntInf.int)) then (fn k => NDvd (IntInf.* (div_int k c, i), - Cn (0, (1 : IntInf.int), Mul (div_int k c, e)))) - else (fn k => NDvd (i, Cn (suc (minus_nat jm 1), c, e)))); + Cn ((0 : IntInf.int), (1 : IntInf.int), + Mul (div_int k c, e)))) + else (fn _ => NDvd (i, Cn (suc (minus_nat jm (1 : IntInf.int)), c, e)))); -fun zeta (And (p, q)) = zlcm (zeta p) (zeta q) - | zeta (Or (p, q)) = zlcm (zeta p) (zeta q) +fun zeta (And (p, q)) = lcm_int (zeta p) (zeta q) + | zeta (Or (p, q)) = lcm_int (zeta p) (zeta q) | zeta T = (1 : IntInf.int) | zeta F = (1 : IntInf.int) | zeta (Lt (C bo)) = (1 : IntInf.int) @@ -1367,64 +1960,59 @@ | zeta (Closed ap) = (1 : IntInf.int) | zeta (NClosed aq) = (1 : IntInf.int) | zeta (Lt (Cn (cm, c, e))) = - (if eqop eq_nat cm 0 then c else (1 : IntInf.int)) + (if ((cm : IntInf.int) = (0 : IntInf.int)) then c else (1 : IntInf.int)) | zeta (Le (Cn (dm, c, e))) = - (if eqop eq_nat dm 0 then c else (1 : IntInf.int)) + (if ((dm : IntInf.int) = (0 : IntInf.int)) then c else (1 : IntInf.int)) | zeta (Gt (Cn (em, c, e))) = - (if eqop eq_nat em 0 then c else (1 : IntInf.int)) + (if ((em : IntInf.int) = (0 : IntInf.int)) then c else (1 : IntInf.int)) | zeta (Ge (Cn (fm, c, e))) = - (if eqop eq_nat fm 0 then c else (1 : IntInf.int)) + (if ((fm : IntInf.int) = (0 : 
IntInf.int)) then c else (1 : IntInf.int)) | zeta (Eq (Cn (gm, c, e))) = - (if eqop eq_nat gm 0 then c else (1 : IntInf.int)) + (if ((gm : IntInf.int) = (0 : IntInf.int)) then c else (1 : IntInf.int)) | zeta (NEq (Cn (hm, c, e))) = - (if eqop eq_nat hm 0 then c else (1 : IntInf.int)) + (if ((hm : IntInf.int) = (0 : IntInf.int)) then c else (1 : IntInf.int)) | zeta (Dvd (i, Cn (im, c, e))) = - (if eqop eq_nat im 0 then c else (1 : IntInf.int)) + (if ((im : IntInf.int) = (0 : IntInf.int)) then c else (1 : IntInf.int)) | zeta (NDvd (i, Cn (jm, c, e))) = - (if eqop eq_nat jm 0 then c else (1 : IntInf.int)); + (if ((jm : IntInf.int) = (0 : IntInf.int)) then c else (1 : IntInf.int)); fun zsplit0 (C c) = ((0 : IntInf.int), C c) | zsplit0 (Bound n) = - (if eqop eq_nat n 0 then ((1 : IntInf.int), C (0 : IntInf.int)) + (if ((n : IntInf.int) = (0 : IntInf.int)) + then ((1 : IntInf.int), C (0 : IntInf.int)) else ((0 : IntInf.int), Bound n)) | zsplit0 (Cn (n, i, a)) = let - val aa = zsplit0 a; - val (i', a') = aa; + val (ia, aa) = zsplit0 a; in - (if eqop eq_nat n 0 then (IntInf.+ (i, i'), a') else (i', Cn (n, i, a'))) + (if ((n : IntInf.int) = (0 : IntInf.int)) then (IntInf.+ (i, ia), aa) + else (ia, Cn (n, i, aa))) end | zsplit0 (Neg a) = let - val aa = zsplit0 a; - val (i', a') = aa; + val (i, aa) = zsplit0 a; in - (IntInf.~ i', Neg a') + (IntInf.~ i, Neg aa) end | zsplit0 (Add (a, b)) = let - val aa = zsplit0 a; - val (ia, a') = aa; - val ab = zsplit0 b; - val (ib, b') = ab; + val (ia, aa) = zsplit0 a; + val (ib, ba) = zsplit0 b; in - (IntInf.+ (ia, ib), Add (a', b')) + (IntInf.+ (ia, ib), Add (aa, ba)) end | zsplit0 (Sub (a, b)) = let - val aa = zsplit0 a; - val (ia, a') = aa; - val ab = zsplit0 b; - val (ib, b') = ab; + val (ia, aa) = zsplit0 a; + val (ib, ba) = zsplit0 b; in - (IntInf.- (ia, ib), Sub (a', b')) + (IntInf.- (ia, ib), Sub (aa, ba)) end | zsplit0 (Mul (i, a)) = let - val aa = zsplit0 a; - val (i', a') = aa; + val (ia, aa) = zsplit0 a; in - (IntInf.* (i, i'), Mul (i, a')) + (IntInf.* (i, ia), Mul (i, aa)) end; fun zlfm (And (p, q)) = And (zlfm p, zlfm q) @@ -1434,79 +2022,79 @@ Or (And (zlfm p, zlfm q), And (zlfm (Not p), zlfm (Not q))) | zlfm (Lt a) = let - val aa = zsplit0 a; - val (c, r) = aa; + val (c, r) = zsplit0 a; in - (if eqop eq_int c (0 : IntInf.int) then Lt r - else (if IntInf.< ((0 : IntInf.int), c) then Lt (Cn (0, c, r)) - else Gt (Cn (0, IntInf.~ c, Neg r)))) + (if ((c : IntInf.int) = (0 : IntInf.int)) then Lt r + else (if IntInf.< ((0 : IntInf.int), c) + then Lt (Cn ((0 : IntInf.int), c, r)) + else Gt (Cn ((0 : IntInf.int), IntInf.~ c, Neg r)))) end | zlfm (Le a) = let - val aa = zsplit0 a; - val (c, r) = aa; + val (c, r) = zsplit0 a; in - (if eqop eq_int c (0 : IntInf.int) then Le r - else (if IntInf.< ((0 : IntInf.int), c) then Le (Cn (0, c, r)) - else Ge (Cn (0, IntInf.~ c, Neg r)))) + (if ((c : IntInf.int) = (0 : IntInf.int)) then Le r + else (if IntInf.< ((0 : IntInf.int), c) + then Le (Cn ((0 : IntInf.int), c, r)) + else Ge (Cn ((0 : IntInf.int), IntInf.~ c, Neg r)))) end | zlfm (Gt a) = let - val aa = zsplit0 a; - val (c, r) = aa; + val (c, r) = zsplit0 a; in - (if eqop eq_int c (0 : IntInf.int) then Gt r - else (if IntInf.< ((0 : IntInf.int), c) then Gt (Cn (0, c, r)) - else Lt (Cn (0, IntInf.~ c, Neg r)))) + (if ((c : IntInf.int) = (0 : IntInf.int)) then Gt r + else (if IntInf.< ((0 : IntInf.int), c) + then Gt (Cn ((0 : IntInf.int), c, r)) + else Lt (Cn ((0 : IntInf.int), IntInf.~ c, Neg r)))) end | zlfm (Ge a) = let - val aa = zsplit0 a; - val (c, r) = aa; + 
val (c, r) = zsplit0 a; in - (if eqop eq_int c (0 : IntInf.int) then Ge r - else (if IntInf.< ((0 : IntInf.int), c) then Ge (Cn (0, c, r)) - else Le (Cn (0, IntInf.~ c, Neg r)))) + (if ((c : IntInf.int) = (0 : IntInf.int)) then Ge r + else (if IntInf.< ((0 : IntInf.int), c) + then Ge (Cn ((0 : IntInf.int), c, r)) + else Le (Cn ((0 : IntInf.int), IntInf.~ c, Neg r)))) end | zlfm (Eq a) = let - val aa = zsplit0 a; - val (c, r) = aa; + val (c, r) = zsplit0 a; in - (if eqop eq_int c (0 : IntInf.int) then Eq r - else (if IntInf.< ((0 : IntInf.int), c) then Eq (Cn (0, c, r)) - else Eq (Cn (0, IntInf.~ c, Neg r)))) + (if ((c : IntInf.int) = (0 : IntInf.int)) then Eq r + else (if IntInf.< ((0 : IntInf.int), c) + then Eq (Cn ((0 : IntInf.int), c, r)) + else Eq (Cn ((0 : IntInf.int), IntInf.~ c, Neg r)))) end | zlfm (NEq a) = let - val aa = zsplit0 a; - val (c, r) = aa; + val (c, r) = zsplit0 a; in - (if eqop eq_int c (0 : IntInf.int) then NEq r - else (if IntInf.< ((0 : IntInf.int), c) then NEq (Cn (0, c, r)) - else NEq (Cn (0, IntInf.~ c, Neg r)))) + (if ((c : IntInf.int) = (0 : IntInf.int)) then NEq r + else (if IntInf.< ((0 : IntInf.int), c) + then NEq (Cn ((0 : IntInf.int), c, r)) + else NEq (Cn ((0 : IntInf.int), IntInf.~ c, Neg r)))) end | zlfm (Dvd (i, a)) = - (if eqop eq_int i (0 : IntInf.int) then zlfm (Eq a) + (if ((i : IntInf.int) = (0 : IntInf.int)) then zlfm (Eq a) else let - val aa = zsplit0 a; - val (c, r) = aa; + val (c, r) = zsplit0 a; in - (if eqop eq_int c (0 : IntInf.int) then Dvd (abs_int i, r) + (if ((c : IntInf.int) = (0 : IntInf.int)) then Dvd (abs_int i, r) else (if IntInf.< ((0 : IntInf.int), c) - then Dvd (abs_int i, Cn (0, c, r)) - else Dvd (abs_int i, Cn (0, IntInf.~ c, Neg r)))) + then Dvd (abs_int i, Cn ((0 : IntInf.int), c, r)) + else Dvd (abs_int i, + Cn ((0 : IntInf.int), IntInf.~ c, Neg r)))) end) | zlfm (NDvd (i, a)) = - (if eqop eq_int i (0 : IntInf.int) then zlfm (NEq a) + (if ((i : IntInf.int) = (0 : IntInf.int)) then zlfm (NEq a) else let - val aa = zsplit0 a; - val (c, r) = aa; + val (c, r) = zsplit0 a; in - (if eqop eq_int c (0 : IntInf.int) then NDvd (abs_int i, r) + (if ((c : IntInf.int) = (0 : IntInf.int)) then NDvd (abs_int i, r) else (if IntInf.< ((0 : IntInf.int), c) - then NDvd (abs_int i, Cn (0, c, r)) - else NDvd (abs_int i, Cn (0, IntInf.~ c, Neg r)))) + then NDvd (abs_int i, Cn ((0 : IntInf.int), c, r)) + else NDvd (abs_int i, + Cn ((0 : IntInf.int), IntInf.~ c, Neg r)))) end) | zlfm (Not (And (p, q))) = Or (zlfm (Not p), zlfm (Not q)) | zlfm (Not (Or (p, q))) = And (zlfm (Not p), zlfm (Not q)) @@ -1537,10 +2125,11 @@ fun unita p = let - val p' = zlfm p; - val l = zeta p'; + val pa = zlfm p; + val l = zeta pa; val q = - And (Dvd (l, Cn (0, (1 : IntInf.int), C (0 : IntInf.int))), a_beta p' l); + And (Dvd (l, Cn ((0 : IntInf.int), (1 : IntInf.int), C (0 : IntInf.int))), + a_beta pa l); val d = delta q; val b = remdups eq_numa (map simpnum (beta q)); val a = remdups eq_numa (map simpnum (alpha q)); @@ -1551,18 +2140,16 @@ fun cooper p = let - val a = unita p; - val (q, aa) = a; - val (b, d) = aa; + val (q, (b, d)) = unita p; val js = iupt (1 : IntInf.int) d; val mq = simpfm (minusinf q); val md = evaldjf (fn j => simpfm (subst0 (C j) mq)) js; in - (if eqop eq_fma md T then T + (if eq_fm md T then T else let val qd = - evaldjf (fn ab as (ba, j) => simpfm (subst0 (Add (ba, C j)) q)) - (concat (map (fn ba => map (fn ab => (ba, ab)) js) b)); + evaldjf (fn (ba, j) => simpfm (subst0 (Add (ba, C j)) q)) + (concat_map (fn ba => map (fn a => (ba, a)) js) b); 
in decr (disj md qd) end) @@ -1669,37 +2256,19 @@ | qelim (Or (p, q)) = (fn qe => disj (qelim p qe) (qelim q qe)) | qelim (Imp (p, q)) = (fn qe => impa (qelim p qe) (qelim q qe)) | qelim (Iff (p, q)) = (fn qe => iffa (qelim p qe) (qelim q qe)) - | qelim T = (fn y => simpfm T) - | qelim F = (fn y => simpfm F) - | qelim (Lt u) = (fn y => simpfm (Lt u)) - | qelim (Le v) = (fn y => simpfm (Le v)) - | qelim (Gt w) = (fn y => simpfm (Gt w)) - | qelim (Ge x) = (fn y => simpfm (Ge x)) - | qelim (Eq y) = (fn ya => simpfm (Eq y)) - | qelim (NEq z) = (fn y => simpfm (NEq z)) - | qelim (Dvd (aa, ab)) = (fn y => simpfm (Dvd (aa, ab))) - | qelim (NDvd (ac, ad)) = (fn y => simpfm (NDvd (ac, ad))) - | qelim (Closed ap) = (fn y => simpfm (Closed ap)) - | qelim (NClosed aq) = (fn y => simpfm (NClosed aq)); + | qelim T = (fn _ => simpfm T) + | qelim F = (fn _ => simpfm F) + | qelim (Lt u) = (fn _ => simpfm (Lt u)) + | qelim (Le v) = (fn _ => simpfm (Le v)) + | qelim (Gt w) = (fn _ => simpfm (Gt w)) + | qelim (Ge x) = (fn _ => simpfm (Ge x)) + | qelim (Eq y) = (fn _ => simpfm (Eq y)) + | qelim (NEq z) = (fn _ => simpfm (NEq z)) + | qelim (Dvd (aa, ab)) = (fn _ => simpfm (Dvd (aa, ab))) + | qelim (NDvd (ac, ad)) = (fn _ => simpfm (NDvd (ac, ad))) + | qelim (Closed ap) = (fn _ => simpfm (Closed ap)) + | qelim (NClosed aq) = (fn _ => simpfm (NClosed aq)); fun pa p = qelim (prep p) cooper; -fun neg z = IntInf.< (z, (0 : IntInf.int)); - -fun nat_aux i n = - (if IntInf.<= (i, (0 : IntInf.int)) then n - else nat_aux (IntInf.- (i, (1 : IntInf.int))) (suc n)); - -fun adjust b = - (fn a as (q, r) => - (if IntInf.<= ((0 : IntInf.int), IntInf.- (r, b)) - then (IntInf.+ (IntInf.* ((2 : IntInf.int), q), (1 : IntInf.int)), - IntInf.- (r, b)) - else (IntInf.* ((2 : IntInf.int), q), r))); - -fun posDivAlg a b = - (if IntInf.< (a, b) orelse IntInf.<= (b, (0 : IntInf.int)) - then ((0 : IntInf.int), a) - else adjust b (posDivAlg a (IntInf.* ((2 : IntInf.int), b)))); - -end; (*struct GeneratedCooper*) +end; (*struct Generated_Cooper*) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Quotient/quotient_tacs.ML --- a/src/HOL/Tools/Quotient/quotient_tacs.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Quotient/quotient_tacs.ML Tue May 04 20:30:22 2010 +0200 @@ -48,7 +48,7 @@ fun atomize_thm thm = let - val thm' = Thm.freezeT (forall_intr_vars thm) (* FIXME/TODO: is this proper Isar-technology? *) + val thm' = Thm.legacy_freezeT (forall_intr_vars thm) (* FIXME/TODO: is this proper Isar-technology? 
*) val thm'' = Object_Logic.atomize (cprop_of thm') in @{thm equal_elim_rule1} OF [thm'', thm'] diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Quotient/quotient_typ.ML --- a/src/HOL/Tools/Quotient/quotient_typ.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Quotient/quotient_typ.ML Tue May 04 20:30:22 2010 +0200 @@ -45,7 +45,7 @@ val goals' = map (rpair []) goals fun after_qed' thms = after_qed (the_single thms) in - Proof.theorem_i NONE after_qed' [goals'] ctxt + Proof.theorem NONE after_qed' [goals'] ctxt end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Sledgehammer/metis_tactics.ML --- a/src/HOL/Tools/Sledgehammer/metis_tactics.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Sledgehammer/metis_tactics.ML Tue May 04 20:30:22 2010 +0200 @@ -123,14 +123,16 @@ in (map (hol_literal_to_fol mode) lits, types_sorts) end; (*Sign should be "true" for conjecture type constraints, "false" for type lits in clauses.*) -fun metis_of_typeLit pos (LTVar (s,x)) = metis_lit pos s [Metis.Term.Var x] - | metis_of_typeLit pos (LTFree (s,x)) = metis_lit pos s [Metis.Term.Fn(x,[])]; +fun metis_of_type_literals pos (TyLitVar (s, (s', _))) = + metis_lit pos s [Metis.Term.Var s'] + | metis_of_type_literals pos (TyLitFree (s, (s', _))) = + metis_lit pos s [Metis.Term.Fn (s',[])] fun default_sort _ (TVar _) = false | default_sort ctxt (TFree (x, s)) = (s = the_default [] (Variable.def_sort ctxt (x, ~1))); fun metis_of_tfree tf = - Metis.Thm.axiom (Metis.LiteralSet.singleton (metis_of_typeLit true tf)); + Metis.Thm.axiom (Metis.LiteralSet.singleton (metis_of_type_literals true tf)); fun hol_thm_to_fol is_conjecture ctxt mode th = let val thy = ProofContext.theory_of ctxt @@ -138,11 +140,12 @@ (literals_of_hol_thm thy mode o HOLogic.dest_Trueprop o prop_of) th in if is_conjecture then - (Metis.Thm.axiom (Metis.LiteralSet.fromList mlits), add_typs types_sorts) + (Metis.Thm.axiom (Metis.LiteralSet.fromList mlits), + add_type_literals types_sorts) else - let val tylits = add_typs (filter (not o default_sort ctxt) types_sorts) + let val tylits = add_type_literals (filter (not o default_sort ctxt) types_sorts) val mtylits = if Config.get ctxt type_lits - then map (metis_of_typeLit false) tylits else [] + then map (metis_of_type_literals false) tylits else [] in (Metis.Thm.axiom (Metis.LiteralSet.fromList(mtylits @ mlits)), []) end @@ -598,7 +601,9 @@ (*Extract TFree constraints from context to include as conjecture clauses*) fun init_tfrees ctxt = let fun add ((a,i),s) Ts = if i = ~1 then TFree(a,s) :: Ts else Ts - in add_typs (Vartab.fold add (#2 (Variable.constraints_of ctxt)) []) end; + in + add_type_literals (Vartab.fold add (#2 (Variable.constraints_of ctxt)) []) + end; (*transform isabelle type / arity clause to metis clause *) fun add_type_thm [] lmap = lmap @@ -669,7 +674,7 @@ val (mode, {axioms,tfrees}) = build_map mode ctxt cls ths val _ = if null tfrees then () else (trace_msg (fn () => "TFREE CLAUSES"); - app (fn tf => trace_msg (fn _ => tptp_of_typeLit true tf)) tfrees) + app (fn tf => trace_msg (fn _ => tptp_of_type_literal true tf NONE |> fst)) tfrees) val _ = trace_msg (fn () => "CLAUSES GIVEN TO METIS") val thms = map #1 axioms val _ = app (fn th => trace_msg (fn () => Metis.Thm.toString th)) thms @@ -693,10 +698,11 @@ val unused = th_cls_pairs |> map_filter (fn (name, cls) => if common_thm used cls then NONE else SOME name) in - if not (common_thm used cls) then - warning "Metis: The goal is provable because the context is \ - \inconsistent." 
- else if not (null unused) then + if not (null cls) andalso not (common_thm used cls) then + warning "Metis: The assumptions are inconsistent." + else + (); + if not (null unused) then warning ("Metis: Unused theorems: " ^ commas_quote unused ^ ".") else @@ -720,7 +726,7 @@ if exists_type type_has_topsort (prop_of st0) then raise METIS "Metis: Proof state contains the universal sort {}" else - (Meson.MESON neg_clausify + (Meson.MESON (maps neg_clausify) (fn cls => resolve_tac (FOL_SOLVE mode ctxt cls ths) 1) ctxt i THEN Meson_Tactic.expand_defs_tac st0) st0 end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Sledgehammer/sledgehammer_fact_filter.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_fact_filter.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_fact_filter.ML Tue May 04 20:30:22 2010 +0200 @@ -1,5 +1,6 @@ (* Title: HOL/Tools/Sledgehammer/sledgehammer_fact_filter.ML Author: Jia Meng, Cambridge University Computer Laboratory, NICTA + Author: Jasmin Blanchette, TU Muenchen *) signature SLEDGEHAMMER_FACT_FILTER = @@ -9,6 +10,7 @@ type axiom_name = Sledgehammer_HOL_Clause.axiom_name type hol_clause = Sledgehammer_HOL_Clause.hol_clause type hol_clause_id = Sledgehammer_HOL_Clause.hol_clause_id + type relevance_override = {add: Facts.ref list, del: Facts.ref list, @@ -18,15 +20,15 @@ val tfree_classes_of_terms : term list -> string list val type_consts_of_terms : theory -> term list -> string list val get_relevant_facts : - bool -> real -> real -> bool option -> bool -> int -> bool - -> relevance_override -> Proof.context * (thm list * 'a) -> thm list + bool -> real -> real -> bool -> int -> bool -> relevance_override + -> Proof.context * (thm list * 'a) -> thm list -> (thm * (string * int)) list - val prepare_clauses : bool option -> bool -> thm list -> thm list -> - (thm * (axiom_name * hol_clause_id)) list -> - (thm * (axiom_name * hol_clause_id)) list -> theory -> - axiom_name vector * - (hol_clause list * hol_clause list * hol_clause list * - hol_clause list * classrel_clause list * arity_clause list) + val prepare_clauses : + bool -> thm list -> thm list -> (thm * (axiom_name * hol_clause_id)) list + -> (thm * (axiom_name * hol_clause_id)) list -> theory + -> axiom_name vector + * (hol_clause list * hol_clause list * hol_clause list * + hol_clause list * classrel_clause list * arity_clause list) end; structure Sledgehammer_Fact_Filter : SLEDGEHAMMER_FACT_FILTER = @@ -390,13 +392,14 @@ fun is_multi (a, ths) = length ths > 1 orelse String.isSuffix ".axioms" a; -(*The single theorems go BEFORE the multiple ones. Blacklist is applied to all.*) +(* The single-name theorems go after the multiple-name ones, so that single + names are preferred when both are available. *) fun name_thm_pairs respect_no_atp ctxt = let val (mults, singles) = List.partition is_multi (all_valid_thms respect_no_atp ctxt) - val ps = [] |> fold add_multi_names mults - |> fold add_single_names singles + val ps = [] |> fold add_single_names singles + |> fold add_multi_names mults in ps |> respect_no_atp ? 
filter_out (No_ATPs.member ctxt o snd) end; fun check_named ("", th) = @@ -499,13 +502,10 @@ likely to lead to unsound proofs.*) fun remove_unwanted_clauses cls = filter (not o unwanted o prop_of o fst) cls; -fun is_first_order thy higher_order goal_cls = - case higher_order of - NONE => forall (Meson.is_fol_term thy) (map prop_of goal_cls) - | SOME b => not b +fun is_first_order thy = forall (Meson.is_fol_term thy) o map prop_of fun get_relevant_facts respect_no_atp relevance_threshold convergence - higher_order follow_defs max_new theory_relevant + follow_defs max_new theory_relevant (relevance_override as {add, only, ...}) (ctxt, (chain_ths, th)) goal_cls = if (only andalso null add) orelse relevance_threshold > 1.0 then @@ -513,7 +513,7 @@ else let val thy = ProofContext.theory_of ctxt - val is_FO = is_first_order thy higher_order goal_cls + val is_FO = is_first_order thy goal_cls val included_cls = get_all_lemmas respect_no_atp ctxt |> cnf_rules_pairs thy |> make_unique |> restrict_to_logic thy is_FO @@ -526,7 +526,7 @@ (* prepare for passing to writer, create additional clauses based on the information from extra_cls *) -fun prepare_clauses higher_order dfg goal_cls chain_ths axcls extra_cls thy = +fun prepare_clauses dfg goal_cls chain_ths axcls extra_cls thy = let (* add chain thms *) val chain_cls = @@ -534,7 +534,7 @@ (map (`Thm.get_name_hint) chain_ths)) val axcls = chain_cls @ axcls val extra_cls = chain_cls @ extra_cls - val is_FO = is_first_order thy higher_order goal_cls + val is_FO = is_first_order thy goal_cls val ccls = subtract_cls extra_cls goal_cls val _ = app (fn th => trace_msg (fn _ => Display.string_of_thm_global thy th)) ccls val ccltms = map prop_of ccls diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Sledgehammer/sledgehammer_fact_minimizer.ML --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_fact_minimizer.ML Tue May 04 20:30:22 2010 +0200 @@ -0,0 +1,131 @@ +(* Title: HOL/Tools/Sledgehammer/sledgehammer_fact_minimizer.ML + Author: Philipp Meyer, TU Muenchen + Author: Jasmin Blanchette, TU Muenchen + +Minimization of theorem list for Metis using automatic theorem provers. +*) + +signature SLEDGEHAMMER_FACT_MINIMIZER = +sig + type params = ATP_Manager.params + type prover_result = ATP_Manager.prover_result + + val minimize_theorems : + params -> int -> int -> Proof.state -> (string * thm list) list + -> (string * thm list) list option * string +end; + +structure Sledgehammer_Fact_Minimizer : SLEDGEHAMMER_FACT_MINIMIZER = +struct + +open Sledgehammer_Util +open Sledgehammer_Fact_Preprocessor +open Sledgehammer_Proof_Reconstruct +open ATP_Manager + +(* Linear minimization algorithm *) + +fun linear_minimize test s = + let + fun aux [] p = p + | aux (x :: xs) (needed, result) = + case test (xs @ needed) of + SOME result => aux xs (needed, result) + | NONE => aux xs (x :: needed, result) + in aux s end + + +(* wrapper for calling external prover *) + +fun string_for_failure Unprovable = "Unprovable." + | string_for_failure TimedOut = "Timed out." + | string_for_failure OutOfResources = "Failed." + | string_for_failure OldSpass = "Error." + | string_for_failure MalformedOutput = "Error." + | string_for_failure UnknownError = "Failed." +fun string_for_outcome NONE = "Success." 
+ | string_for_outcome (SOME failure) = string_for_failure failure + +fun sledgehammer_test_theorems (params as {full_types, ...} : params) prover + timeout subgoal state filtered_clauses name_thms_pairs = + let + val num_theorems = length name_thms_pairs + val _ = priority ("Testing " ^ string_of_int num_theorems ^ + " theorem" ^ plural_s num_theorems ^ "...") + val name_thm_pairs = maps (fn (n, ths) => map (pair n) ths) name_thms_pairs + val axclauses = cnf_rules_pairs (Proof.theory_of state) name_thm_pairs + val {context = ctxt, facts, goal} = Proof.goal state + val problem = + {subgoal = subgoal, goal = (ctxt, (facts, goal)), + relevance_override = {add = [], del = [], only = false}, + axiom_clauses = SOME axclauses, + filtered_clauses = SOME (the_default axclauses filtered_clauses)} + in + prover params (K "") timeout problem + |> tap (fn result : prover_result => + priority (string_for_outcome (#outcome result))) + end + +(* minimalization of thms *) + +fun minimize_theorems (params as {debug, atps, minimize_timeout, isar_proof, + shrink_factor, ...}) + i n state name_thms_pairs = + let + val thy = Proof.theory_of state + val prover = case atps of + [atp_name] => get_prover thy atp_name + | _ => error "Expected a single ATP." + val msecs = Time.toMilliseconds minimize_timeout + val _ = + priority ("Sledgehammer minimizer: ATP " ^ quote (the_single atps) ^ + " with a time limit of " ^ string_of_int msecs ^ " ms.") + val test_thms_fun = + sledgehammer_test_theorems params prover minimize_timeout i state + fun test_thms filtered thms = + case test_thms_fun filtered thms of + (result as {outcome = NONE, ...}) => SOME result + | _ => NONE + + val {context = ctxt, facts, goal} = Proof.goal state; + in + (* try prove first to check result and get used theorems *) + (case test_thms_fun NONE name_thms_pairs of + result as {outcome = NONE, pool, internal_thm_names, conjecture_shape, + filtered_clauses, ...} => + let + val used = internal_thm_names |> Vector.foldr (op ::) [] + |> sort_distinct string_ord + val to_use = + if length used < length name_thms_pairs then + filter (fn (name1, _) => exists (curry (op =) name1) used) + name_thms_pairs + else name_thms_pairs + val (min_thms, {proof, internal_thm_names, ...}) = + linear_minimize (test_thms (SOME filtered_clauses)) to_use + ([], result) + val m = length min_thms + val _ = priority (cat_lines + ["Minimized: " ^ string_of_int m ^ " theorem" ^ plural_s m] ^ ".") + in + (SOME min_thms, + proof_text isar_proof + (pool, debug, shrink_factor, ctxt, conjecture_shape) + (K "", proof, internal_thm_names, goal, i) |> fst) + end + | {outcome = SOME TimedOut, ...} => + (NONE, "Timeout: You can increase the time limit using the \"timeout\" \ + \option (e.g., \"timeout = " ^ + string_of_int (10 + msecs div 1000) ^ " s\").") + | {outcome = SOME UnknownError, ...} => + (* Failure sometimes mean timeout, unfortunately. *) + (NONE, "Failure: No proof was found with the current time limit. 
You \ + \can increase the time limit using the \"timeout\" \ + \option (e.g., \"timeout = " ^ + string_of_int (10 + msecs div 1000) ^ " s\").") + | {message, ...} => (NONE, "ATP error: " ^ message)) + handle Sledgehammer_HOL_Clause.TRIVIAL => (SOME [], metis_line i n []) + | ERROR msg => (NONE, "Error: " ^ msg) + end + +end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Sledgehammer/sledgehammer_fact_preprocessor.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_fact_preprocessor.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_fact_preprocessor.ML Tue May 04 20:30:22 2010 +0200 @@ -1,5 +1,6 @@ (* Title: HOL/Tools/Sledgehammer/sledgehammer_fact_preprocessor.ML Author: Jia Meng, Cambridge University Computer Laboratory + Author: Jasmin Blanchette, TU Muenchen Transformation of axiom rules (elim/intro/etc) into CNF forms. *) @@ -9,16 +10,19 @@ val trace: bool Unsynchronized.ref val trace_msg: (unit -> string) -> unit val skolem_prefix: string + val skolem_infix: string val cnf_axiom: theory -> thm -> thm list val multi_base_blacklist: string list val bad_for_atp: thm -> bool val type_has_topsort: typ -> bool val cnf_rules_pairs: theory -> (string * thm) list -> (thm * (string * int)) list - val neg_clausify: thm list -> thm list - val combinators: thm -> thm - val neg_conjecture_clauses: Proof.context -> thm -> int -> thm list * (string * typ) list val suppress_endtheory: bool Unsynchronized.ref (*for emergency use where endtheory causes problems*) + val strip_subgoal : thm -> int -> (string * typ) list * term list * term + val neg_clausify: thm -> thm list + val neg_conjecture_clauses: + Proof.context -> thm -> int -> thm list list * (string * typ) list + val neg_clausify_tac: Proof.context -> int -> tactic val setup: theory -> theory end; @@ -31,6 +35,7 @@ fun trace_msg msg = if !trace then tracing (msg ()) else (); val skolem_prefix = "sko_" +val skolem_infix = "$" fun freeze_thm th = #1 (Drule.legacy_freeze_thaw th); @@ -62,6 +67,13 @@ (**** SKOLEMIZATION BY INFERENCE (lcp) ****) +(*Keep the full complexity of the original name*) +fun flatten_name s = space_implode "_X" (Long_Name.explode s); + +fun skolem_name thm_name nref var_name = + skolem_prefix ^ thm_name ^ "_" ^ Int.toString (Unsynchronized.inc nref) ^ + skolem_infix ^ (if var_name = "" then "g" else flatten_name var_name) + fun rhs_extra_types lhsT rhs = let val lhs_vars = Term.add_tfreesT lhsT [] fun add_new_TFrees (TFree v) = @@ -75,10 +87,10 @@ fun declare_skofuns s th = let val nref = Unsynchronized.ref 0 (* FIXME ??? *) - fun dec_sko (Const (@{const_name Ex}, _) $ (xtp as Abs (_, T, p))) (axs, thy) = + fun dec_sko (Const (@{const_name Ex}, _) $ (xtp as Abs (s', T, p))) (axs, thy) = (*Existential: declare a Skolem function, then insert into body and continue*) let - val cname = skolem_prefix ^ s ^ "_" ^ Int.toString (Unsynchronized.inc nref) + val cname = skolem_name s nref s' val args0 = OldTerm.term_frees xtp (*get the formal parameter list*) val Ts = map type_of args0 val extraTs = rhs_extra_types (Ts ---> T) xtp @@ -107,13 +119,13 @@ (*Traverse a theorem, accumulating Skolem function definitions.*) fun assume_skofuns s th = let val sko_count = Unsynchronized.ref 0 (* FIXME ??? 
*) - fun dec_sko (Const (@{const_name Ex}, _) $ (xtp as Abs(_,T,p))) defs = + fun dec_sko (Const (@{const_name Ex}, _) $ (xtp as Abs (s', T, p))) defs = (*Existential: declare a Skolem function, then insert into body and continue*) let val skos = map (#1 o Logic.dest_equals) defs (*existing sko fns*) val args = subtract (op =) skos (OldTerm.term_frees xtp) (*the formal parameters*) val Ts = map type_of args val cT = Ts ---> T - val id = skolem_prefix ^ s ^ "_" ^ Int.toString (Unsynchronized.inc sko_count) + val id = skolem_name s sko_count s' val c = Free (id, cT) val rhs = list_abs_free (map dest_Free args, HOLogic.choice_const T $ xtp) @@ -334,9 +346,6 @@ ["defs", "select_defs", "update_defs", "induct", "inducts", "split", "splits", "split_asm", "cases", "ext_cases"]; -(*Keep the full complexity of the original name*) -fun flatten_name s = space_implode "_X" (Long_Name.explode s); - fun fake_name th = if Thm.has_name_hint th then flatten_name (Thm.get_name_hint th) else gensym "unknown_thm_"; @@ -346,7 +355,7 @@ if member (op =) multi_base_blacklist (Long_Name.base_name s) orelse bad_for_atp th then [] else let - val ctxt0 = Variable.thm_context th + val ctxt0 = Variable.global_thm_context th val (nnfth, ctxt1) = to_nnf th ctxt0 val (cnfs, ctxt2) = Meson.make_cnf (assume_skolem_of_def s nnfth) nnfth ctxt1 in cnfs |> map combinators |> Variable.export ctxt2 ctxt0 |> Meson.finish_cnf end @@ -399,7 +408,7 @@ local fun skolem_def (name, th) thy = - let val ctxt0 = Variable.thm_context th in + let val ctxt0 = Variable.global_thm_context th in (case try (to_nnf th) ctxt0 of NONE => (NONE, thy) | SOME (nnfth, ctxt1) => @@ -455,19 +464,31 @@ lambda_free, but then the individual theory caches become much bigger.*) +fun strip_subgoal goal i = + let + val (t, frees) = Logic.goal_params (prop_of goal) i + val hyp_ts = t |> Logic.strip_assums_hyp |> map (curry subst_bounds frees) + val concl_t = t |> Logic.strip_assums_concl |> curry subst_bounds frees + in (rev (map dest_Free frees), hyp_ts, concl_t) end + (*** Converting a subgoal into negated conjecture clauses. ***) fun neg_skolemize_tac ctxt = EVERY' [rtac ccontr, Object_Logic.atomize_prems_tac, Meson.skolemize_tac ctxt]; +fun neg_skolemize_tac ctxt = + EVERY' [rtac ccontr, Object_Logic.atomize_prems_tac, Meson.skolemize_tac ctxt]; + val neg_clausify = - Meson.make_clauses_unsorted #> map combinators #> Meson.finish_cnf; + single #> Meson.make_clauses_unsorted #> map combinators #> Meson.finish_cnf fun neg_conjecture_clauses ctxt st0 n = let val st = Seq.hd (neg_skolemize_tac ctxt n st0) val ({params, prems, ...}, _) = Subgoal.focus (Variable.set_body false ctxt) n st - in (neg_clausify prems, map (Term.dest_Free o Thm.term_of o #2) params) end; + in + (map neg_clausify prems, map (Term.dest_Free o Thm.term_of o #2) params) + end (*Conversion of a subgoal to conjecture clauses. Each clause has leading !!-bound universal variables, to express generality. 
*) @@ -479,30 +500,14 @@ [Subgoal.FOCUS (fn {prems, ...} => (Method.insert_tac - (map forall_intr_vars (neg_clausify prems)) i)) ctxt, + (map forall_intr_vars (maps neg_clausify prems)) i)) ctxt, REPEAT_DETERM_N (length ts) o etac thin_rl] i end); -val neg_clausify_setup = - Method.setup @{binding neg_clausify} (Scan.succeed (SIMPLE_METHOD' o neg_clausify_tac)) - "conversion of goal to conjecture clauses"; - - -(** Attribute for converting a theorem into clauses **) - -val clausify_setup = - Attrib.setup @{binding clausify} - (Scan.lift OuterParse.nat >> - (fn i => Thm.rule_attribute (fn context => fn th => - Meson.make_meta_clause (nth (cnf_axiom (Context.theory_of context) th) i)))) - "conversion of theorem to clauses"; - (** setup **) val setup = - neg_clausify_setup #> - clausify_setup #> perhaps saturate_skolem_cache #> Theory.at_end clause_cache_endtheory; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Sledgehammer/sledgehammer_fol_clause.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_fol_clause.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_fol_clause.ML Tue May 04 20:30:22 2010 +0200 @@ -1,5 +1,6 @@ (* Title: HOL/Tools/Sledgehammer/sledgehammer_fol_clause.ML Author: Jia Meng, Cambridge University Computer Laboratory + Author: Jasmin Blanchette, TU Muenchen Storing/printing FOL clauses and arity clauses. Typed equality is treated differently. @@ -43,9 +44,11 @@ TyConstr of name * fol_type list val string_of_fol_type : fol_type -> name_pool option -> string * name_pool option - datatype type_literal = LTVar of string * string | LTFree of string * string + datatype type_literal = + TyLitVar of string * name | + TyLitFree of string * name exception CLAUSE of string * term - val add_typs : typ list -> type_literal list + val add_type_literals : typ list -> type_literal list val get_tvar_strs: typ list -> string list datatype arLit = TConsLit of class * string * string list @@ -67,7 +70,7 @@ arity_clause -> int Symtab.table -> int Symtab.table val init_functab: int Symtab.table val dfg_sign: bool -> string -> string - val dfg_of_typeLit: bool -> type_literal -> string + val dfg_of_type_literal: bool -> type_literal -> string val gen_dfg_cls: int * string * kind * string list * string list * string list -> string val string_of_preds: (string * Int.int) list -> string val string_of_funcs: (string * int) list -> string @@ -78,7 +81,8 @@ val dfg_classrel_clause: classrel_clause -> string val dfg_arity_clause: arity_clause -> string val tptp_sign: bool -> string -> string - val tptp_of_typeLit : bool -> type_literal -> string + val tptp_of_type_literal : + bool -> type_literal -> name_pool option -> string * name_pool option val gen_tptp_cls : int * string * kind * string list * string list -> string val tptp_tfree_clause : string -> string val tptp_arity_clause : arity_clause -> string @@ -106,10 +110,10 @@ fun union_all xss = fold (union (op =)) xss [] -(* Provide readable names for the more common symbolic functions *) +(* Readable names for the more common symbolic functions. Do not mess with the + last six entries of the table unless you know what you are doing. 
*) val const_trans_table = Symtab.make [(@{const_name "op ="}, "equal"), - (@{const_name Orderings.less_eq}, "lessequals"), (@{const_name "op &"}, "and"), (@{const_name "op |"}, "or"), (@{const_name "op -->"}, "implies"), @@ -119,10 +123,11 @@ (@{const_name COMBK}, "COMBK"), (@{const_name COMBB}, "COMBB"), (@{const_name COMBC}, "COMBC"), - (@{const_name COMBS}, "COMBS")]; + (@{const_name COMBS}, "COMBS")] val type_const_trans_table = - Symtab.make [("*", "prod"), ("+", "sum"), ("~=>", "map")]; + Symtab.make [(@{type_name "*"}, "prod"), + (@{type_name "+"}, "sum")] (*Escaping of special characters. Alphanumeric characters are left unchanged. @@ -171,9 +176,7 @@ fun paren_pack [] = "" (*empty argument list*) | paren_pack strings = "(" ^ commas strings ^ ")"; -(*TSTP format uses (...) rather than the old [...]*) -fun tptp_pack strings = "(" ^ space_implode " | " strings ^ ")"; - +fun tptp_clause strings = "(" ^ space_implode " | " strings ^ ")" (*Remove the initial ' character from a type variable, if it is present*) fun trim_type_var s = @@ -190,11 +193,16 @@ tvar_prefix ^ (ascii_of_indexname (trim_type_var x,i)); fun make_fixed_type_var x = tfree_prefix ^ (ascii_of (trim_type_var x)); -(* HACK because SPASS 3.0 truncates identifiers to 63 characters. (This is - solved in 3.7 and perhaps in earlier versions too.) *) -(* 32-bit hash, so we expect no collisions. *) +val max_dfg_symbol_length = + if is_new_spass_version then 1000000 (* arbitrary large number *) else 63 + +(* HACK because SPASS 3.0 truncates identifiers to 63 characters. *) fun controlled_length dfg s = - if dfg andalso size s > 60 then Word.toString (hashw_string (s, 0w0)) else s; + if dfg andalso size s > max_dfg_symbol_length then + String.extract (s, 0, SOME (max_dfg_symbol_length div 2 - 1)) ^ "__" ^ + String.extract (s, size s - max_dfg_symbol_length div 2 + 1, NONE) + else + s fun lookup_const dfg c = case Symtab.lookup const_trans_table c of @@ -223,9 +231,9 @@ fun empty_name_pool readable_names = if readable_names then SOME (`I Symtab.empty) else NONE +fun pool_fold f xs z = pair z #> fold_rev (fn x => uncurry (f x)) xs fun pool_map f xs = - fold_rev (fn x => fn (ys, pool) => f x pool |>> (fn y => y :: ys)) xs - o pair [] + pool_fold (fn x => fn ys => fn pool => f x pool |>> (fn y => y :: ys)) xs [] fun add_nice_name full_name nice_prefix j the_pool = let @@ -258,7 +266,9 @@ val s' = if s' = "" orelse not (Char.isAlpha (String.sub (s', 0))) then "X" ^ s' else s' - val s' = if s' = "op" then full_name else s' + (* Avoid "equal", since it's built into ATPs; and "op" is very ambiguous + ("op &", "op |", etc.). *) + val s' = if s' = "equal" orelse s' = "op" then full_name else s' in case (Char.isLower (String.sub (full_name, 0)), Char.isLower (String.sub (s', 0))) of @@ -297,8 +307,10 @@ val (ss, pool) = pool_map string_of_fol_type tys pool in (s ^ paren_pack ss, pool) end -(*First string is the type class; the second is a TVar or TFfree*) -datatype type_literal = LTVar of string * string | LTFree of string * string; +(* The first component is the type class; the second is a TVar or TFree. 
*) +datatype type_literal = + TyLitVar of string * name | + TyLitFree of string * name exception CLAUSE of string * term; @@ -308,21 +320,21 @@ let val sorts = sorts_on_typs_aux ((x,i), ss) in if s = "HOL.type" then sorts - else if i = ~1 then LTFree(make_type_class s, make_fixed_type_var x) :: sorts - else LTVar(make_type_class s, make_schematic_type_var (x,i)) :: sorts + else if i = ~1 then TyLitFree (make_type_class s, `make_fixed_type_var x) :: sorts + else TyLitVar (make_type_class s, (make_schematic_type_var (x,i), x)) :: sorts end; fun sorts_on_typs (TFree (a,s)) = sorts_on_typs_aux ((a,~1),s) | sorts_on_typs (TVar (v,s)) = sorts_on_typs_aux (v,s); -fun pred_of_sort (LTVar (s,ty)) = (s,1) - | pred_of_sort (LTFree (s,ty)) = (s,1) +fun pred_of_sort (TyLitVar (s, _)) = (s, 1) + | pred_of_sort (TyLitFree (s, _)) = (s, 1) (*Given a list of sorted type variables, return a list of type literals.*) -fun add_typs Ts = fold (union (op =)) (map sorts_on_typs Ts) [] +fun add_type_literals Ts = fold (union (op =)) (map sorts_on_typs Ts) [] (*The correct treatment of TFrees like 'a in lemmas (axiom clauses) is not clear. - * Ignoring them leads to unsound proofs, since we do nothing to ensure that 'a + * Ignoring them leads to unsound proofs, since we do nothing to ensure that 'a in a lemma has the same sort as 'a in the conjecture. * Deleting such clauses will lead to problems with locales in other use of local results where 'a is fixed. Probably we should delete clauses unless the sorts agree. @@ -490,8 +502,10 @@ fun dfg_sign true s = s | dfg_sign false s = "not(" ^ s ^ ")" -fun dfg_of_typeLit pos (LTVar (s,ty)) = dfg_sign pos (s ^ "(" ^ ty ^ ")") - | dfg_of_typeLit pos (LTFree (s,ty)) = dfg_sign pos (s ^ "(" ^ ty ^ ")"); +fun dfg_of_type_literal pos (TyLitVar (s, (s', _))) = + dfg_sign pos (s ^ "(" ^ s' ^ ")") + | dfg_of_type_literal pos (TyLitFree (s, (s', _))) = + dfg_sign pos (s ^ "(" ^ s' ^ ")"); (*Enclose the clause body by quantifiers, if necessary*) fun dfg_forall [] body = body @@ -554,21 +568,23 @@ fun tptp_sign true s = s | tptp_sign false s = "~ " ^ s -fun tptp_of_typeLit pos (LTVar (s, ty)) = tptp_sign pos (s ^ "(" ^ ty ^ ")") - | tptp_of_typeLit pos (LTFree (s, ty)) = tptp_sign pos (s ^ "(" ^ ty ^ ")") +fun tptp_of_type_literal pos (TyLitVar (s, name)) = + nice_name name #>> (fn s' => tptp_sign pos (s ^ "(" ^ s' ^ ")")) + | tptp_of_type_literal pos (TyLitFree (s, name)) = + nice_name name #>> (fn s' => tptp_sign pos (s ^ "(" ^ s' ^ ")")) fun tptp_cnf name kind formula = "cnf(" ^ name ^ ", " ^ kind ^ ",\n " ^ formula ^ ").\n" fun gen_tptp_cls (cls_id, ax_name, Axiom, lits, tylits) = tptp_cnf (string_of_clausename (cls_id, ax_name)) "axiom" - (tptp_pack (tylits @ lits)) + (tptp_clause (tylits @ lits)) | gen_tptp_cls (cls_id, ax_name, Conjecture, lits, _) = tptp_cnf (string_of_clausename (cls_id, ax_name)) "negated_conjecture" - (tptp_pack lits) + (tptp_clause lits) fun tptp_tfree_clause tfree_lit = - tptp_cnf "tfree_tcs" "negated_conjecture" (tptp_pack [tfree_lit]) + tptp_cnf "tfree_tcs" "negated_conjecture" (tptp_clause [tfree_lit]) fun tptp_of_arLit (TConsLit (c,t,args)) = tptp_sign true (make_type_class c ^ "(" ^ t ^ paren_pack args ^ ")") @@ -577,11 +593,11 @@ fun tptp_arity_clause (ArityClause{axiom_name,conclLit,premLits,...}) = tptp_cnf (string_of_ar axiom_name) "axiom" - (tptp_pack (map tptp_of_arLit (conclLit :: premLits))) + (tptp_clause (map tptp_of_arLit (conclLit :: premLits))) fun tptp_classrelLits sub sup = let val tvar = "(T)" - in tptp_pack [tptp_sign false 
(sub^tvar), tptp_sign true (sup^tvar)] end; + in tptp_clause [tptp_sign false (sub^tvar), tptp_sign true (sup^tvar)] end; fun tptp_classrel_clause (ClassrelClause {axiom_name,subclass,superclass,...}) = tptp_cnf axiom_name "axiom" (tptp_classrelLits subclass superclass) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Sledgehammer/sledgehammer_hol_clause.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_hol_clause.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_hol_clause.ML Tue May 04 20:30:22 2010 +0200 @@ -1,5 +1,6 @@ -(* Title: HOL/Sledgehammer/sledgehammer_hol_clause.ML +(* Title: HOL/Tools/Sledgehammer/sledgehammer_hol_clause.ML Author: Jia Meng, NICTA + Author: Jasmin Blanchette, TU Muenchen FOL clauses translated from HOL formulae. *) @@ -7,6 +8,7 @@ signature SLEDGEHAMMER_HOL_CLAUSE = sig type name = Sledgehammer_FOL_Clause.name + type name_pool = Sledgehammer_FOL_Clause.name_pool type kind = Sledgehammer_FOL_Clause.kind type fol_type = Sledgehammer_FOL_Clause.fol_type type classrel_clause = Sledgehammer_FOL_Clause.classrel_clause @@ -36,10 +38,10 @@ hol_clause list val write_tptp_file : bool -> bool -> bool -> Path.T -> hol_clause list * hol_clause list * hol_clause list * hol_clause list * - classrel_clause list * arity_clause list -> unit + classrel_clause list * arity_clause list -> name_pool option * int val write_dfg_file : bool -> bool -> Path.T -> hol_clause list * hol_clause list * hol_clause list * hol_clause list * - classrel_clause list * arity_clause list -> unit + classrel_clause list * arity_clause list -> name_pool option * int end structure Sledgehammer_HOL_Clause : SLEDGEHAMMER_HOL_CLAUSE = @@ -54,7 +56,7 @@ If "explicit_apply" is false, each function will be directly applied to as many arguments as possible, avoiding use of the "apply" operator. Use of - hBOOL is also minimized. + "hBOOL" is also minimized. *) fun min_arity_of const_min_arity c = the_default 0 (Symtab.lookup const_min_arity c); @@ -300,9 +302,13 @@ (* Given a clause, returns its literals paired with a list of literals concerning TFrees; the latter should only occur in conjecture clauses. 
*) -fun tptp_type_literals params pos (HOLClause {literals, ctypes_sorts, ...}) = - pool_map (tptp_literal params) literals - #>> rpair (map (tptp_of_typeLit pos) (add_typs ctypes_sorts)) +fun tptp_type_literals params pos (HOLClause {literals, ctypes_sorts, ...}) + pool = + let + val (lits, pool) = pool_map (tptp_literal params) literals pool + val (tylits, pool) = pool_map (tptp_of_type_literal pos) + (add_type_literals ctypes_sorts) pool + in ((lits, tylits), pool) end fun tptp_clause params (cls as HOLClause {axiom_name, clause_id, kind, ...}) pool = @@ -321,7 +327,7 @@ fun dfg_type_literals params pos (HOLClause {literals, ctypes_sorts, ...}) = pool_map (dfg_literal params) literals - #>> rpair (map (dfg_of_typeLit pos) (add_typs ctypes_sorts)) + #>> rpair (map (dfg_of_type_literal pos) (add_type_literals ctypes_sorts)) fun get_uvars (CombConst _) vars pool = (vars, pool) | get_uvars (CombVar (name, _)) vars pool = @@ -352,19 +358,19 @@ fun add_types tvars = fold add_fol_type_funcs tvars fun add_decls (full_types, explicit_apply, cma, cnh) - (CombConst ((c, _), _, tvars)) (funcs, preds) = - if c = "equal" then - (add_types tvars funcs, preds) - else - let val arity = min_arity_of cma c - val ntys = if not full_types then length tvars else 0 - val addit = Symtab.update(c, arity+ntys) - in - if needs_hBOOL explicit_apply cnh c then - (add_types tvars (addit funcs), preds) - else - (add_types tvars funcs, addit preds) - end + (CombConst ((c, _), ctp, tvars)) (funcs, preds) = + (if c = "equal" then + (add_types tvars funcs, preds) + else + let val arity = min_arity_of cma c + val ntys = if not full_types then length tvars else 0 + val addit = Symtab.update(c, arity + ntys) + in + if needs_hBOOL explicit_apply cnh c then + (add_types tvars (addit funcs), preds) + else + (add_types tvars funcs, addit preds) + end) |>> full_types ? 
add_fol_type_funcs ctp | add_decls _ (CombVar (_, ctp)) (funcs, preds) = (add_fol_type_funcs ctp funcs, preds) | add_decls params (CombApp (P, Q)) decls = @@ -485,8 +491,8 @@ fold count_constants_clause conjectures (Symtab.empty, Symtab.empty) |> fold count_constants_clause extra_clauses |> fold count_constants_clause helper_clauses - val _ = List.app (display_arity explicit_apply const_needs_hBOOL) - (Symtab.dest (const_min_arity)) + val _ = app (display_arity explicit_apply const_needs_hBOOL) + (Symtab.dest (const_min_arity)) in (const_min_arity, const_needs_hBOOL) end else (Symtab.empty, Symtab.empty); @@ -499,7 +505,9 @@ fun write_tptp_file readable_names full_types explicit_apply file clauses = let fun section _ [] = [] - | section name ss = "\n% " ^ name ^ plural_s (length ss) ^ "\n" :: ss + | section name ss = + "\n% " ^ name ^ plural_s (length ss) ^ " (" ^ Int.toString (length ss) ^ + ")\n" :: ss val pool = empty_name_pool readable_names val (conjectures, axclauses, _, helper_clauses, classrel_clauses, arity_clauses) = clauses @@ -514,17 +522,19 @@ val arity_clss = map tptp_arity_clause arity_clauses val (helper_clss, pool) = pool_map (apfst fst oo tptp_clause params) helper_clauses pool - in - File.write_list file - (header () :: - section "Relevant fact" ax_clss @ - section "Type variable" tfree_clss @ - section "Conjecture" conjecture_clss @ - section "Class relationship" classrel_clss @ - section "Arity declaration" arity_clss @ - section "Helper fact" helper_clss) - end - + val conjecture_offset = + length ax_clss + length classrel_clss + length arity_clss + + length helper_clss + val _ = + File.write_list file + (header () :: + section "Relevant fact" ax_clss @ + section "Class relationship" classrel_clss @ + section "Arity declaration" arity_clss @ + section "Helper fact" helper_clss @ + section "Conjecture" conjecture_clss @ + section "Type variable" tfree_clss) + in (pool, conjecture_offset) end (* DFG format *) @@ -540,30 +550,33 @@ val params = (full_types, explicit_apply, cma, cnh) val ((conjecture_clss, tfree_litss), pool) = pool_map (dfg_clause params) conjectures pool |>> ListPair.unzip - and probname = Path.implode (Path.base file) + and problem_name = Path.implode (Path.base file) val (axstrs, pool) = pool_map (apfst fst oo dfg_clause params) axclauses pool val tfree_clss = map dfg_tfree_clause (union_all tfree_litss) val (helper_clauses_strs, pool) = pool_map (apfst fst oo dfg_clause params) helper_clauses pool val (funcs, cl_preds) = decls_of_clauses params (helper_clauses @ conjectures @ axclauses) arity_clauses and ty_preds = preds_of_clauses axclauses classrel_clauses arity_clauses - in - File.write_list file - (header () :: - string_of_start probname :: - string_of_descrip probname :: - string_of_symbols (string_of_funcs funcs) - (string_of_preds (cl_preds @ ty_preds)) :: - "list_of_clauses(axioms, cnf).\n" :: - axstrs @ - map dfg_classrel_clause classrel_clauses @ - map dfg_arity_clause arity_clauses @ - helper_clauses_strs @ - ["end_of_list.\n\nlist_of_clauses(conjectures, cnf).\n"] @ - tfree_clss @ - conjecture_clss @ - ["end_of_list.\n\n", - "end_problem.\n"]) - end + val conjecture_offset = + length axclauses + length classrel_clauses + length arity_clauses + + length helper_clauses + val _ = + File.write_list file + (header () :: + string_of_start problem_name :: + string_of_descrip problem_name :: + string_of_symbols (string_of_funcs funcs) + (string_of_preds (cl_preds @ ty_preds)) :: + "list_of_clauses(axioms, cnf).\n" :: + axstrs @ + map 
dfg_classrel_clause classrel_clauses @ + map dfg_arity_clause arity_clauses @ + helper_clauses_strs @ + ["end_of_list.\n\nlist_of_clauses(conjectures, cnf).\n"] @ + conjecture_clss @ + tfree_clss @ + ["end_of_list.\n\n", + "end_problem.\n"]) + in (pool, conjecture_offset) end end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_isar.ML Tue May 04 20:30:22 2010 +0200 @@ -1,4 +1,4 @@ -(* Title: HOL/Sledgehammer/sledgehammer_isar.ML +(* Title: HOL/Tools/Sledgehammer/sledgehammer_isar.ML Author: Jasmin Blanchette, TU Muenchen Adds "sledgehammer" and related commands to Isabelle/Isar's outer syntax. @@ -8,19 +8,46 @@ sig type params = ATP_Manager.params + val atps: string Unsynchronized.ref + val timeout: int Unsynchronized.ref + val full_types: bool Unsynchronized.ref val default_params : theory -> (string * string) list -> params + val setup: theory -> theory end; structure Sledgehammer_Isar : SLEDGEHAMMER_ISAR = struct open Sledgehammer_Util +open Sledgehammer_Fact_Preprocessor open ATP_Manager -open ATP_Minimal -open ATP_Wrapper +open ATP_Systems +open Sledgehammer_Fact_Minimizer structure K = OuterKeyword and P = OuterParse +(** Proof method used in Isar proofs **) + +val neg_clausify_setup = + Method.setup @{binding neg_clausify} + (Scan.succeed (SIMPLE_METHOD' o neg_clausify_tac)) + "conversion of goal to negated conjecture clauses" + +(** Attribute for converting a theorem into clauses **) + +val parse_clausify_attribute : attribute context_parser = + Scan.lift OuterParse.nat + >> (fn i => Thm.rule_attribute (fn context => fn th => + let val thy = Context.theory_of context in + Meson.make_meta_clause (nth (cnf_axiom thy th) i) + end)) + +val clausify_setup = + Attrib.setup @{binding clausify} parse_clausify_attribute + "conversion of theorem to clauses" + +(** Sledgehammer commands **) + fun add_to_relevance_override ns : relevance_override = {add = ns, del = [], only = false} fun del_from_relevance_override ns : relevance_override = @@ -35,6 +62,29 @@ fun merge_relevance_overrides rs = fold merge_relevance_override_pairwise rs (only_relevance_override []) +(*** parameters ***) + +val atps = Unsynchronized.ref (default_atps_param_value ()) +val timeout = Unsynchronized.ref 60 +val full_types = Unsynchronized.ref false + +val _ = + ProofGeneralPgip.add_preference Preferences.category_proof + (Preferences.string_pref atps + "Sledgehammer: ATPs" + "Default automatic provers (separated by whitespace)") + +val _ = + ProofGeneralPgip.add_preference Preferences.category_proof + (Preferences.int_pref timeout + "Sledgehammer: Time Limit" + "ATPs will be interrupted after this time (in seconds)") + +val _ = + ProofGeneralPgip.add_preference Preferences.category_proof + (Preferences.bool_pref full_types + "Sledgehammer: Full Types" "ATPs will use full type information") + type raw_param = string * string list val default_default_params = @@ -46,11 +96,9 @@ ("relevance_threshold", "50"), ("convergence", "320"), ("theory_relevant", "smart"), - ("higher_order", "smart"), ("follow_defs", "false"), ("isar_proof", "false"), - ("modulus", "1"), - ("sorts", "false"), + ("shrink_factor", "1"), ("minimize_timeout", "5 s")] val alias_params = @@ -59,18 +107,16 @@ [("no_debug", "debug"), ("quiet", "verbose"), ("no_overlord", "overlord"), + ("partial_types", "full_types"), ("implicit_apply", "explicit_apply"), ("ignore_no_atp", 
"respect_no_atp"), - ("partial_types", "full_types"), ("theory_irrelevant", "theory_relevant"), - ("first_order", "higher_order"), ("dont_follow_defs", "follow_defs"), - ("metis_proof", "isar_proof"), - ("no_sorts", "sorts")] + ("metis_proof", "isar_proof")] val params_for_minimize = - ["full_types", "explicit_apply", "higher_order", "isar_proof", "modulus", - "sorts", "minimize_timeout"] + ["debug", "verbose", "overlord", "full_types", "explicit_apply", + "isar_proof", "shrink_factor", "minimize_timeout"] val property_dependent_params = ["atps", "full_types", "timeout"] @@ -150,11 +196,9 @@ 0.01 * Real.fromInt (lookup_int "relevance_threshold") val convergence = 0.01 * Real.fromInt (lookup_int "convergence") val theory_relevant = lookup_bool_option "theory_relevant" - val higher_order = lookup_bool_option "higher_order" val follow_defs = lookup_bool "follow_defs" val isar_proof = lookup_bool "isar_proof" - val modulus = Int.max (1, lookup_int "modulus") - val sorts = lookup_bool "sorts" + val shrink_factor = Int.max (1, lookup_int "shrink_factor") val timeout = lookup_time "timeout" val minimize_timeout = lookup_time "minimize_timeout" in @@ -162,50 +206,51 @@ full_types = full_types, explicit_apply = explicit_apply, respect_no_atp = respect_no_atp, relevance_threshold = relevance_threshold, convergence = convergence, theory_relevant = theory_relevant, - higher_order = higher_order, follow_defs = follow_defs, - isar_proof = isar_proof, modulus = modulus, sorts = sorts, - timeout = timeout, minimize_timeout = minimize_timeout} + follow_defs = follow_defs, isar_proof = isar_proof, + shrink_factor = shrink_factor, timeout = timeout, + minimize_timeout = minimize_timeout} end fun get_params thy = extract_params thy (default_raw_params thy) fun default_params thy = get_params thy o map (apsnd single) -fun minimize override_params old_style_args i fact_refs state = +val subgoal_count = Logic.count_prems o prop_of o #goal o Proof.goal + +(* Sledgehammer the given subgoal *) + +fun run {atps = [], ...} _ _ _ _ = error "No ATP is set." + | run (params as {atps, timeout, ...}) i relevance_override minimize_command + state = + case subgoal_count state of + 0 => priority "No subgoal!" + | n => + let + val birth_time = Time.now () + val death_time = Time.+ (birth_time, timeout) + val _ = kill_atps () (* race w.r.t. other Sledgehammer invocations *) + val _ = priority "Sledgehammering..." 
+ val _ = app (start_prover_thread params birth_time death_time i n + relevance_override minimize_command + state) atps + in () end + +fun minimize override_params i fact_refs state = let val thy = Proof.theory_of state val ctxt = Proof.context_of state - fun theorems_from_refs ctxt = - map (fn fact_ref => - let - val ths = ProofContext.get_fact ctxt fact_ref - val name' = Facts.string_of_ref fact_ref - in (name', ths) end) - fun get_time_limit_arg s = - (case Int.fromString s of - SOME t => Time.fromSeconds t - | NONE => error ("Invalid time limit: " ^ quote s ^ ".")) - fun get_opt (name, a) (p, t) = - (case name of - "time" => (p, get_time_limit_arg a) - | "atp" => (a, t) - | n => error ("Invalid argument: " ^ n ^ ".")) - val {atps, minimize_timeout, ...} = get_params thy override_params - val (atp, timeout) = fold get_opt old_style_args (hd atps, minimize_timeout) - val params = - get_params thy - (override_params @ - [("atps", [atp]), - ("minimize_timeout", - [string_of_int (Time.toMilliseconds timeout) ^ " ms"])]) - val prover = - (case get_prover thy atp of - SOME prover => prover - | NONE => error ("Unknown ATP: " ^ quote atp ^ ".")) + val theorems_from_refs = + map o pairf Facts.string_of_ref o ProofContext.get_fact val name_thms_pairs = theorems_from_refs ctxt fact_refs in - priority (#2 (minimize_theorems params prover atp i state name_thms_pairs)) + case subgoal_count state of + 0 => priority "No subgoal!" + | n => priority (#2 (minimize_theorems (get_params thy override_params) i n + state name_thms_pairs)) end +val sledgehammerN = "sledgehammer" +val sledgehammer_paramsN = "sledgehammer_params" + val runN = "run" val minimizeN = "minimize" val messagesN = "messages" @@ -221,12 +266,10 @@ val is_raw_param_relevant_for_minimize = member (op =) params_for_minimize o fst o unalias_raw_param fun string_for_raw_param (key, values) = - key ^ (case space_implode " " values of - "" => "" - | value => " = " ^ value) + key ^ (case space_implode " " values of "" => "" | value => " = " ^ value) fun minimize_command override_params i atp_name facts = - "sledgehammer minimize [atp = " ^ atp_name ^ + sledgehammerN ^ " " ^ minimizeN ^ " [atp = " ^ atp_name ^ (override_params |> filter is_raw_param_relevant_for_minimize |> implode o map (prefix ", " o string_for_raw_param)) ^ "] (" ^ space_implode " " facts ^ ")" ^ @@ -235,15 +278,15 @@ fun hammer_away override_params subcommand opt_i relevance_override state = let val thy = Proof.theory_of state - val _ = List.app check_raw_param override_params + val _ = app check_raw_param override_params in if subcommand = runN then let val i = the_default 1 opt_i in - sledgehammer (get_params thy override_params) i relevance_override - (minimize_command override_params i) state + run (get_params thy override_params) i relevance_override + (minimize_command override_params i) state end else if subcommand = minimizeN then - minimize (map (apfst minimizize_raw_param_name) override_params) [] + minimize (map (apfst minimizize_raw_param_name) override_params) (the_default 1 opt_i) (#add relevance_override) state else if subcommand = messagesN then messages opt_i @@ -301,40 +344,17 @@ val parse_sledgehammer_params_command = parse_params #>> sledgehammer_params_trans -val parse_minimize_args = - Scan.optional (Args.bracks (P.list (P.short_ident --| P.$$$ "=" -- P.xname))) - [] val _ = - OuterSyntax.improper_command "atp_kill" "kill all managed provers" K.diag - (Scan.succeed (Toplevel.no_timing o Toplevel.imperative kill_atps)) -val _ = - 
OuterSyntax.improper_command "atp_info" - "print information about managed provers" K.diag - (Scan.succeed (Toplevel.no_timing o Toplevel.imperative running_atps)) -val _ = - OuterSyntax.improper_command "atp_messages" - "print recent messages issued by managed provers" K.diag - (Scan.option (P.$$$ "(" |-- P.nat --| P.$$$ ")") >> - (fn limit => Toplevel.no_timing - o Toplevel.imperative (fn () => messages limit))) + OuterSyntax.improper_command sledgehammerN + "search for first-order proof using automatic theorem provers" K.diag + parse_sledgehammer_command val _ = - OuterSyntax.improper_command "print_atps" "print external provers" K.diag - (Scan.succeed (Toplevel.no_timing o Toplevel.unknown_theory o - Toplevel.keep (available_atps o Toplevel.theory_of))) -val _ = - OuterSyntax.improper_command "atp_minimize" - "minimize theorem list with external prover" K.diag - (parse_minimize_args -- parse_fact_refs >> (fn (args, fact_refs) => - Toplevel.no_timing o Toplevel.unknown_proof o - Toplevel.keep (minimize [] args 1 fact_refs o Toplevel.proof_of))) + OuterSyntax.command sledgehammer_paramsN + "set and display the default parameters for Sledgehammer" K.thy_decl + parse_sledgehammer_params_command -val _ = - OuterSyntax.improper_command "sledgehammer" - "search for first-order proof using automatic theorem provers" K.diag - parse_sledgehammer_command -val _ = - OuterSyntax.command "sledgehammer_params" - "set and display the default parameters for Sledgehammer" K.thy_decl - parse_sledgehammer_params_command +val setup = + neg_clausify_setup + #> clausify_setup end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Sledgehammer/sledgehammer_proof_reconstruct.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_proof_reconstruct.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_proof_reconstruct.ML Tue May 04 20:30:22 2010 +0200 @@ -1,5 +1,6 @@ (* Title: HOL/Tools/Sledgehammer/sledgehammer_proof_reconstruct.ML Author: Lawrence C Paulson and Claire Quigley, Cambridge University Computer Laboratory + Author: Jasmin Blanchette, TU Muenchen Transfer of proofs from external provers. 
*) @@ -7,6 +8,7 @@ signature SLEDGEHAMMER_PROOF_RECONSTRUCT = sig type minimize_command = string list -> string + type name_pool = Sledgehammer_FOL_Clause.name_pool val chained_hint: string val invert_const: string -> string @@ -14,17 +16,16 @@ val num_typargs: theory -> string -> int val make_tvar: string -> typ val strip_prefix: string -> string -> string option - val is_proof_well_formed: string -> bool val metis_line: int -> int -> string list -> string val metis_proof_text: minimize_command * string * string vector * thm * int -> string * string list val isar_proof_text: - bool -> int -> bool -> Proof.context + name_pool option * bool * int * Proof.context * int list list -> minimize_command * string * string vector * thm * int -> string * string list val proof_text: - bool -> bool -> int -> bool -> Proof.context + bool -> name_pool option * bool * int * Proof.context * int list list -> minimize_command * string * string vector * thm * int -> string * string list end; @@ -32,119 +33,191 @@ structure Sledgehammer_Proof_Reconstruct : SLEDGEHAMMER_PROOF_RECONSTRUCT = struct +open Sledgehammer_Util open Sledgehammer_FOL_Clause open Sledgehammer_Fact_Preprocessor type minimize_command = string list -> string -val trace_proof_path = Path.basic "atp_trace"; +fun is_ident_char c = Char.isAlphaNum c orelse c = #"_" +fun is_head_digit s = Char.isDigit (String.sub (s, 0)) -fun trace_proof_msg f = - if !trace then File.append (File.tmp_path trace_proof_path) (f ()) else (); +(* Hack: Could return false positives (e.g., a user happens to declare a + constant called "SomeTheory.sko_means_shoe_in_$wedish". *) +val is_skolem_const_name = + Long_Name.base_name + #> String.isPrefix skolem_prefix andf String.isSubstring skolem_infix + +val index_in_shape : int -> int list list -> int = + find_index o exists o curry (op =) +fun is_axiom_clause_number thm_names num = num <= Vector.length thm_names +fun is_conjecture_clause_number conjecture_shape num = + index_in_shape num conjecture_shape >= 0 -fun string_of_thm ctxt = PrintMode.setmp [] (Display.string_of_thm ctxt); +fun ugly_name NONE s = s + | ugly_name (SOME the_pool) s = + case Symtab.lookup (snd the_pool) s of + SOME s' => s' + | NONE => s -fun is_ident_char c = Char.isAlphaNum c orelse c = #"_" +fun smart_lambda v t = + Abs (case v of + Const (s, _) => + List.last (space_explode skolem_infix (Long_Name.base_name s)) + | Var ((s, _), _) => s + | Free (s, _) => s + | _ => "", fastype_of v, abstract_over (v, t)) +fun forall_of v t = HOLogic.all_const (fastype_of v) $ smart_lambda v t -fun is_axiom thm_names line_no = line_no <= Vector.length thm_names +datatype ('a, 'b, 'c, 'd, 'e) raw_step = + Definition of 'a * 'b * 'c | + Inference of 'a * 'd * 'e list (**** PARSING OF TSTP FORMAT ****) -(* Syntax trees, either term list or formulae *) -datatype stree = Int of int | Br of string * stree list; +fun strip_spaces_in_list [] = "" + | strip_spaces_in_list [c1] = if Char.isSpace c1 then "" else str c1 + | strip_spaces_in_list [c1, c2] = + strip_spaces_in_list [c1] ^ strip_spaces_in_list [c2] + | strip_spaces_in_list (c1 :: c2 :: c3 :: cs) = + if Char.isSpace c1 then + strip_spaces_in_list (c2 :: c3 :: cs) + else if Char.isSpace c2 then + if Char.isSpace c3 then + strip_spaces_in_list (c1 :: c3 :: cs) + else + str c1 ^ (if forall is_ident_char [c1, c3] then " " else "") ^ + strip_spaces_in_list (c3 :: cs) + else + str c1 ^ strip_spaces_in_list (c2 :: c3 :: cs) +val strip_spaces = strip_spaces_in_list o String.explode -fun atom x = Br(x,[]); +(* Syntax 
trees, either term list or formulae *) +datatype node = IntLeaf of int | StrNode of string * node list -fun scons (x,y) = Br("cons", [x,y]); -val listof = List.foldl scons (atom "nil"); +fun str_leaf s = StrNode (s, []) + +fun scons (x, y) = StrNode ("cons", [x, y]) +val slist_of = List.foldl scons (str_leaf "nil") (*Strings enclosed in single quotes, e.g. filenames*) -val quoted = $$ "'" |-- Scan.repeat (~$$ "'") --| $$ "'" >> implode; - -(*Intended for $true and $false*) -fun tf s = "c_" ^ str (Char.toUpper (String.sub(s,0))) ^ String.extract(s,1,NONE); -val truefalse = $$ "$" |-- Symbol.scan_id >> (atom o tf); +val parse_quoted = $$ "'" |-- Scan.repeat (~$$ "'") --| $$ "'" >> implode; (*Integer constants, typically proof line numbers*) -fun is_digit s = Char.isDigit (String.sub(s,0)); -val integer = Scan.many1 is_digit >> (the o Int.fromString o implode); +val parse_integer = Scan.many1 is_head_digit >> (the o Int.fromString o implode) -(* needed for SPASS's nonstandard output format *) -fun fix_symbol "equal" = "c_equal" - | fix_symbol s = s +val parse_dollar_name = + Scan.repeat ($$ "$") -- Symbol.scan_id >> (fn (ss, s) => implode ss ^ s) -(*Generalized FO terms, which include filenames, numbers, etc.*) -fun term x = (quoted >> atom || integer >> Int || truefalse - || (Symbol.scan_id >> fix_symbol) - -- Scan.optional ($$ "(" |-- terms --| $$ ")") [] >> Br - || $$ "(" |-- term --| $$ ")" - || $$ "[" |-- Scan.optional terms [] --| $$ "]" >> listof) x -and terms x = (term ::: Scan.repeat ($$ "," |-- term)) x +(* needed for SPASS's output format *) +fun repair_name _ "$true" = "c_True" + | repair_name _ "$false" = "c_False" + | repair_name _ "$$e" = "c_equal" (* seen in Vampire 11 proofs *) + | repair_name _ "equal" = "c_equal" (* probably not needed *) + | repair_name pool s = ugly_name pool s +(* Generalized first-order terms, which include file names, numbers, etc. *) +(* The "x" argument is not strictly necessary, but without it Poly/ML loops + forever at compile time. *) +fun parse_term pool x = + (parse_quoted >> str_leaf + || parse_integer >> IntLeaf + || (parse_dollar_name >> repair_name pool) + -- Scan.optional ($$ "(" |-- parse_terms pool --| $$ ")") [] >> StrNode + || $$ "(" |-- parse_term pool --| $$ ")" + || $$ "[" |-- Scan.optional (parse_terms pool) [] --| $$ "]" >> slist_of) x +and parse_terms pool x = + (parse_term pool ::: Scan.repeat ($$ "," |-- parse_term pool)) x -fun negate t = Br ("c_Not", [t]) -fun equate t1 t2 = Br ("c_equal", [t1, t2]); - -(*Apply equal or not-equal to a term*) -fun syn_equal (t, NONE) = t - | syn_equal (t1, SOME (NONE, t2)) = equate t1 t2 - | syn_equal (t1, SOME (SOME _, t2)) = negate (equate t1 t2) +fun negate_node u = StrNode ("c_Not", [u]) +fun equate_nodes u1 u2 = StrNode ("c_equal", [u1, u2]) -(*Literals can involve negation, = and !=.*) -fun literal x = - ($$ "~" |-- literal >> negate - || (term -- Scan.option (Scan.option ($$ "!") --| $$ "=" -- term) - >> syn_equal)) x - -val literals = literal ::: Scan.repeat ($$ "|" |-- literal); - -(*Clause: a list of literals separated by the disjunction sign*) -val clause = $$ "(" |-- literals --| $$ ")" || Scan.single literal; +(* Apply equal or not-equal to a term. 
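   For instance, a parsed "X = Y" becomes the node c_equal(X, Y), while "X != Y"
   becomes c_Not applied to that node; a plain predicate term is passed through
   unchanged.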
*) +fun repair_predicate_term (u, NONE) = u + | repair_predicate_term (u1, SOME (NONE, u2)) = equate_nodes u1 u2 + | repair_predicate_term (u1, SOME (SOME _, u2)) = + negate_node (equate_nodes u1 u2) +fun parse_predicate_term pool = + parse_term pool -- Scan.option (Scan.option ($$ "!") --| $$ "=" + -- parse_term pool) + >> repair_predicate_term +fun parse_literal pool x = + ($$ "~" |-- parse_literal pool >> negate_node || parse_predicate_term pool) x +fun parse_literals pool = + parse_literal pool ::: Scan.repeat ($$ "|" |-- parse_literal pool) +fun parse_parenthesized_literals pool = + $$ "(" |-- parse_literals pool --| $$ ")" || parse_literals pool +fun parse_clause pool = + parse_parenthesized_literals pool + ::: Scan.repeat ($$ "|" |-- parse_parenthesized_literals pool) + >> List.concat -fun ints_of_stree (Int n) = cons n - | ints_of_stree (Br (_, ts)) = fold ints_of_stree ts +fun ints_of_node (IntLeaf n) = cons n + | ints_of_node (StrNode (_, us)) = fold ints_of_node us +val parse_tstp_annotations = + Scan.optional ($$ "," |-- parse_term NONE + --| Scan.option ($$ "," |-- parse_terms NONE) + >> (fn source => ints_of_node source [])) [] -val tstp_annotations = - Scan.optional ($$ "," |-- term --| Scan.option ($$ "," |-- terms) - >> (fn source => ints_of_stree source [])) [] +fun parse_definition pool = + $$ "(" |-- parse_literal NONE --| Scan.this_string "<=>" + -- parse_clause pool --| $$ ")" -fun retuple_tstp_line ((name, ts), deps) = (name, ts, deps) - -(* ::= cnf(, , ). - The could be an identifier, but we assume integers. *) -val parse_tstp_line = - (Scan.this_string "cnf" -- $$ "(") |-- integer --| $$ "," --| Symbol.scan_id - --| $$ "," -- clause -- tstp_annotations --| $$ ")" --| $$ "." - >> retuple_tstp_line +(* Syntax: cnf(, , ). + The could be an identifier, but we assume integers. *) +fun finish_tstp_definition_line (num, (u, us)) = Definition (num, u, us) +fun finish_tstp_inference_line ((num, us), deps) = Inference (num, us, deps) +fun parse_tstp_line pool = + ((Scan.this_string "fof" -- $$ "(") |-- parse_integer --| $$ "," + --| Scan.this_string "definition" --| $$ "," -- parse_definition pool + --| parse_tstp_annotations --| $$ ")" --| $$ "." + >> finish_tstp_definition_line) + || ((Scan.this_string "cnf" -- $$ "(") |-- parse_integer --| $$ "," + --| Symbol.scan_id --| $$ "," -- parse_clause pool + -- parse_tstp_annotations --| $$ ")" --| $$ "." + >> finish_tstp_inference_line) (**** PARSING OF SPASS OUTPUT ****) -val dot_name = integer --| $$ "." --| integer +(* SPASS returns clause references of the form "x.y". We ignore "y", whose role + is not clear anyway. *) +val parse_dot_name = parse_integer --| $$ "." --| parse_integer -val spass_annotations = - Scan.optional ($$ ":" |-- Scan.repeat (dot_name --| Scan.option ($$ ","))) [] +val parse_spass_annotations = + Scan.optional ($$ ":" |-- Scan.repeat (parse_dot_name + --| Scan.option ($$ ","))) [] -val starred_literal = literal --| Scan.repeat ($$ "*" || $$ " ") +(* It is not clear why some literals are followed by sequences of stars and/or + pluses. We ignore them. 
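   For instance, a literal printed as, say, equal(x, y)** or P(x)*+ is read as if
   the trailing stars and pluses were absent.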
*) +fun parse_decorated_predicate_term pool = + parse_predicate_term pool --| Scan.repeat ($$ "*" || $$ "+" || $$ " ") -val horn_clause = - Scan.repeat starred_literal --| $$ "-" --| $$ ">" - -- Scan.repeat starred_literal - >> (fn ([], []) => [atom (tf "false")] - | (clauses1, clauses2) => map negate clauses1 @ clauses2) - -fun retuple_spass_proof_line ((name, deps), ts) = (name, ts, deps) +fun parse_horn_clause pool = + Scan.repeat (parse_decorated_predicate_term pool) --| $$ "|" --| $$ "|" + -- Scan.repeat (parse_decorated_predicate_term pool) --| $$ "-" --| $$ ">" + -- Scan.repeat (parse_decorated_predicate_term pool) + >> (fn (([], []), []) => [str_leaf "c_False"] + | ((clauses1, clauses2), clauses3) => + map negate_node (clauses1 @ clauses2) @ clauses3) -(* Syntax: [0:] || -> **. *) -val parse_spass_proof_line = - integer --| $$ "[" --| $$ "0" --| $$ ":" --| Symbol.scan_id - -- spass_annotations --| $$ "]" --| $$ "|" --| $$ "|" -- horn_clause - --| $$ "." - >> retuple_spass_proof_line +(* Syntax: [0:] + || -> . *) +fun finish_spass_line ((num, deps), us) = Inference (num, us, deps) +fun parse_spass_line pool = + parse_integer --| $$ "[" --| $$ "0" --| $$ ":" --| Symbol.scan_id + -- parse_spass_annotations --| $$ "]" -- parse_horn_clause pool --| $$ "." + >> finish_spass_line -val parse_proof_line = fst o (parse_tstp_line || parse_spass_proof_line) +fun parse_line pool = parse_tstp_line pool || parse_spass_line pool +fun parse_lines pool = Scan.repeat1 (parse_line pool) +fun parse_proof pool = + fst o Scan.finite Symbol.stopper + (Scan.error (!! (fn _ => raise Fail "unrecognized ATP output") + (parse_lines pool))) + o explode o strip_spaces (**** INTERPRETATION OF TSTP SYNTAX TREES ****) -exception STREE of stree; +exception NODE of node (*If string s has the prefix s1, return the result of deleting it.*) fun strip_prefix s1 s = @@ -167,34 +240,28 @@ (*Type variables are given the basic sort, HOL.type. 
Some will later be constrained by information from type literals, or by type inference.*) -fun type_of_stree t = - case t of - Int _ => raise STREE t - | Br (a,ts) => - let val Ts = map type_of_stree ts - in - case strip_prefix tconst_prefix a of - SOME b => Type(invert_type_const b, Ts) - | NONE => - if not (null ts) then raise STREE t (*only tconsts have type arguments*) - else - case strip_prefix tfree_prefix a of - SOME b => TFree("'" ^ b, HOLogic.typeS) - | NONE => - case strip_prefix tvar_prefix a of - SOME b => make_tvar b - | NONE => make_tparam a (* Variable from the ATP, say "X1" *) - end; +fun type_of_node (u as IntLeaf _) = raise NODE u + | type_of_node (u as StrNode (a, us)) = + let val Ts = map type_of_node us in + case strip_prefix tconst_prefix a of + SOME b => Type (invert_type_const b, Ts) + | NONE => + if not (null us) then + raise NODE u (*only tconsts have type arguments*) + else case strip_prefix tfree_prefix a of + SOME b => TFree ("'" ^ b, HOLogic.typeS) + | NONE => + case strip_prefix tvar_prefix a of + SOME b => make_tvar b + | NONE => make_tparam a (* Variable from the ATP, say "X1" *) + end (*Invert the table of translations between Isabelle and ATPs*) val const_trans_table_inv = - Symtab.update ("fequal", "op =") - (Symtab.make (map swap (Symtab.dest const_trans_table))); + Symtab.update ("fequal", @{const_name "op ="}) + (Symtab.make (map swap (Symtab.dest const_trans_table))) -fun invert_const c = - case Symtab.lookup const_trans_table_inv c of - SOME c' => c' - | NONE => c; +fun invert_const c = c |> Symtab.lookup const_trans_table_inv |> the_default c (*The number of type arguments of a constant, zero if it's monomorphic*) fun num_typargs thy s = length (Sign.const_typargs thy (s, Sign.the_const_type thy s)); @@ -202,378 +269,317 @@ (*Generates a constant, given its type arguments*) fun const_of thy (a,Ts) = Const(a, Sign.const_instance thy (a,Ts)); +fun fix_atp_variable_name s = + let + fun subscript_name s n = s ^ nat_subscript n + val s = String.map Char.toLower s + in + case space_explode "_" s of + [_] => (case take_suffix Char.isDigit (String.explode s) of + (cs1 as _ :: _, cs2 as _ :: _) => + subscript_name (String.implode cs1) + (the (Int.fromString (String.implode cs2))) + | (_, _) => s) + | [s1, s2] => (case Int.fromString s2 of + SOME n => subscript_name s1 n + | NONE => s) + | _ => s + end + (*First-order translation. No types are known for variables. 
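   For instance, a hypothetical ATP term hAPP(hAPP(c_f, X), Y), where c_f encodes
   some constant f, is rebuilt as the curried application f X Y, and an enclosing
   hBOOL wrapper is simply dropped.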
HOLogic.typeT should allow them to be inferred.*) -fun term_of_stree args thy t = - case t of - Int _ => raise STREE t - | Br ("hBOOL",[t]) => term_of_stree [] thy t (*ignore hBOOL*) - | Br ("hAPP",[t,u]) => term_of_stree (u::args) thy t - | Br (a,ts) => - case strip_prefix const_prefix a of - SOME "equal" => - list_comb(Const (@{const_name "op ="}, HOLogic.typeT), List.map (term_of_stree [] thy) ts) - | SOME b => - let val c = invert_const b - val nterms = length ts - num_typargs thy c - val us = List.map (term_of_stree [] thy) (List.take(ts,nterms) @ args) - (*Extra args from hAPP come AFTER any arguments given directly to the - constant.*) - val Ts = List.map type_of_stree (List.drop(ts,nterms)) - in list_comb(const_of thy (c, Ts), us) end - | NONE => (*a variable, not a constant*) - let val T = HOLogic.typeT - val opr = (*a Free variable is typically a Skolem function*) - case strip_prefix fixed_var_prefix a of - SOME b => Free(b,T) - | NONE => - case strip_prefix schematic_var_prefix a of - SOME b => make_var (b,T) - | NONE => make_var (a,T) (* Variable from the ATP, say "X1" *) - in list_comb (opr, List.map (term_of_stree [] thy) (ts@args)) end; +fun term_of_node args thy u = + case u of + IntLeaf _ => raise NODE u + | StrNode ("hBOOL", [u]) => term_of_node [] thy u (* ignore hBOOL *) + | StrNode ("hAPP", [u1, u2]) => term_of_node (u2 :: args) thy u1 + | StrNode (a, us) => + case strip_prefix const_prefix a of + SOME "equal" => + list_comb (Const (@{const_name "op ="}, HOLogic.typeT), + map (term_of_node [] thy) us) + | SOME b => + let + val c = invert_const b + val nterms = length us - num_typargs thy c + val ts = map (term_of_node [] thy) (take nterms us @ args) + (*Extra args from hAPP come AFTER any arguments given directly to the + constant.*) + val Ts = map type_of_node (drop nterms us) + in list_comb(const_of thy (c, Ts), ts) end + | NONE => (*a variable, not a constant*) + let + val opr = + (* a Free variable is typically a Skolem function *) + case strip_prefix fixed_var_prefix a of + SOME b => Free (b, HOLogic.typeT) + | NONE => + case strip_prefix schematic_var_prefix a of + SOME b => make_var (b, HOLogic.typeT) + | NONE => + (* Variable from the ATP, say "X1" *) + make_var (fix_atp_variable_name a, HOLogic.typeT) + in list_comb (opr, map (term_of_node [] thy) (us @ args)) end -(*Type class literal applied to a type. Returns triple of polarity, class, type.*) -fun constraint_of_stree pol (Br("c_Not",[t])) = constraint_of_stree (not pol) t - | constraint_of_stree pol t = case t of - Int _ => raise STREE t - | Br (a,ts) => - (case (strip_prefix class_prefix a, map type_of_stree ts) of - (SOME b, [T]) => (pol, b, T) - | _ => raise STREE t); +(* Type class literal applied to a type. Returns triple of polarity, class, + type. 
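   For instance, a positive literal stating that some type T belongs to a class C
   yields (true, C, T), and wrapping the literal in c_Not flips the polarity to
   false.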
*) +fun constraint_of_node pos (StrNode ("c_Not", [u])) = + constraint_of_node (not pos) u + | constraint_of_node pos u = case u of + IntLeaf _ => raise NODE u + | StrNode (a, us) => + (case (strip_prefix class_prefix a, map type_of_node us) of + (SOME b, [T]) => (pos, b, T) + | _ => raise NODE u) (** Accumulate type constraints in a clause: negative type literals **) -fun addix (key,z) = Vartab.map_default (key,[]) (cons z); +fun add_var (key, z) = Vartab.map_default (key, []) (cons z) -fun add_constraint ((false, cl, TFree(a,_)), vt) = addix ((a,~1),cl) vt - | add_constraint ((false, cl, TVar(ix,_)), vt) = addix (ix,cl) vt +fun add_constraint ((false, cl, TFree(a,_)), vt) = add_var ((a,~1),cl) vt + | add_constraint ((false, cl, TVar(ix,_)), vt) = add_var (ix,cl) vt | add_constraint (_, vt) = vt; -(*False literals (which E includes in its proofs) are deleted*) -val nofalses = filter (not o equal HOLogic.false_const); +fun is_positive_literal (@{const Not} $ _) = false + | is_positive_literal t = true + +fun negate_term thy (Const (@{const_name All}, T) $ Abs (s, T', t')) = + Const (@{const_name Ex}, T) $ Abs (s, T', negate_term thy t') + | negate_term thy (Const (@{const_name Ex}, T) $ Abs (s, T', t')) = + Const (@{const_name All}, T) $ Abs (s, T', negate_term thy t') + | negate_term thy (@{const "op -->"} $ t1 $ t2) = + @{const "op &"} $ t1 $ negate_term thy t2 + | negate_term thy (@{const "op &"} $ t1 $ t2) = + @{const "op |"} $ negate_term thy t1 $ negate_term thy t2 + | negate_term thy (@{const "op |"} $ t1 $ t2) = + @{const "op &"} $ negate_term thy t1 $ negate_term thy t2 + | negate_term _ (@{const Not} $ t) = t + | negate_term _ t = @{const Not} $ t -(*Final treatment of the list of "real" literals from a clause.*) -fun finish [] = HOLogic.true_const (*No "real" literals means only type information*) - | finish lits = - case nofalses lits of - [] => HOLogic.false_const (*The empty clause, since we started with real literals*) - | xs => foldr1 HOLogic.mk_disj (rev xs); +fun clause_for_literals _ [] = HOLogic.false_const + | clause_for_literals _ [lit] = lit + | clause_for_literals thy lits = + case List.partition is_positive_literal lits of + (pos_lits as _ :: _, neg_lits as _ :: _) => + @{const "op -->"} + $ foldr1 HOLogic.mk_conj (map (negate_term thy) neg_lits) + $ foldr1 HOLogic.mk_disj pos_lits + | _ => foldr1 HOLogic.mk_disj lits + +(* Final treatment of the list of "real" literals from a clause. + No "real" literals means only type information. 
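   For instance, the literals ~ P x, Q x and ~ R x are assembled into an
   implication of the form P x & R x --> Q x, and an empty list of real literals
   yields True.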
*) +fun finish_clause _ [] = HOLogic.true_const + | finish_clause thy lits = + lits |> filter_out (curry (op =) HOLogic.false_const) |> rev + |> clause_for_literals thy (*Accumulate sort constraints in vt, with "real" literals in lits.*) -fun lits_of_strees _ (vt, lits) [] = (vt, finish lits) - | lits_of_strees ctxt (vt, lits) (t::ts) = - lits_of_strees ctxt (add_constraint (constraint_of_stree true t, vt), lits) ts - handle STREE _ => - lits_of_strees ctxt (vt, term_of_stree [] (ProofContext.theory_of ctxt) t :: lits) ts; +fun lits_of_nodes thy (vt, lits) [] = (vt, finish_clause thy lits) + | lits_of_nodes thy (vt, lits) (u :: us) = + lits_of_nodes thy (add_constraint (constraint_of_node true u, vt), lits) us + handle NODE _ => lits_of_nodes thy (vt, term_of_node [] thy u :: lits) us (*Update TVars/TFrees with detected sort constraints.*) -fun fix_sorts vt = - let fun tysubst (Type (a, Ts)) = Type (a, map tysubst Ts) - | tysubst (TVar (xi, s)) = TVar (xi, the_default s (Vartab.lookup vt xi)) - | tysubst (TFree (x, s)) = TFree (x, the_default s (Vartab.lookup vt (x, ~1))) - fun tmsubst (Const (a, T)) = Const (a, tysubst T) - | tmsubst (Free (a, T)) = Free (a, tysubst T) - | tmsubst (Var (xi, T)) = Var (xi, tysubst T) - | tmsubst (t as Bound _) = t - | tmsubst (Abs (a, T, t)) = Abs (a, tysubst T, tmsubst t) - | tmsubst (t $ u) = tmsubst t $ tmsubst u; - in not (Vartab.is_empty vt) ? tmsubst end; +fun repair_sorts vt = + let + fun do_type (Type (a, Ts)) = Type (a, map do_type Ts) + | do_type (TVar (xi, s)) = TVar (xi, the_default s (Vartab.lookup vt xi)) + | do_type (TFree (x, s)) = + TFree (x, the_default s (Vartab.lookup vt (x, ~1))) + fun do_term (Const (a, T)) = Const (a, do_type T) + | do_term (Free (a, T)) = Free (a, do_type T) + | do_term (Var (xi, T)) = Var (xi, do_type T) + | do_term (t as Bound _) = t + | do_term (Abs (a, T, t)) = Abs (a, do_type T, do_term t) + | do_term (t1 $ t2) = do_term t1 $ do_term t2 + in not (Vartab.is_empty vt) ? do_term end -(*Interpret a list of syntax trees as a clause, given by "real" literals and sort constraints. - vt0 holds the initial sort constraints, from the conjecture clauses.*) -fun clause_of_strees ctxt vt0 ts = - let val (vt, dt) = lits_of_strees ctxt (vt0,[]) ts in - singleton (Syntax.check_terms ctxt) (TypeInfer.constrain HOLogic.boolT (fix_sorts vt dt)) - end +fun unskolemize_term t = + fold forall_of (Term.add_consts t [] + |> filter (is_skolem_const_name o fst) |> map Const) t + +val combinator_table = + [(@{const_name COMBI}, @{thm COMBI_def_raw}), + (@{const_name COMBK}, @{thm COMBK_def_raw}), + (@{const_name COMBB}, @{thm COMBB_def_raw}), + (@{const_name COMBC}, @{thm COMBC_def_raw}), + (@{const_name COMBS}, @{thm COMBS_def_raw})] -fun gen_all_vars t = fold_rev Logic.all (OldTerm.term_vars t) t; - -fun decode_proof_step vt0 (name, ts, deps) ctxt = - let val cl = clause_of_strees ctxt vt0 ts in - ((name, cl, deps), fold Variable.declare_term (OldTerm.term_frees cl) ctxt) - end - -(** Global sort constraints on TFrees (from tfree_tcs) are positive unit clauses. 
**) +fun uncombine_term (t1 $ t2) = betapply (pairself uncombine_term (t1, t2)) + | uncombine_term (Abs (s, T, t')) = Abs (s, T, uncombine_term t') + | uncombine_term (t as Const (x as (s, _))) = + (case AList.lookup (op =) combinator_table s of + SOME thm => thm |> prop_of |> specialize_type @{theory} x |> Logic.dest_equals |> snd + | NONE => t) + | uncombine_term t = t -fun add_tfree_constraint ((true, cl, TFree(a,_)), vt) = addix ((a,~1),cl) vt - | add_tfree_constraint (_, vt) = vt; +(* Interpret a list of syntax trees as a clause, given by "real" literals and + sort constraints. "vt" holds the initial sort constraints, from the + conjecture clauses. *) +fun clause_of_nodes ctxt vt us = + let val (vt, t) = lits_of_nodes (ProofContext.theory_of ctxt) (vt, []) us in + t |> repair_sorts vt + end +fun check_formula ctxt = + TypeInfer.constrain HOLogic.boolT + #> Syntax.check_term (ProofContext.set_mode ProofContext.mode_schematic ctxt) +(** Global sort constraints on TFrees (from tfree_tcs) are positive unit + clauses. **) + +fun add_tfree_constraint (true, cl, TFree (a, _)) = add_var ((a, ~1), cl) + | add_tfree_constraint _ = I fun tfree_constraints_of_clauses vt [] = vt - | tfree_constraints_of_clauses vt ([lit]::tss) = - (tfree_constraints_of_clauses (add_tfree_constraint (constraint_of_stree true lit, vt)) tss - handle STREE _ => (*not a positive type constraint: ignore*) - tfree_constraints_of_clauses vt tss) - | tfree_constraints_of_clauses vt (_::tss) = tfree_constraints_of_clauses vt tss; + | tfree_constraints_of_clauses vt ([lit] :: uss) = + (tfree_constraints_of_clauses (add_tfree_constraint + (constraint_of_node true lit) vt) uss + handle NODE _ => (* Not a positive type constraint? Ignore the literal. *) + tfree_constraints_of_clauses vt uss) + | tfree_constraints_of_clauses vt (_ :: uss) = + tfree_constraints_of_clauses vt uss (**** Translation of TSTP files to Isar Proofs ****) -fun decode_proof_steps ctxt tuples = - let val vt0 = tfree_constraints_of_clauses Vartab.empty (map #2 tuples) in - #1 (fold_map (decode_proof_step vt0) tuples ctxt) - end - -(** Finding a matching assumption. The literals may be permuted, and variable names - may disagree. We must try all combinations of literals (quadratic!) and - match the variable names consistently. **) - -fun strip_alls_aux n (Const(@{const_name all}, _)$Abs(a,T,t)) = - strip_alls_aux (n+1) (subst_bound (Var ((a,n), T), t)) - | strip_alls_aux _ t = t; - -val strip_alls = strip_alls_aux 0; - -exception MATCH_LITERAL of unit +fun unvarify_term (Var ((s, 0), T)) = Free (s, T) + | unvarify_term t = raise TERM ("unvarify_term: non-Var", [t]) -(* Remark 1: Ignore types. They are not to be trusted. - Remark 2: Ignore order of arguments for equality. SPASS sometimes swaps - them for no apparent reason. 
*) -fun match_literal (Const (@{const_name "op ="}, _) $ t1 $ u1) - (Const (@{const_name "op ="}, _) $ t2 $ u2) env = - (env |> match_literal t1 t2 |> match_literal u1 u2 - handle MATCH_LITERAL () => - env |> match_literal t1 u2 |> match_literal u1 t2) - | match_literal (t1 $ u1) (t2 $ u2) env = - env |> match_literal t1 t2 |> match_literal u1 u2 - | match_literal (Abs (_,_,t1)) (Abs (_,_,t2)) env = - match_literal t1 t2 env - | match_literal (Bound i1) (Bound i2) env = - if i1=i2 then env else raise MATCH_LITERAL () - | match_literal (Const(a1,_)) (Const(a2,_)) env = - if a1=a2 then env else raise MATCH_LITERAL () - | match_literal (Free(a1,_)) (Free(a2,_)) env = - if a1=a2 then env else raise MATCH_LITERAL () - | match_literal (Var(ix1,_)) (Var(ix2,_)) env = insert (op =) (ix1,ix2) env - | match_literal _ _ _ = raise MATCH_LITERAL () - -(* Checking that all variable associations are unique. The list "env" contains - no repetitions, but does it contain say (x, y) and (y, y)? *) -fun good env = - let val (xs,ys) = ListPair.unzip env - in not (has_duplicates (op=) xs orelse has_duplicates (op=) ys) end; +fun clauses_in_lines (Definition (_, u, us)) = u :: us + | clauses_in_lines (Inference (_, us, _)) = us -(*Match one list of literals against another, ignoring types and the order of - literals. Sorting is unreliable because we don't have types or variable names.*) -fun matches_aux _ [] [] = true - | matches_aux env (lit::lits) ts = - let fun match1 us [] = false - | match1 us (t::ts) = - let val env' = match_literal lit t env - in (good env' andalso matches_aux env' lits (us@ts)) orelse - match1 (t::us) ts - end - handle MATCH_LITERAL () => match1 (t::us) ts - in match1 [] ts end; - -(*Is this length test useful?*) -fun matches (lits1,lits2) = - length lits1 = length lits2 andalso - matches_aux [] (map Envir.eta_contract lits1) (map Envir.eta_contract lits2); - -fun permuted_clause t = - let val lits = HOLogic.disjuncts t - fun perm [] = NONE - | perm (ctm::ctms) = - if matches (lits, HOLogic.disjuncts (HOLogic.dest_Trueprop (strip_alls ctm))) - then SOME ctm else perm ctms - in perm end; - -(*ctms is a list of conjecture clauses as yielded by Isabelle. Those returned by the - ATP may have their literals reordered.*) -fun isar_proof_body ctxt sorts ctms = +fun decode_line vt (Definition (num, u, us)) ctxt = + let + val t1 = clause_of_nodes ctxt vt [u] + val vars = snd (strip_comb t1) + val frees = map unvarify_term vars + val unvarify_args = subst_atomic (vars ~~ frees) + val t2 = clause_of_nodes ctxt vt us + val (t1, t2) = + HOLogic.eq_const HOLogic.typeT $ t1 $ t2 + |> unvarify_args |> uncombine_term |> check_formula ctxt + |> HOLogic.dest_eq + in + (Definition (num, t1, t2), + fold Variable.declare_term (maps OldTerm.term_frees [t1, t2]) ctxt) + end + | decode_line vt (Inference (num, us, deps)) ctxt = + let + val t = us |> clause_of_nodes ctxt vt + |> unskolemize_term |> uncombine_term |> check_formula ctxt + in + (Inference (num, t, deps), + fold Variable.declare_term (OldTerm.term_frees t) ctxt) + end +fun decode_lines ctxt lines = let - val _ = trace_proof_msg (K "\n\nisar_proof_body: start\n") - val string_of_term = - PrintMode.setmp (filter (curry (op =) Symbol.xsymbolsN) - (print_mode_value ())) - (Syntax.string_of_term ctxt) - fun have_or_show "show" _ = " show \"" - | have_or_show have lname = " " ^ have ^ " " ^ lname ^ ": \"" - fun do_line _ (lname, t, []) = - (* No depedencies: it's a conjecture clause, with no proof. 
*) - (case permuted_clause t ctms of - SOME u => " assume " ^ lname ^ ": \"" ^ string_of_term u ^ "\"\n" - | NONE => raise TERM ("Sledgehammer_Proof_Reconstruct.isar_proof_body", - [t])) - | do_line have (lname, t, deps) = - have_or_show have lname ^ - string_of_term (gen_all_vars (HOLogic.mk_Trueprop t)) ^ - "\"\n by (metis " ^ space_implode " " deps ^ ")\n" - fun do_lines [(lname, t, deps)] = [do_line "show" (lname, t, deps)] - | do_lines ((lname, t, deps) :: lines) = - do_line "have" (lname, t, deps) :: do_lines lines - in setmp_CRITICAL show_sorts sorts do_lines end; + val vt = tfree_constraints_of_clauses Vartab.empty + (map clauses_in_lines lines) + in #1 (fold_map (decode_line vt) lines ctxt) end + +fun aint_inference _ (Definition _) = true + | aint_inference t (Inference (_, t', _)) = not (t aconv t') -fun unequal t (_, t', _) = not (t aconv t'); - -(*No "real" literals means only type information*) -fun eq_types t = t aconv HOLogic.true_const; +(* No "real" literals means only type information (tfree_tcs, clsrel, or + clsarity). *) +val is_only_type_information = curry (op aconv) HOLogic.true_const -fun replace_dep (old:int, new) dep = if dep=old then new else [dep]; - -fun replace_deps (old:int, new) (lno, t, deps) = - (lno, t, List.foldl (uncurry (union (op =))) [] (map (replace_dep (old, new)) deps)); +fun replace_one_dep (old, new) dep = if dep = old then new else [dep] +fun replace_deps_in_line _ (line as Definition _) = line + | replace_deps_in_line p (Inference (num, t, deps)) = + Inference (num, t, fold (union (op =) o replace_one_dep p) deps []) (*Discard axioms; consolidate adjacent lines that prove the same clause, since they differ only in type information.*) -fun add_proof_line thm_names (lno, t, []) lines = - (* No dependencies: axiom or conjecture clause *) - if is_axiom thm_names lno then - (* Axioms are not proof lines *) - if eq_types t then - (* Must be clsrel/clsarity: type information, so delete refs to it *) - map (replace_deps (lno, [])) lines - else - (case take_prefix (unequal t) lines of - (_,[]) => lines (*no repetition of proof line*) - | (pre, (lno', _, _) :: post) => (*repetition: replace later line by earlier one*) - pre @ map (replace_deps (lno', [lno])) post) - else - (lno, t, []) :: lines - | add_proof_line _ (lno, t, deps) lines = - if eq_types t then (lno, t, deps) :: lines - (*Type information will be deleted later; skip repetition test.*) - else (*FIXME: Doesn't this code risk conflating proofs involving different types??*) - case take_prefix (unequal t) lines of - (_,[]) => (lno, t, deps) :: lines (*no repetition of proof line*) - | (pre, (lno', t', _) :: post) => - (lno, t', deps) :: (*repetition: replace later line by earlier one*) - (pre @ map (replace_deps (lno', [lno])) post); - -(*Recursively delete empty lines (type information) from the proof.*) -fun add_nonnull_prfline ((lno, t, []), lines) = (*no dependencies, so a conjecture clause*) - if eq_types t (*must be type information, tfree_tcs, clsrel, clsarity: delete refs to it*) - then delete_dep lno lines - else (lno, t, []) :: lines - | add_nonnull_prfline ((lno, t, deps), lines) = (lno, t, deps) :: lines -and delete_dep lno lines = List.foldr add_nonnull_prfline [] (map (replace_deps (lno, [])) lines); - -fun bad_free (Free (a,_)) = String.isPrefix skolem_prefix a - | bad_free _ = false; +fun add_line _ _ (line as Definition _) lines = line :: lines + | add_line conjecture_shape thm_names (Inference (num, t, [])) lines = + (* No dependencies: axiom, conjecture clause, or internal 
axioms or + definitions (Vampire). *) + if is_axiom_clause_number thm_names num then + (* Axioms are not proof lines. *) + if is_only_type_information t then + map (replace_deps_in_line (num, [])) lines + (* Is there a repetition? If so, replace later line by earlier one. *) + else case take_prefix (aint_inference t) lines of + (_, []) => lines (*no repetition of proof line*) + | (pre, Inference (num', _, _) :: post) => + pre @ map (replace_deps_in_line (num', [num])) post + else if is_conjecture_clause_number conjecture_shape num then + Inference (num, t, []) :: lines + else + map (replace_deps_in_line (num, [])) lines + | add_line _ _ (Inference (num, t, deps)) lines = + (* Type information will be deleted later; skip repetition test. *) + if is_only_type_information t then + Inference (num, t, deps) :: lines + (* Is there a repetition? If so, replace later line by earlier one. *) + else case take_prefix (aint_inference t) lines of + (* FIXME: Doesn't this code risk conflating proofs involving different + types?? *) + (_, []) => Inference (num, t, deps) :: lines + | (pre, Inference (num', t', _) :: post) => + Inference (num, t', deps) :: + pre @ map (replace_deps_in_line (num', [num])) post -(*TVars are forbidden in goals. Also, we don't want lines with <2 dependencies. - To further compress proofs, setting modulus:=n deletes every nth line, and nlines - counts the number of proof lines processed so far. - Deleted lines are replaced by their own dependencies. Note that the "add_nonnull_prfline" - phase may delete some dependencies, hence this phase comes later.*) -fun add_wanted_prfline ctxt _ ((lno, t, []), (nlines, lines)) = - (nlines, (lno, t, []) :: lines) (*conjecture clauses must be kept*) - | add_wanted_prfline ctxt modulus ((lno, t, deps), (nlines, lines)) = - if eq_types t orelse not (null (Term.add_tvars t [])) orelse - exists_subterm bad_free t orelse - (not (null lines) andalso (*final line can't be deleted for these reasons*) - (length deps < 2 orelse nlines mod modulus <> 0)) - then (nlines+1, map (replace_deps (lno, deps)) lines) (*Delete line*) - else (nlines+1, (lno, t, deps) :: lines); +(* Recursively delete empty lines (type information) from the proof. *) +fun add_nontrivial_line (Inference (num, t, [])) lines = + if is_only_type_information t then delete_dep num lines + else Inference (num, t, []) :: lines + | add_nontrivial_line line lines = line :: lines +and delete_dep num lines = + fold_rev add_nontrivial_line (map (replace_deps_in_line (num, [])) lines) [] + +(* ATPs sometimes reuse free variable names in the strangest ways. Surprisingly, + removing the offending lines often does the trick. *) +fun is_bad_free frees (Free x) = not (member (op =) frees x) + | is_bad_free _ _ = false + +(* Vampire is keen on producing these. 
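   A typical instance is ~ (x = x), a negated equation whose two sides are
   identical (up to alpha-conversion).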
*) +fun is_trivial_formula (@{const Not} $ (Const (@{const_name "op ="}, _) + $ t1 $ t2)) = (t1 aconv t2) + | is_trivial_formula t = false -(*Replace numeric proof lines by strings, either from thm_names or sequential line numbers*) -fun stringify_deps thm_names deps_map [] = [] - | stringify_deps thm_names deps_map ((lno, t, deps) :: lines) = - if is_axiom thm_names lno then - (Vector.sub(thm_names,lno-1), t, []) :: stringify_deps thm_names deps_map lines - else let val lname = Int.toString (length deps_map) - fun fix lno = if is_axiom thm_names lno - then SOME(Vector.sub(thm_names,lno-1)) - else AList.lookup (op =) deps_map lno; - in (lname, t, map_filter fix (distinct (op=) deps)) :: - stringify_deps thm_names ((lno,lname)::deps_map) lines - end; +fun add_desired_line _ _ _ _ _ (line as Definition _) (j, lines) = + (j, line :: lines) + | add_desired_line ctxt shrink_factor conjecture_shape thm_names frees + (Inference (num, t, deps)) (j, lines) = + (j + 1, + if is_axiom_clause_number thm_names num orelse + is_conjecture_clause_number conjecture_shape num orelse + (not (is_only_type_information t) andalso + null (Term.add_tvars t []) andalso + not (exists_subterm (is_bad_free frees) t) andalso + not (is_trivial_formula t) andalso + (null lines orelse (* last line must be kept *) + (length deps >= 2 andalso j mod shrink_factor = 0))) then + Inference (num, t, deps) :: lines (* keep line *) + else + map (replace_deps_in_line (num, deps)) lines) (* drop line *) -fun isar_proof_start i = - (if i = 1 then "" else "prefer " ^ string_of_int i ^ "\n") ^ - "proof (neg_clausify)\n"; -fun isar_fixes [] = "" - | isar_fixes ts = " fix " ^ space_implode " " ts ^ "\n"; -fun isar_proof_end 1 = "qed" - | isar_proof_end _ = "next" +(** EXTRACTING LEMMAS **) -fun isar_proof_from_atp_proof cnfs modulus sorts ctxt goal i thm_names = +(* A list consisting of the first number in each line is returned. + TSTP: Interesting lines have the form "cnf(108, axiom, ...)", where the + number (108) is extracted. + SPASS: Lines have the form "108[0:Inp] ...", where the first number (108) is + extracted. 
*) +fun extract_clause_numbers_in_atp_proof atp_proof = let - val _ = trace_proof_msg (K "\nisar_proof_from_atp_proof: start\n") - val tuples = map (parse_proof_line o explode) cnfs - val _ = trace_proof_msg (fn () => - Int.toString (length tuples) ^ " tuples extracted\n") - val ctxt = ProofContext.set_mode ProofContext.mode_schematic ctxt - val raw_lines = - fold_rev (add_proof_line thm_names) (decode_proof_steps ctxt tuples) [] - val _ = trace_proof_msg (fn () => - Int.toString (length raw_lines) ^ " raw_lines extracted\n") - val nonnull_lines = List.foldr add_nonnull_prfline [] raw_lines - val _ = trace_proof_msg (fn () => - Int.toString (length nonnull_lines) ^ " nonnull_lines extracted\n") - val (_, lines) = List.foldr (add_wanted_prfline ctxt modulus) (0,[]) nonnull_lines - val _ = trace_proof_msg (fn () => - Int.toString (length lines) ^ " lines extracted\n") - val (ccls, fixes) = neg_conjecture_clauses ctxt goal i - val _ = trace_proof_msg (fn () => - Int.toString (length ccls) ^ " conjecture clauses\n") - val ccls = map forall_intr_vars ccls - val _ = app (fn th => trace_proof_msg - (fn () => "\nccl: " ^ string_of_thm ctxt th)) ccls - val body = isar_proof_body ctxt sorts (map prop_of ccls) - (stringify_deps thm_names [] lines) - val n = Logic.count_prems (prop_of goal) - val _ = trace_proof_msg (K "\nisar_proof_from_atp_proof: finishing\n") - in - isar_proof_start i ^ isar_fixes (map #1 fixes) ^ implode body ^ - isar_proof_end n ^ "\n" - end - handle STREE _ => raise Fail "Cannot parse ATP output"; - - -(*=== EXTRACTING PROOF-TEXT === *) - -val begin_proof_strs = ["# SZS output start CNFRefutation.", - "=========== Refutation ==========", - "Here is a proof"]; - -val end_proof_strs = ["# SZS output end CNFRefutation", - "======= End of refutation =======", - "Formulae used in the proof"]; - -fun get_proof_extract proof = - (* Splits by the first possible of a list of splitters. *) - case pairself (find_first (fn s => String.isSubstring s proof)) - (begin_proof_strs, end_proof_strs) of - (SOME begin_string, SOME end_string) => - proof |> first_field begin_string |> the |> snd - |> first_field end_string |> the |> fst - | _ => raise Fail "Cannot extract proof" - -(* ==== CHECK IF PROOF WAS SUCCESSFUL === *) - -fun is_proof_well_formed proof = - forall (exists (fn s => String.isSubstring s proof)) - [begin_proof_strs, end_proof_strs] - -(* === EXTRACTING LEMMAS === *) -(* A list consisting of the first number in each line is returned. - TPTP: Interesting lines have the form "cnf(108, axiom, ...)", where the - number (108) is extracted. - DFG: Lines have the form "108[0:Inp] ...", where the first number (108) is - extracted. 
*) -fun get_step_nums proof_extract = - let - val toks = String.tokens (not o is_ident_char) - fun inputno ("cnf" :: ntok :: "axiom" :: _) = Int.fromString ntok - | inputno ("cnf" :: ntok :: "negated_conjecture" :: _) = - Int.fromString ntok - | inputno (ntok::"0"::"Inp"::_) = Int.fromString ntok (* DFG format *) - | inputno _ = NONE - val lines = split_lines proof_extract - in map_filter (inputno o toks) lines end + val tokens_of = String.tokens (not o is_ident_char) + fun extract_num ("cnf" :: num :: "axiom" :: _) = Int.fromString num + | extract_num (num :: "0" :: "Inp" :: _) = Int.fromString num + | extract_num _ = NONE + in atp_proof |> split_lines |> map_filter (extract_num o tokens_of) end -(*Used to label theorems chained into the sledgehammer call*) -val chained_hint = "CHAINED"; -val kill_chained = filter_out (curry (op =) chained_hint) +(* Used to label theorems chained into the Sledgehammer call (or rather + goal?) *) +val chained_hint = "sledgehammer_chained" fun apply_command _ 1 = "by " | apply_command 1 _ = "apply " | apply_command i _ = "prefer " ^ string_of_int i ^ " apply " -fun metis_command i n [] = - apply_command i n ^ "metis" - | metis_command i n xs = - apply_command i n ^ "(metis " ^ space_implode " " xs ^ ")" +fun metis_command i n [] = apply_command i n ^ "metis" + | metis_command i n ss = + apply_command i n ^ "(metis " ^ space_implode " " ss ^ ")" fun metis_line i n xs = "Try this command: " ^ Markup.markup Markup.sendback (metis_command i n xs) ^ ".\n" @@ -585,68 +591,396 @@ "To minimize the number of lemmas, try this command: " ^ Markup.markup Markup.sendback command ^ ".\n" -fun metis_proof_text (minimize_command, proof, thm_names, goal, i) = +fun metis_proof_text (minimize_command, atp_proof, thm_names, goal, i) = let val lemmas = - proof |> get_proof_extract - |> get_step_nums - |> filter (is_axiom thm_names) - |> map (fn i => Vector.sub (thm_names, i - 1)) - |> filter (fn x => x <> "??.unknown") - |> sort_distinct string_ord + atp_proof |> extract_clause_numbers_in_atp_proof + |> filter (is_axiom_clause_number thm_names) + |> map (fn i => Vector.sub (thm_names, i - 1)) + |> filter_out (fn s => s = "??.unknown" orelse s = chained_hint) + |> sort_distinct string_ord val n = Logic.count_prems (prop_of goal) - val xs = kill_chained lemmas + in (metis_line i n lemmas ^ minimize_line minimize_command lemmas, lemmas) end + +(** Isar proof construction and manipulation **) + +fun merge_fact_sets (ls1, ss1) (ls2, ss2) = + (union (op =) ls1 ls2, union (op =) ss1 ss2) + +type label = string * int +type facts = label list * string list + +datatype qualifier = Show | Then | Moreover | Ultimately + +datatype step = + Fix of (string * typ) list | + Let of term * term | + Assume of label * term | + Have of qualifier list * label * term * byline +and byline = + ByMetis of facts | + CaseSplit of step list list * facts + +fun smart_case_split [] facts = ByMetis facts + | smart_case_split proofs facts = CaseSplit (proofs, facts) + +val raw_prefix = "X" +val assum_prefix = "A" +val fact_prefix = "F" + +fun string_for_label (s, num) = s ^ string_of_int num + +fun add_fact_from_dep thm_names num = + if is_axiom_clause_number thm_names num then + apsnd (insert (op =) (Vector.sub (thm_names, num - 1))) + else + apfst (insert (op =) (raw_prefix, num)) + +fun forall_vars t = fold_rev forall_of (map Var (Term.add_vars t [])) t + +fun step_for_line _ _ (Definition (num, t1, t2)) = Let (t1, t2) + | step_for_line _ _ (Inference (num, t, [])) = Assume ((raw_prefix, num), t) + | 
step_for_line thm_names j (Inference (num, t, deps)) = + Have (if j = 1 then [Show] else [], (raw_prefix, num), + forall_vars t, + ByMetis (fold (add_fact_from_dep thm_names) deps ([], []))) + +fun proof_from_atp_proof pool ctxt shrink_factor atp_proof conjecture_shape + thm_names frees = + let + val lines = + atp_proof ^ "$" (* the $ sign acts as a sentinel *) + |> parse_proof pool + |> decode_lines ctxt + |> rpair [] |-> fold_rev (add_line conjecture_shape thm_names) + |> rpair [] |-> fold_rev add_nontrivial_line + |> rpair (0, []) |-> fold_rev (add_desired_line ctxt shrink_factor + conjecture_shape thm_names frees) + |> snd in - (metis_line i n xs ^ minimize_line minimize_command xs, kill_chained lemmas) + (if null frees then [] else [Fix frees]) @ + map2 (step_for_line thm_names) (length lines downto 1) lines end -val is_proof_line = String.isPrefix "cnf(" orf String.isSubstring "||" +val indent_size = 2 +val no_label = ("", ~1) + +fun no_show qs = not (member (op =) qs Show) + +(* When redirecting proofs, we keep information about the labels seen so far in + the "backpatches" data structure. The first component indicates which facts + should be associated with forthcoming proof steps. The second component is a + pair ("keep_ls", "drop_ls"), where "keep_ls" are the labels to keep and + "drop_ls" are those that should be dropped in a case split. *) +type backpatches = (label * facts) list * (label list * label list) + +fun used_labels_of_step (Have (_, _, _, by)) = + (case by of + ByMetis (ls, _) => ls + | CaseSplit (proofs, (ls, _)) => + fold (union (op =) o used_labels_of) proofs ls) + | used_labels_of_step _ = [] +and used_labels_of proof = fold (union (op =) o used_labels_of_step) proof [] + +fun new_labels_of_step (Fix _) = [] + | new_labels_of_step (Let _) = [] + | new_labels_of_step (Assume (l, _)) = [l] + | new_labels_of_step (Have (_, l, _, _)) = [l] +val new_labels_of = maps new_labels_of_step -fun do_space c = if Char.isSpace c then "" else str c +val join_proofs = + let + fun aux _ [] = NONE + | aux proof_tail (proofs as (proof1 :: _)) = + if exists null proofs then + NONE + else if forall (curry (op =) (hd proof1) o hd) (tl proofs) then + aux (hd proof1 :: proof_tail) (map tl proofs) + else case hd proof1 of + Have ([], l, t, by) => + if forall (fn Have ([], l', t', _) :: _ => (l, t) = (l', t') + | _ => false) (tl proofs) andalso + not (exists (member (op =) (maps new_labels_of proofs)) + (used_labels_of proof_tail)) then + SOME (l, t, map rev proofs, proof_tail) + else + NONE + | _ => NONE + in aux [] o map rev end + +fun case_split_qualifiers proofs = + case length proofs of + 0 => [] + | 1 => [Then] + | _ => [Ultimately] -fun strip_spaces_in_list [] = "" - | strip_spaces_in_list [c1] = do_space c1 - | strip_spaces_in_list [c1, c2] = do_space c1 ^ do_space c2 - | strip_spaces_in_list (c1 :: c2 :: c3 :: cs) = - if Char.isSpace c1 then - strip_spaces_in_list (c2 :: c3 :: cs) - else if Char.isSpace c2 then - if Char.isSpace c3 then - strip_spaces_in_list (c1 :: c3 :: cs) - else - str c1 ^ - (if is_ident_char c1 andalso is_ident_char c3 then " " else "") ^ - strip_spaces_in_list (c3 :: cs) - else - str c1 ^ strip_spaces_in_list (c2 :: c3 :: cs) +fun redirect_proof thy conjecture_shape hyp_ts concl_t proof = + let + val concl_ls = map (pair raw_prefix) (List.last conjecture_shape) + fun find_hyp num = nth hyp_ts (index_in_shape num conjecture_shape) + fun first_pass ([], contra) = ([], contra) + | first_pass ((step as Fix _) :: proof, contra) = + first_pass (proof, contra) |>> 
cons step + | first_pass ((step as Let _) :: proof, contra) = + first_pass (proof, contra) |>> cons step + | first_pass ((step as Assume (l as (_, num), t)) :: proof, contra) = + if member (op =) concl_ls l then + first_pass (proof, contra ||> cons step) + else + first_pass (proof, contra) |>> cons (Assume (l, find_hyp num)) + | first_pass ((step as Have (qs, l, t, ByMetis (ls, ss))) :: proof, + contra) = + if exists (member (op =) (fst contra)) ls then + first_pass (proof, contra |>> cons l ||> cons step) + else + first_pass (proof, contra) |>> cons step + | first_pass _ = raise Fail "malformed proof" + val (proof_top, (contra_ls, contra_proof)) = + first_pass (proof, (concl_ls, [])) + val backpatch_label = the_default ([], []) oo AList.lookup (op =) o fst + fun backpatch_labels patches ls = + fold merge_fact_sets (map (backpatch_label patches) ls) ([], []) + fun second_pass end_qs ([], assums, patches) = + ([Have (end_qs, no_label, + if length assums < length concl_ls then + clause_for_literals thy (map (negate_term thy o fst) assums) + else + concl_t, + ByMetis (backpatch_labels patches (map snd assums)))], patches) + | second_pass end_qs (Assume (l, t) :: proof, assums, patches) = + second_pass end_qs (proof, (t, l) :: assums, patches) + | second_pass end_qs (Have (qs, l, t, ByMetis (ls, ss)) :: proof, assums, + patches) = + if member (op =) (snd (snd patches)) l andalso + not (AList.defined (op =) (fst patches) l) then + second_pass end_qs (proof, assums, patches ||> apsnd (append ls)) + else + (case List.partition (member (op =) contra_ls) ls of + ([contra_l], co_ls) => + if no_show qs then + second_pass end_qs + (proof, assums, + patches |>> cons (contra_l, (l :: co_ls, ss))) + |>> cons (if member (op =) (fst (snd patches)) l then + Assume (l, negate_term thy t) + else + Have (qs, l, negate_term thy t, + ByMetis (backpatch_label patches l))) + else + second_pass end_qs (proof, assums, + patches |>> cons (contra_l, (co_ls, ss))) + | (contra_ls as _ :: _, co_ls) => + let + val proofs = + map_filter + (fn l => + if member (op =) concl_ls l then + NONE + else + let + val drop_ls = filter (curry (op <>) l) contra_ls + in + second_pass [] + (proof, assums, + patches ||> apfst (insert (op =) l) + ||> apsnd (union (op =) drop_ls)) + |> fst |> SOME + end) contra_ls + val facts = (co_ls, []) + in + (case join_proofs proofs of + SOME (l, t, proofs, proof_tail) => + Have (case_split_qualifiers proofs @ + (if null proof_tail then end_qs else []), l, t, + smart_case_split proofs facts) :: proof_tail + | NONE => + [Have (case_split_qualifiers proofs @ end_qs, no_label, + concl_t, smart_case_split proofs facts)], + patches) + end + | _ => raise Fail "malformed proof") + | second_pass _ _ = raise Fail "malformed proof" + val proof_bottom = + second_pass [Show] (contra_proof, [], ([], ([], []))) |> fst + in proof_top @ proof_bottom end -val strip_spaces = strip_spaces_in_list o String.explode +val kill_duplicate_assumptions_in_proof = + let + fun relabel_facts subst = + apfst (map (fn l => AList.lookup (op =) subst l |> the_default l)) + fun do_step (step as Assume (l, t)) (proof, subst, assums) = + (case AList.lookup (op aconv) assums t of + SOME l' => (proof, (l', l) :: subst, assums) + | NONE => (step :: proof, subst, (t, l) :: assums)) + | do_step (Have (qs, l, t, by)) (proof, subst, assums) = + (Have (qs, l, t, + case by of + ByMetis facts => ByMetis (relabel_facts subst facts) + | CaseSplit (proofs, facts) => + CaseSplit (map do_proof proofs, relabel_facts subst facts)) :: + proof, subst, 
assums) + | do_step step (proof, subst, assums) = (step :: proof, subst, assums) + and do_proof proof = fold do_step proof ([], [], []) |> #1 |> rev + in do_proof end + +val then_chain_proof = + let + fun aux _ [] = [] + | aux _ ((step as Assume (l, _)) :: proof) = step :: aux l proof + | aux l' (Have (qs, l, t, by) :: proof) = + (case by of + ByMetis (ls, ss) => + Have (if member (op =) ls l' then + (Then :: qs, l, t, + ByMetis (filter_out (curry (op =) l') ls, ss)) + else + (qs, l, t, ByMetis (ls, ss))) + | CaseSplit (proofs, facts) => + Have (qs, l, t, CaseSplit (map (aux no_label) proofs, facts))) :: + aux l proof + | aux _ (step :: proof) = step :: aux no_label proof + in aux no_label end + +fun kill_useless_labels_in_proof proof = + let + val used_ls = used_labels_of proof + fun do_label l = if member (op =) used_ls l then l else no_label + fun do_step (Assume (l, t)) = Assume (do_label l, t) + | do_step (Have (qs, l, t, by)) = + Have (qs, do_label l, t, + case by of + CaseSplit (proofs, facts) => + CaseSplit (map (map do_step) proofs, facts) + | _ => by) + | do_step step = step + in map do_step proof end + +fun prefix_for_depth n = replicate_string (n + 1) -fun isar_proof_text debug modulus sorts ctxt - (minimize_command, proof, thm_names, goal, i) = +val relabel_proof = + let + fun aux _ _ _ [] = [] + | aux subst depth (next_assum, next_fact) (Assume (l, t) :: proof) = + if l = no_label then + Assume (l, t) :: aux subst depth (next_assum, next_fact) proof + else + let val l' = (prefix_for_depth depth assum_prefix, next_assum) in + Assume (l', t) :: + aux ((l, l') :: subst) depth (next_assum + 1, next_fact) proof + end + | aux subst depth (next_assum, next_fact) (Have (qs, l, t, by) :: proof) = + let + val (l', subst, next_fact) = + if l = no_label then + (l, subst, next_fact) + else + let + val l' = (prefix_for_depth depth fact_prefix, next_fact) + in (l', (l, l') :: subst, next_fact + 1) end + val relabel_facts = + apfst (map (fn l => + case AList.lookup (op =) subst l of + SOME l' => l' + | NONE => raise Fail ("unknown label " ^ + quote (string_for_label l)))) + val by = + case by of + ByMetis facts => ByMetis (relabel_facts facts) + | CaseSplit (proofs, facts) => + CaseSplit (map (aux subst (depth + 1) (1, 1)) proofs, + relabel_facts facts) + in + Have (qs, l', t, by) :: + aux subst depth (next_assum, next_fact) proof + end + | aux subst depth nextp (step :: proof) = + step :: aux subst depth nextp proof + in aux [] 0 (1, 1) end + +fun string_for_proof ctxt i n = let - val cnfs = proof |> get_proof_extract |> split_lines |> map strip_spaces - |> filter is_proof_line + fun fix_print_mode f = + PrintMode.setmp (filter (curry (op =) Symbol.xsymbolsN) + (print_mode_value ())) f + fun do_indent ind = replicate_string (ind * indent_size) " " + fun do_free (s, T) = + maybe_quote s ^ " :: " ^ + maybe_quote (fix_print_mode (Syntax.string_of_typ ctxt) T) + fun do_label l = if l = no_label then "" else string_for_label l ^ ": " + fun do_have qs = + (if member (op =) qs Moreover then "moreover " else "") ^ + (if member (op =) qs Ultimately then "ultimately " else "") ^ + (if member (op =) qs Then then + if member (op =) qs Show then "thus" else "hence" + else + if member (op =) qs Show then "show" else "have") + val do_term = maybe_quote o fix_print_mode (Syntax.string_of_term ctxt) + fun do_facts (ls, ss) = + let + val ls = ls |> sort_distinct (prod_ord string_ord int_ord) + val ss = ss |> sort_distinct string_ord + in metis_command 1 1 (map string_for_label ls @ ss) end + and do_step ind 
(Fix xs) = + do_indent ind ^ "fix " ^ space_implode " and " (map do_free xs) ^ "\n" + | do_step ind (Let (t1, t2)) = + do_indent ind ^ "let " ^ do_term t1 ^ " = " ^ do_term t2 ^ "\n" + | do_step ind (Assume (l, t)) = + do_indent ind ^ "assume " ^ do_label l ^ do_term t ^ "\n" + | do_step ind (Have (qs, l, t, ByMetis facts)) = + do_indent ind ^ do_have qs ^ " " ^ + do_label l ^ do_term t ^ " " ^ do_facts facts ^ "\n" + | do_step ind (Have (qs, l, t, CaseSplit (proofs, facts))) = + space_implode (do_indent ind ^ "moreover\n") + (map (do_block ind) proofs) ^ + do_indent ind ^ do_have qs ^ " " ^ do_label l ^ do_term t ^ " " ^ + do_facts facts ^ "\n" + and do_steps prefix suffix ind steps = + let val s = implode (map (do_step ind) steps) in + replicate_string (ind * indent_size - size prefix) " " ^ prefix ^ + String.extract (s, ind * indent_size, + SOME (size s - ind * indent_size - 1)) ^ + suffix ^ "\n" + end + and do_block ind proof = do_steps "{ " " }" (ind + 1) proof + (* One-step proofs are pointless; better use the Metis one-liner + directly. *) + and do_proof [Have (_, _, _, ByMetis _)] = "" + | do_proof proof = + (if i <> 1 then "prefer " ^ string_of_int i ^ "\n" else "") ^ + do_indent 0 ^ "proof -\n" ^ + do_steps "" "" 1 proof ^ + do_indent 0 ^ (if n <> 1 then "next" else "qed") ^ "\n" + in do_proof end + +fun isar_proof_text (pool, debug, shrink_factor, ctxt, conjecture_shape) + (minimize_command, atp_proof, thm_names, goal, i) = + let + val thy = ProofContext.theory_of ctxt + val (frees, hyp_ts, concl_t) = strip_subgoal goal i + val n = Logic.count_prems (prop_of goal) val (one_line_proof, lemma_names) = - metis_proof_text (minimize_command, proof, thm_names, goal, i) - val tokens = String.tokens (fn c => c = #" ") one_line_proof + metis_proof_text (minimize_command, atp_proof, thm_names, goal, i) fun isar_proof_for () = - case isar_proof_from_atp_proof cnfs modulus sorts ctxt goal i thm_names of + case proof_from_atp_proof pool ctxt shrink_factor atp_proof + conjecture_shape thm_names frees + |> redirect_proof thy conjecture_shape hyp_ts concl_t + |> kill_duplicate_assumptions_in_proof + |> then_chain_proof + |> kill_useless_labels_in_proof + |> relabel_proof + |> string_for_proof ctxt i n of "" => "" - | isar_proof => - "\nStructured proof:\n" ^ Markup.markup Markup.sendback isar_proof + | proof => "\nStructured proof:\n" ^ Markup.markup Markup.sendback proof val isar_proof = - if member (op =) tokens chained_hint then - "" - else if debug then + if debug then isar_proof_for () else try isar_proof_for () |> the_default "Warning: The Isar proof construction failed.\n" in (one_line_proof ^ isar_proof, lemma_names) end -fun proof_text isar_proof debug modulus sorts ctxt = - if isar_proof then isar_proof_text debug modulus sorts ctxt - else metis_proof_text +fun proof_text isar_proof isar_params other_params = + (if isar_proof then isar_proof_text isar_params else metis_proof_text) + other_params end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/Sledgehammer/sledgehammer_util.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_util.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_util.ML Tue May 04 20:30:22 2010 +0200 @@ -6,6 +6,8 @@ signature SLEDGEHAMMER_UTIL = sig + val is_new_spass_version : bool + val pairf : ('a -> 'b) -> ('a -> 'c) -> 'a -> 'b * 'c val plural_s : int -> string val serial_commas : string -> string list -> string list val replace_all : string -> string -> string -> string @@ -13,14 +15,30 @@ val timestamp : unit -> string val 
parse_bool_option : bool -> string -> string -> bool option val parse_time_option : string -> string -> Time.time option - val hashw : word * word -> word - val hashw_char : Char.char * word -> word - val hashw_string : string * word -> word + val nat_subscript : int -> string + val unyxml : string -> string + val maybe_quote : string -> string + val monomorphic_term : Type.tyenv -> term -> term + val specialize_type : theory -> (string * typ) -> term -> term end; structure Sledgehammer_Util : SLEDGEHAMMER_UTIL = struct +val is_new_spass_version = + case getenv "SPASS_VERSION" of + "" => (case getenv "SPASS_HOME" of + "" => false + | s => + (* Hack: Preliminary versions of the SPASS 3.7 package don't set + "SPASS_VERSION". *) + String.isSubstring "/spass-3.7/" s) + | s => (case s |> space_explode "." |> map Int.fromString of + SOME m :: SOME n :: _ => m > 3 orelse (m = 3 andalso n >= 5) + | _ => false) + +fun pairf f g x = (f x, g x) + fun plural_s n = if n = 1 then "" else "s" fun serial_commas _ [] = ["??"] @@ -38,7 +56,6 @@ else aux (String.sub (s, 0) :: seen) (String.extract (s, 1, NONE)) in aux [] end - fun remove_all bef = replace_all bef "" val timestamp = Date.fmt "%Y-%m-%d %H:%M:%S" o Date.fromTimeLocal o Time.now @@ -73,11 +90,45 @@ SOME (Time.fromMilliseconds msecs) end -(* This hash function is recommended in Compilers: Principles, Techniques, and - Tools, by Aho, Sethi and Ullman. The hashpjw function, which they - particularly recommend, triggers a bug in versions of Poly/ML up to 4.2.0. *) -fun hashw (u, w) = Word.+ (u, Word.* (0w65599, w)) -fun hashw_char (c, w) = hashw (Word.fromInt (Char.ord c), w) -fun hashw_string (s:string, w) = CharVector.foldl hashw_char w s +val subscript = implode o map (prefix "\<^isub>") o explode +val nat_subscript = subscript o string_of_int + +fun plain_string_from_xml_tree t = + Buffer.empty |> XML.add_content t |> Buffer.content +val unyxml = plain_string_from_xml_tree o YXML.parse + +val is_long_identifier = forall Syntax.is_identifier o space_explode "." +fun maybe_quote y = + let val s = unyxml y in + y |> ((not (is_long_identifier (perhaps (try (unprefix "'")) s)) andalso + not (is_long_identifier (perhaps (try (unprefix "?")) s))) orelse + OuterKeyword.is_keyword s) ? 
quote + end + +fun monomorphic_term subst t = + map_types (map_type_tvar (fn v => + case Type.lookup subst v of + SOME typ => typ + | NONE => raise TERM ("monomorphic_term: uninstantiated schematic type \ \variable", [t]))) t + +fun specialize_type thy (s, T) t = + let + fun subst_for (Const (s', T')) = + if s = s' then + SOME (Sign.typ_match thy (T', T) Vartab.empty) + handle Type.TYPE_MATCH => NONE + else + NONE + | subst_for (t1 $ t2) = + (case subst_for t1 of SOME x => SOME x | NONE => subst_for t2) + | subst_for (Abs (_, _, t')) = subst_for t' + | subst_for _ = NONE + in + case subst_for t of + SOME subst => monomorphic_term subst t + | NONE => raise Type.TYPE_MATCH + end + end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/TFL/post.ML --- a/src/HOL/Tools/TFL/post.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/TFL/post.ML Tue May 04 20:30:22 2010 +0200 @@ -128,9 +128,9 @@ Split_Rule.split_rule_var (Term.head_of (HOLogic.dest_Trueprop (concl_of rl))) rl; (*lcp: put a theorem into Isabelle form, using meta-level connectives*) -val meta_outer = +fun meta_outer ctxt = curry_rule o Drule.export_without_context o - rule_by_tactic (REPEAT (FIRSTGOAL (resolve_tac [allI, impI, conjI] ORELSE' etac conjE))); + rule_by_tactic ctxt (REPEAT (FIRSTGOAL (resolve_tac [allI, impI, conjI] ORELSE' etac conjE))); (*Strip off the outer !P*) val spec'= read_instantiate @{context} [(("x", 0), "P::?'b=>bool")] spec; @@ -139,7 +139,9 @@ | tracing false msg = writeln msg; fun simplify_defn strict thy cs ss congs wfs id pats def0 = - let val def = Thm.freezeT def0 RS meta_eq_to_obj_eq + let + val ctxt = ProofContext.init_global thy + val def = Thm.unvarify_global def0 RS meta_eq_to_obj_eq val {rules,rows,TCs,full_pats_TCs} = Prim.post_definition congs (thy, (def,pats)) val {lhs=f,rhs} = S.dest_eq (concl def) @@ -153,7 +155,7 @@ TCs = TCs} val rules' = map (Drule.export_without_context o Object_Logic.rulify_no_asm) (R.CONJUNCTS rules) - in {induct = meta_outer ctxt (Object_Logic.rulify_no_asm (induction RS spec')), + in {induct = meta_outer ctxt (Object_Logic.rulify_no_asm (induction RS spec')), rules = ListPair.zip(rules', rows), tcs = (termination_goals rules') @ tcs} end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/TFL/rules.ML --- a/src/HOL/Tools/TFL/rules.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/TFL/rules.ML Tue May 04 20:30:22 2010 +0200 @@ -134,9 +134,8 @@ in fold_rev (fn tm => fn th => if check tm then DISCH tm th else th) (chyps thm) thm end; -(* freezeT expensive! *) fun UNDISCH thm = - let val tm = D.mk_prop (#1 (D.dest_imp (cconcl (Thm.freezeT thm)))) + let val tm = D.mk_prop (#1 (D.dest_imp (cconcl thm))) in Thm.implies_elim (thm RS mp) (ASSUME tm) end handle U.ERR _ => raise RULES_ERR "UNDISCH" "" | THM _ => raise RULES_ERR "UNDISCH" ""; @@ -252,7 +251,7 @@ | place _ _ = raise RULES_ERR "organize" "not a permutation.2" in place end; -(* freezeT expensive! 
*) + fun DISJ_CASESL disjth thl = let val c = cconcl disjth fun eq th atm = exists (fn t => HOLogic.dest_Trueprop t @@ -265,7 +264,7 @@ | DL th (th1::rst) = let val tm = #2(D.dest_disj(D.drop_prop(cconcl th))) in DISJ_CASES th th1 (DL (ASSUME tm) rst) end - in DL (Thm.freezeT disjth) (organize eq tml thl) + in DL disjth (organize eq tml thl) end; @@ -814,7 +813,7 @@ let val thy = Thm.theory_of_cterm ptm; val t = Thm.term_of ptm; - val ctxt = ProofContext.init thy |> Variable.auto_fixes t; + val ctxt = ProofContext.init_global thy |> Variable.auto_fixes t; in if strict then Goal.prove ctxt [] [] t (K tac) else Goal.prove ctxt [] [] t (K tac) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/TFL/tfl.ML --- a/src/HOL/Tools/TFL/tfl.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/TFL/tfl.ML Tue May 04 20:30:22 2010 +0200 @@ -361,7 +361,7 @@ (*For Isabelle, the lhs of a definition must be a constant.*) fun const_def sign (c, Ty, rhs) = - singleton (Syntax.check_terms (ProofContext.init sign)) + singleton (Syntax.check_terms (ProofContext.init_global sign)) (Const("==",dummyT) $ Const(c,Ty) $ rhs); (*Make all TVars available for instantiation by adding a ? to the front*) @@ -541,7 +541,7 @@ thy |> PureThy.add_defs false [Thm.no_attributes (Binding.name (fid ^ "_def"), defn)] - val def = Thm.freezeT def0; + val def = Thm.unvarify_global def0; val dummy = if !trace then writeln ("DEF = " ^ Display.string_of_thm_global theory def) else () diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/choice_specification.ML --- a/src/HOL/Tools/choice_specification.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/choice_specification.ML Tue May 04 20:30:22 2010 +0200 @@ -78,10 +78,9 @@ | NONE => mk_definitional cos arg end -fun add_specification axiomatic cos arg = - arg |> apsnd Thm.freezeT - |> proc_exprop axiomatic cos - |> apsnd Drule.export_without_context +fun add_specification axiomatic cos = + proc_exprop axiomatic cos + #> apsnd Drule.export_without_context (* Collect all intances of constants in term *) @@ -217,16 +216,17 @@ then writeln "specification" else () in - arg |> apsnd Thm.freezeT + arg |> apsnd Thm.unvarify_global |> process_all (zip3 alt_names rew_imps frees) end - fun after_qed [[thm]] = ProofContext.theory (fn thy => - #1 (post_process (add_specification axiomatic (zip3 names cnames overloaded) (thy, thm)))); + fun after_qed [[thm]] = (ProofContext.theory (fn thy => + #1 (post_process (add_specification axiomatic (zip3 names cnames overloaded) (thy, thm))))); in thy - |> ProofContext.init - |> Proof.theorem_i NONE after_qed [[(HOLogic.mk_Trueprop ex_prop, [])]] + |> ProofContext.init_global + |> Variable.declare_term ex_prop + |> Proof.theorem NONE after_qed [[(HOLogic.mk_Trueprop ex_prop, [])]] end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/cnf_funcs.ML --- a/src/HOL/Tools/cnf_funcs.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/cnf_funcs.ML Tue May 04 20:30:22 2010 +0200 @@ -436,8 +436,8 @@ val var = new_free () val thm2 = make_cnfx_disj_thm (betapply (x', var)) y' (* (x' | y') = body' *) val thm3 = forall_intr (cterm_of thy var) thm2 (* !!v. (x' | y') = body' *) - val thm4 = strip_shyps (thm3 COMP allI) (* ALL v. (x' | y') = body' *) - val thm5 = strip_shyps (thm4 RS make_cnfx_ex_cong) (* (EX v. (x' | y')) = (EX v. body') *) + val thm4 = Thm.strip_shyps (thm3 COMP allI) (* ALL v. (x' | y') = body' *) + val thm5 = Thm.strip_shyps (thm4 RS make_cnfx_ex_cong) (* (EX v. (x' | y')) = (EX v. body') *) in iff_trans OF [thm1, thm5] (* ((Ex x') | y') = (Ex v. 
body') *) end @@ -447,8 +447,8 @@ val var = new_free () val thm2 = make_cnfx_disj_thm x' (betapply (y', var)) (* (x' | y') = body' *) val thm3 = forall_intr (cterm_of thy var) thm2 (* !!v. (x' | y') = body' *) - val thm4 = strip_shyps (thm3 COMP allI) (* ALL v. (x' | y') = body' *) - val thm5 = strip_shyps (thm4 RS make_cnfx_ex_cong) (* (EX v. (x' | y')) = (EX v. body') *) + val thm4 = Thm.strip_shyps (thm3 COMP allI) (* ALL v. (x' | y') = body' *) + val thm5 = Thm.strip_shyps (thm4 RS make_cnfx_ex_cong) (* (EX v. (x' | y')) = (EX v. body') *) in iff_trans OF [thm1, thm5] (* (x' | (Ex y')) = (EX v. body') *) end @@ -467,8 +467,8 @@ val body = HOLogic.mk_conj (HOLogic.mk_disj (x, var), HOLogic.mk_disj (y, HOLogic.Not $ var)) val thm2 = make_cnfx_thm_from_nnf body (* (x | v) & (y | ~v) = body' *) val thm3 = forall_intr (cterm_of thy var) thm2 (* !!v. (x | v) & (y | ~v) = body' *) - val thm4 = strip_shyps (thm3 COMP allI) (* ALL v. (x | v) & (y | ~v) = body' *) - val thm5 = strip_shyps (thm4 RS make_cnfx_ex_cong) (* (EX v. (x | v) & (y | ~v)) = (EX v. body') *) + val thm4 = Thm.strip_shyps (thm3 COMP allI) (* ALL v. (x | v) & (y | ~v) = body' *) + val thm5 = Thm.strip_shyps (thm4 RS make_cnfx_ex_cong) (* (EX v. (x | v) & (y | ~v)) = (EX v. body') *) in iff_trans OF [thm1, thm5] end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/inductive.ML --- a/src/HOL/Tools/inductive.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/inductive.ML Tue May 04 20:30:22 2010 +0200 @@ -340,7 +340,7 @@ (* prove introduction rules *) -fun prove_intrs quiet_mode coind mono fp_def k params intr_ts rec_preds_defs ctxt = +fun prove_intrs quiet_mode coind mono fp_def k intr_ts rec_preds_defs ctxt ctxt' = let val _ = clean_message quiet_mode " Proving the introduction rules ..."; @@ -354,27 +354,27 @@ val rules = [refl, TrueI, notFalseI, exI, conjI]; - val intrs = map_index (fn (i, intr) => rulify - (Skip_Proof.prove ctxt (map (fst o dest_Free) params) [] intr (fn _ => EVERY + val intrs = map_index (fn (i, intr) => + Skip_Proof.prove ctxt [] [] intr (fn _ => EVERY [rewrite_goals_tac rec_preds_defs, rtac (unfold RS iffD2) 1, EVERY1 (select_disj (length intr_ts) (i + 1)), (*Not ares_tac, since refl must be tried before any equality assumptions; backtracking may occur if the premises have extra variables!*) - DEPTH_SOLVE_1 (resolve_tac rules 1 APPEND assume_tac 1)]))) intr_ts + DEPTH_SOLVE_1 (resolve_tac rules 1 APPEND assume_tac 1)]) + |> rulify + |> singleton (ProofContext.export ctxt ctxt')) intr_ts in (intrs, unfold) end; (* prove elimination rules *) -fun prove_elims quiet_mode cs params intr_ts intr_names unfold rec_preds_defs ctxt = +fun prove_elims quiet_mode cs params intr_ts intr_names unfold rec_preds_defs ctxt ctxt''' = let val _ = clean_message quiet_mode " Proving the elimination rules ..."; - val ([pname], ctxt') = ctxt |> - Variable.add_fixes (map (fst o dest_Free) params) |> snd |> - Variable.variant_fixes ["P"]; + val ([pname], ctxt') = Variable.variant_fixes ["P"] ctxt; val P = HOLogic.mk_Trueprop (Free (pname, HOLogic.boolT)); fun dest_intr r = @@ -410,7 +410,7 @@ EVERY (map (fn prem => DEPTH_SOLVE_1 (ares_tac [rewrite_rule rec_preds_defs prem, conjI] 1)) (tl prems))]) |> rulify - |> singleton (ProofContext.export ctxt'' ctxt), + |> singleton (ProofContext.export ctxt'' ctxt'''), map #2 c_intrs, length Ts) end @@ -446,7 +446,7 @@ val cprop = Thm.cterm_of thy prop; val tac = ALLGOALS (simp_case_tac ss) THEN prune_params_tac; fun mk_elim rl = - Thm.implies_intr cprop (Tactic.rule_by_tactic tac 
(Thm.assume cprop RS rl)) + Thm.implies_intr cprop (Tactic.rule_by_tactic ctxt tac (Thm.assume cprop RS rl)) |> singleton (Variable.export (Variable.auto_fixes prop ctxt) ctxt); in (case get_first (try mk_elim) elims of @@ -488,16 +488,14 @@ (* prove induction rule *) fun prove_indrule quiet_mode cs argTs bs xs rec_const params intr_ts mono - fp_def rec_preds_defs ctxt = + fp_def rec_preds_defs ctxt ctxt''' = let val _ = clean_message quiet_mode " Proving the induction rule ..."; val thy = ProofContext.theory_of ctxt; (* predicates for induction rule *) - val (pnames, ctxt') = ctxt |> - Variable.add_fixes (map (fst o dest_Free) params) |> snd |> - Variable.variant_fixes (mk_names "P" (length cs)); + val (pnames, ctxt') = Variable.variant_fixes (mk_names "P" (length cs)) ctxt; val preds = map2 (curry Free) pnames (map (fn c => arg_types_of (length params) c ---> HOLogic.boolT) cs); @@ -592,7 +590,7 @@ rewrite_goals_tac simp_thms', atac 1])]) - in singleton (ProofContext.export ctxt'' ctxt) (induct RS lemma) end; + in singleton (ProofContext.export ctxt'' ctxt''') (induct RS lemma) end; @@ -689,11 +687,13 @@ ||> Local_Theory.restore_naming lthy'; val preds = (case cs of [_] => [rec_const] | _ => map #1 consts_defs); - val mono = prove_mono quiet_mode skip_mono fork_mono predT fp_fun monos lthy''; - val ((_, [mono']), lthy''') = - Local_Theory.note (apfst Binding.conceal Attrib.empty_binding, [mono]) lthy''; + val (_, lthy''') = Variable.add_fixes (map (fst o dest_Free) params) lthy''; + val mono = prove_mono quiet_mode skip_mono fork_mono predT fp_fun monos lthy'''; + val (_, lthy'''') = + Local_Theory.note (apfst Binding.conceal Attrib.empty_binding, + ProofContext.export lthy''' lthy'' [mono]) lthy''; - in (lthy''', rec_name, mono', fp_def', map (#2 o #2) consts_defs, + in (lthy'''', lthy''', rec_name, mono, fp_def', map (#2 o #2) consts_defs, list_comb (rec_const, params), preds, argTs, bs, xs) end; @@ -774,31 +774,30 @@ val ((intr_names, intr_atts), intr_ts) = apfst split_list (split_list (map (check_rule lthy cs params) intros)); - val (lthy1, rec_name, mono, fp_def, rec_preds_defs, rec_const, preds, + val (lthy1, lthy2, rec_name, mono, fp_def, rec_preds_defs, rec_const, preds, argTs, bs, xs) = mk_ind_def quiet_mode skip_mono fork_mono alt_name coind cs intr_ts monos params cnames_syn lthy; val (intrs, unfold) = prove_intrs quiet_mode coind mono fp_def (length bs + length xs) - params intr_ts rec_preds_defs lthy1; + intr_ts rec_preds_defs lthy2 lthy1; val elims = if no_elim then [] else prove_elims quiet_mode cs params intr_ts (map Binding.name_of intr_names) - unfold rec_preds_defs lthy1; + unfold rec_preds_defs lthy2 lthy1; val raw_induct = zero_var_indexes (if no_ind then Drule.asm_rl else if coind then - singleton (ProofContext.export - (snd (Variable.add_fixes (map (fst o dest_Free) params) lthy1)) lthy1) + singleton (ProofContext.export lthy2 lthy1) (rotate_prems ~1 (Object_Logic.rulify (fold_rule rec_preds_defs (rewrite_rule simp_thms''' (mono RS (fp_def RS @{thm def_coinduct})))))) else prove_indrule quiet_mode cs argTs bs xs rec_const params intr_ts mono fp_def - rec_preds_defs lthy1); + rec_preds_defs lthy2 lthy1); - val (intrs', elims', induct, inducts, lthy2) = declare_rules rec_name coind no_ind + val (intrs', elims', induct, inducts, lthy3) = declare_rules rec_name coind no_ind cnames preds intrs intr_names intr_atts elims raw_induct lthy1; val result = @@ -809,11 +808,11 @@ induct = induct, inducts = inducts}; - val lthy3 = lthy2 + val lthy4 = lthy3 |> 
Local_Theory.declaration false (fn phi => let val result' = morph_result phi result; in put_inductives cnames (*global names!?*) ({names = cnames, coind = coind}, result') end); - in (result, lthy3) end; + in (result, lthy4) end; (* external interfaces *) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/inductive_codegen.ML --- a/src/HOL/Tools/inductive_codegen.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/inductive_codegen.ML Tue May 04 20:30:22 2010 +0200 @@ -66,7 +66,7 @@ val nparms = (case optnparms of SOME k => k | NONE => (case rules of - [] => (case try (Inductive.the_inductive (ProofContext.init thy)) s of + [] => (case try (Inductive.the_inductive (ProofContext.init_global thy)) s of SOME (_, {raw_induct, ...}) => length (Inductive.params_of raw_induct) | NONE => 0) @@ -84,7 +84,7 @@ fun get_clauses thy s = let val {intros, graph, ...} = CodegenData.get thy in case Symtab.lookup intros s of - NONE => (case try (Inductive.the_inductive (ProofContext.init thy)) s of + NONE => (case try (Inductive.the_inductive (ProofContext.init_global thy)) s of NONE => NONE | SOME ({names, ...}, {intrs, raw_induct, ...}) => SOME (names, Codegen.thyname_of_const thy s, diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/inductive_realizer.ML --- a/src/HOL/Tools/inductive_realizer.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/inductive_realizer.ML Tue May 04 20:30:22 2010 +0200 @@ -137,7 +137,7 @@ fun fun_of_prem thy rsets vs params rule ivs intr = let - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy val args = map (Free o apfst fst o dest_Var) ivs; val args' = map (Free o apfst fst) (subtract (op =) params (Term.add_vars (prop_of intr) [])); @@ -484,7 +484,7 @@ fun add_ind_realizers name rsets thy = let val (_, {intrs, induct, raw_induct, elims, ...}) = - Inductive.the_inductive (ProofContext.init thy) name; + Inductive.the_inductive (ProofContext.init_global thy) name; val vss = sort (int_ord o pairself length) (subsets (map fst (relevant_vars (concl_of (hd intrs))))) in diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/meson.ML --- a/src/HOL/Tools/meson.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/meson.ML Tue May 04 20:30:22 2010 +0200 @@ -555,7 +555,7 @@ skolemize_nnf_list ctxt ths); fun add_clauses th cls = - let val ctxt0 = Variable.thm_context th + let val ctxt0 = Variable.global_thm_context th val (cnfs, ctxt) = make_cnf [] th ctxt0 in Variable.export ctxt ctxt0 cnfs @ cls end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/numeral_simprocs.ML --- a/src/HOL/Tools/numeral_simprocs.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/numeral_simprocs.ML Tue May 04 20:30:22 2010 +0200 @@ -332,8 +332,8 @@ val field_combine_numerals = Arith_Data.prep_simproc @{theory} ("field_combine_numerals", - ["(i::'a::{number_ring,field,division_by_zero}) + j", - "(i::'a::{number_ring,field,division_by_zero}) - j"], + ["(i::'a::{field_inverse_zero, number_ring}) + j", + "(i::'a::{field_inverse_zero, number_ring}) - j"], K FieldCombineNumerals.proc); (** Constant folding for multiplication in semirings **) @@ -442,9 +442,9 @@ "(l::'a::{semiring_div,number_ring}) div (m * n)"], K DivCancelNumeralFactor.proc), ("divide_cancel_numeral_factor", - ["((l::'a::{division_by_zero,field,number_ring}) * m) / n", - "(l::'a::{division_by_zero,field,number_ring}) / (m * n)", - "((number_of v)::'a::{division_by_zero,field,number_ring}) / (number_of w)"], + ["((l::'a::{field_inverse_zero,number_ring}) * m) / n", + "(l::'a::{field_inverse_zero,number_ring}) / (m * 
n)", + "((number_of v)::'a::{field_inverse_zero,number_ring}) / (number_of w)"], K DivideCancelNumeralFactor.proc)]; val field_cancel_numeral_factors = @@ -454,9 +454,9 @@ "(l::'a::{field,number_ring}) = m * n"], K EqCancelNumeralFactor.proc), ("field_cancel_numeral_factor", - ["((l::'a::{division_by_zero,field,number_ring}) * m) / n", - "(l::'a::{division_by_zero,field,number_ring}) / (m * n)", - "((number_of v)::'a::{division_by_zero,field,number_ring}) / (number_of w)"], + ["((l::'a::{field_inverse_zero,number_ring}) * m) / n", + "(l::'a::{field_inverse_zero,number_ring}) / (m * n)", + "((number_of v)::'a::{field_inverse_zero,number_ring}) / (number_of w)"], K DivideCancelNumeralFactor.proc)] @@ -598,8 +598,8 @@ ["((l::'a::idom) * m) dvd n", "(l::'a::idom) dvd (m * n)"], K DvdCancelFactor.proc), ("divide_cancel_factor", - ["((l::'a::{division_by_zero,field}) * m) / n", - "(l::'a::{division_by_zero,field}) / (m * n)"], + ["((l::'a::field_inverse_zero) * m) / n", + "(l::'a::field_inverse_zero) / (m * n)"], K DivideCancelFactor.proc)]; end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/old_primrec.ML --- a/src/HOL/Tools/old_primrec.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/old_primrec.ML Tue May 04 20:30:22 2010 +0200 @@ -214,7 +214,7 @@ fs @ map Bound (0 ::(length ls downto 1)))) val def_name = Long_Name.base_name fname ^ "_" ^ Long_Name.base_name tname ^ "_def"; val def_prop = - singleton (Syntax.check_terms (ProofContext.init thy)) + singleton (Syntax.check_terms (ProofContext.init_global thy)) (Logic.mk_equals (Const (fname, dummyT), rhs)); in (def_name, def_prop) end; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/recdef.ML --- a/src/HOL/Tools/recdef.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/recdef.ML Tue May 04 20:30:22 2010 +0200 @@ -160,7 +160,7 @@ fun prepare_hints thy opt_src = let - val ctxt0 = ProofContext.init thy; + val ctxt0 = ProofContext.init_global thy; val ctxt = (case opt_src of NONE => ctxt0 @@ -172,7 +172,7 @@ fun prepare_hints_i thy () = let - val ctxt0 = ProofContext.init thy; + val ctxt0 = ProofContext.init_global thy; val {simps, congs, wfs} = get_global_hints thy; in (claset_of ctxt0, simpset_of ctxt0 addsimps simps, rev (map snd congs), wfs) end; @@ -234,7 +234,7 @@ val _ = requires_recdef thy; val _ = writeln ("Deferred recursive function " ^ quote name ^ " ..."); - val congs = eval_thms (ProofContext.init thy) raw_congs; + val congs = eval_thms (ProofContext.init_global thy) raw_congs; val (thy2, induct_rules) = tfl_fn thy congs name eqs; val ([induct_rules'], thy3) = thy2 diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/record.ML --- a/src/HOL/Tools/record.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/record.ML Tue May 04 20:30:22 2010 +0200 @@ -1038,7 +1038,7 @@ let val thm = if ! quick_and_dirty then Skip_Proof.make_thm thy prop else if Goal.future_enabled () then - Goal.future_result (ProofContext.init thy) (Future.fork_pri ~1 prf) prop + Goal.future_result (ProofContext.init_global thy) (Future.fork_pri ~1 prf) prop else prf () in Drule.export_without_context thm end; @@ -1048,7 +1048,7 @@ if ! 
quick_and_dirty then Skip_Proof.prove else if immediate orelse not (Goal.future_enabled ()) then Goal.prove else Goal.prove_future; - val prf = prv (ProofContext.init thy) [] asms prop tac; + val prf = prv (ProofContext.init_global thy) [] asms prop tac; in if stndrd then Drule.export_without_context prf else prf end; val prove_future_global = prove_common false; @@ -1090,7 +1090,7 @@ else mk_comp_id acc; val prop = lhs === rhs; val othm = - Goal.prove (ProofContext.init thy) [] [] prop + Goal.prove (ProofContext.init_global thy) [] [] prop (fn _ => simp_tac defset 1 THEN REPEAT_DETERM (Iso_Tuple_Support.iso_tuple_intros_tac 1) THEN @@ -1114,7 +1114,7 @@ else mk_comp (u' $ f') (u $ f); val prop = lhs === rhs; val othm = - Goal.prove (ProofContext.init thy) [] [] prop + Goal.prove (ProofContext.init_global thy) [] [] prop (fn _ => simp_tac defset 1 THEN REPEAT_DETERM (Iso_Tuple_Support.iso_tuple_intros_tac 1) THEN @@ -1155,7 +1155,7 @@ val (_, args) = strip_comb lhs; val simps = (if length args = 1 then get_accupd_simps else get_updupd_simps) thy lhs defset; in - Goal.prove (ProofContext.init thy) [] [] prop' + Goal.prove (ProofContext.init_global thy) [] [] prop' (fn _ => simp_tac (HOL_basic_ss addsimps (simps @ [K_record_comp])) 1 THEN TRY (simp_tac (HOL_basic_ss addsimps ex_simps addsimprocs ex_simprs) 1)) @@ -1247,7 +1247,7 @@ val insts = [("upd", cterm_of thy upd), ("acc", cterm_of thy acc)]; val prop = Thm.concl_of (named_cterm_instantiate insts updacc_cong_triv); in - Goal.prove (ProofContext.init thy) [] [] prop + Goal.prove (ProofContext.init_global thy) [] [] prop (fn _ => simp_tac simpset 1 THEN REPEAT_DETERM (Iso_Tuple_Support.iso_tuple_intros_tac 1) THEN @@ -2388,7 +2388,7 @@ if quiet_mode then () else writeln ("Defining record " ^ quote (Binding.str_of binding) ^ " ..."); - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; fun cert_typ T = Type.no_tvars (ProofContext.cert_typ ctxt T) handle TYPE (msg, _, _) => error msg; @@ -2438,7 +2438,7 @@ fun add_record_cmd quiet_mode (raw_params, binding) raw_parent raw_fields thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val params = map (apsnd (Typedecl.read_constraint ctxt)) raw_params; val ctxt1 = fold (Variable.declare_typ o TFree) params ctxt; val (parent, ctxt2) = read_parent raw_parent ctxt1; diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/refute.ML --- a/src/HOL/Tools/refute.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/refute.ML Tue May 04 20:30:22 2010 +0200 @@ -70,8 +70,6 @@ val is_IDT_constructor : theory -> string * typ -> bool val is_IDT_recursor : theory -> string * typ -> bool val is_const_of_class: theory -> string * typ -> bool - val monomorphic_term : Type.tyenv -> term -> term - val specialize_type : theory -> (string * typ) -> term -> term val string_of_typ : typ -> string val typ_of_dtyp : Datatype.descr -> (Datatype.dtyp * typ) list -> Datatype.dtyp -> typ end; (* signature REFUTE *) @@ -449,57 +447,8 @@ Term.all T $ Abs (x, T, abstract_over (Var ((x, i), T), t'))) vars t end; -(* ------------------------------------------------------------------------- *) -(* monomorphic_term: applies a type substitution 'typeSubs' for all type *) -(* variables in a term 't' *) -(* ------------------------------------------------------------------------- *) - - (* Type.tyenv -> Term.term -> Term.term *) - - fun monomorphic_term typeSubs t = - map_types (map_type_tvar - (fn v => - case Type.lookup typeSubs v of - NONE => - (* schematic type variable 
not instantiated *) - raise REFUTE ("monomorphic_term", - "no substitution for type variable " ^ fst (fst v) ^ - " in term " ^ Syntax.string_of_term_global Pure.thy t) - | SOME typ => - typ)) t; - -(* ------------------------------------------------------------------------- *) -(* specialize_type: given a constant 's' of type 'T', which is a subterm of *) -(* 't', where 't' has a (possibly) more general type, the *) -(* schematic type variables in 't' are instantiated to *) -(* match the type 'T' (may raise Type.TYPE_MATCH) *) -(* ------------------------------------------------------------------------- *) - - (* theory -> (string * Term.typ) -> Term.term -> Term.term *) - - fun specialize_type thy (s, T) t = - let - fun find_typeSubs (Const (s', T')) = - if s=s' then - SOME (Sign.typ_match thy (T', T) Vartab.empty) - handle Type.TYPE_MATCH => NONE - else - NONE - | find_typeSubs (Free _) = NONE - | find_typeSubs (Var _) = NONE - | find_typeSubs (Bound _) = NONE - | find_typeSubs (Abs (_, _, body)) = find_typeSubs body - | find_typeSubs (t1 $ t2) = - (case find_typeSubs t1 of SOME x => SOME x - | NONE => find_typeSubs t2) - in - case find_typeSubs t of - SOME typeSubs => - monomorphic_term typeSubs t - | NONE => - (* no match found - perhaps due to sort constraints *) - raise Type.TYPE_MATCH - end; +val monomorphic_term = Sledgehammer_Util.monomorphic_term +val specialize_type = Sledgehammer_Util.specialize_type (* ------------------------------------------------------------------------- *) (* is_const_of_class: returns 'true' iff 'Const (s, T)' is a constant that *) @@ -1357,7 +1306,6 @@ val subst_t = Term.subst_bounds (map Free frees, strip_t) in find_model thy (actual_params thy params) assm_ts subst_t true - handle REFUTE (s, s') => error ("REFUTE " ^ s ^ " " ^ s') (* ### *) end; (* ------------------------------------------------------------------------- *) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/simpdata.ML --- a/src/HOL/Tools/simpdata.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/simpdata.ML Tue May 04 20:30:22 2010 +0200 @@ -48,7 +48,7 @@ | _ $ (Const (@{const_name "Not"}, _) $ _) => th RS @{thm Eq_FalseI} | _ => th RS @{thm Eq_TrueI} -fun mk_eq_True r = +fun mk_eq_True (_: simpset) r = SOME (r RS @{thm meta_eq_to_obj_eq} RS @{thm Eq_TrueI}) handle Thm.THM _ => NONE; (* Produce theorems of the form @@ -80,7 +80,7 @@ end; (*Congruence rules for = (instead of ==)*) -fun mk_meta_cong rl = zero_var_indexes +fun mk_meta_cong (_: simpset) rl = zero_var_indexes (let val rl' = Seq.hd (TRYALL (fn i => fn st => rtac (lift_meta_eq_to_obj_eq i st) i st) rl) in mk_meta_eq rl' handle THM _ => @@ -95,7 +95,7 @@ fun res th = map (fn rl => th RS rl); (*exception THM*) fun res_fixed rls = if Thm.maxidx_of (Thm.adjust_maxidx_thm ~1 thm) = ~1 then res thm rls - else Variable.trade (K (fn [thm'] => res thm' rls)) (Variable.thm_context thm) [thm]; + else Variable.trade (K (fn [thm'] => res thm' rls)) (Variable.global_thm_context thm) [thm]; in case concl_of thm of Const (@{const_name Trueprop}, _) $ p => (case head_of p @@ -107,7 +107,7 @@ end; in atoms end; -fun mksimps pairs = +fun mksimps pairs (_: simpset) = map_filter (try mk_eq) o mk_atomize pairs o gen_all; fun unsafe_solver_tac prems = diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/split_rule.ML --- a/src/HOL/Tools/split_rule.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/split_rule.ML Tue May 04 20:30:22 2010 +0200 @@ -116,7 +116,7 @@ fun split_rule_goal ctxt xss rl = let - fun one_split i s = Tactic.rule_by_tactic 
(pair_tac ctxt s i); + fun one_split i s = Tactic.rule_by_tactic ctxt (pair_tac ctxt s i); fun one_goal (i, xs) = fold (one_split (i + 1)) xs; in rl diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/typecopy.ML --- a/src/HOL/Tools/typecopy.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/typecopy.ML Tue May 04 20:30:22 2010 +0200 @@ -52,7 +52,7 @@ fun typecopy (raw_tyco, raw_vs) raw_ty constr_proj thy = let val ty = Sign.certify_typ thy raw_ty; - val ctxt = ProofContext.init thy |> Variable.declare_typ ty; + val ctxt = ProofContext.init_global thy |> Variable.declare_typ ty; val vs = map (ProofContext.check_tfree ctxt) raw_vs; val tac = Tactic.rtac UNIV_witness 1; fun add_info tyco (({ abs_type = ty_abs, rep_type = ty_rep, Abs_name = c_abs, diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Tools/typedef.ML --- a/src/HOL/Tools/typedef.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Tools/typedef.ML Tue May 04 20:30:22 2010 +0200 @@ -182,7 +182,8 @@ val thy = ProofContext.theory_of set_lthy; val cert = Thm.cterm_of thy; val (defs, A) = - Local_Defs.export_cterm set_lthy (ProofContext.init thy) (cert set') ||> Thm.term_of; + Local_Defs.export_cterm set_lthy (ProofContext.init_global thy) (cert set') + ||> Thm.term_of; val ((RepC, AbsC, axiom_name, axiom), axiom_lthy) = set_lthy |> Local_Theory.theory_result (primitive_typedef typedef_name newT oldT Rep_name Abs_name A); @@ -282,7 +283,7 @@ val ((goal, goal_pat, typedef_result), lthy') = prepare_typedef prep_term def name (b, args, mx) set opt_morphs lthy; fun after_qed [[th]] = snd o typedef_result th; - in Proof.theorem_i NONE after_qed [[(goal, [goal_pat])]] lthy' end; + in Proof.theorem NONE after_qed [[(goal, [goal_pat])]] lthy' end; in diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Unix/Unix.thy --- a/src/HOL/Unix/Unix.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Unix/Unix.thy Tue May 04 20:30:22 2010 +0200 @@ -358,7 +358,7 @@ read: "access root path uid {Readable} = Some (Val (att, text)) \ root \(Read uid text path)\ root" | - write: + "write": "access root path uid {Writable} = Some (Val (att, text')) \ root \(Write uid text path)\ update path (Some (Val (att, text))) root" | @@ -436,7 +436,7 @@ case read with root' show ?thesis by cases auto next - case write + case "write" with root' show ?thesis by cases auto next case chmod diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Wellfounded.thy --- a/src/HOL/Wellfounded.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Wellfounded.thy Tue May 04 20:30:22 2010 +0200 @@ -68,7 +68,7 @@ assumes lin: "OFCLASS('a::ord, linorder_class)" shows "OFCLASS('a::ord, wellorder_class)" using lin by (rule wellorder_class.intro) - (blast intro: wellorder_axioms.intro wf_induct_rule [OF wf]) + (blast intro: class.wellorder_axioms.intro wf_induct_rule [OF wf]) lemma (in wellorder) wf: "wf {(x, y). 
x < y}" diff -r aace7a969410 -r 8629ac3efb19 src/HOL/Word/WordArith.thy --- a/src/HOL/Word/WordArith.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/Word/WordArith.thy Tue May 04 20:30:22 2010 +0200 @@ -17,7 +17,7 @@ by (auto simp del: word_uint.Rep_inject simp: word_uint.Rep_inject [symmetric]) -lemma signed_linorder: "linorder word_sle word_sless" +lemma signed_linorder: "class.linorder word_sle word_sless" proof qed (unfold word_sle_def word_sless_def, auto) diff -r aace7a969410 -r 8629ac3efb19 src/HOL/ex/Higher_Order_Logic.thy --- a/src/HOL/ex/Higher_Order_Logic.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/ex/Higher_Order_Logic.thy Tue May 04 20:30:22 2010 +0200 @@ -20,7 +20,7 @@ subsection {* Pure Logic *} classes type -defaultsort type +default_sort type typedecl o arities diff -r aace7a969410 -r 8629ac3efb19 src/HOL/ex/Lagrange.thy --- a/src/HOL/ex/Lagrange.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/ex/Lagrange.thy Tue May 04 20:30:22 2010 +0200 @@ -34,7 +34,7 @@ sq (x1*y2 + x2*y1 + x3*y4 - x4*y3) + sq (x1*y3 - x2*y4 + x3*y1 + x4*y2) + sq (x1*y4 + x2*y3 - x3*y2 + x4*y1)" -by (simp only: sq_def ring_simps) +by (simp only: sq_def field_simps) text {* A challenge by John Harrison. Takes about 12s on a 1.6GHz machine. *} @@ -50,6 +50,6 @@ sq (p1*u2 + q1*t2 - r1*w2 + s1*v2 - t1*q2 + u1*p2 - v1*s2 + w1*r2) + sq (p1*v2 + q1*w2 + r1*t2 - s1*u2 - t1*r2 + u1*s2 + v1*p2 - w1*q2) + sq (p1*w2 - q1*v2 + r1*u2 + s1*t2 - t1*s2 - u1*r2 + v1*q2 + w1*p2)" -by (simp only: sq_def ring_simps) +by (simp only: sq_def field_simps) end diff -r aace7a969410 -r 8629ac3efb19 src/HOL/ex/Landau.thy --- a/src/HOL/ex/Landau.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOL/ex/Landau.thy Tue May 04 20:30:22 2010 +0200 @@ -187,7 +187,7 @@ proof - interpret preorder_equiv less_eq_fun less_fun proof qed (simp_all add: less_fun_def less_eq_fun_refl, auto intro: less_eq_fun_trans) - show "preorder_equiv less_eq_fun less_fun" using preorder_equiv_axioms . + show "class.preorder_equiv less_eq_fun less_fun" using preorder_equiv_axioms . show "preorder_equiv.equiv less_eq_fun = equiv_fun" by (simp add: expand_fun_eq equiv_def equiv_fun_less_eq_fun) qed diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Adm.thy --- a/src/HOLCF/Adm.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Adm.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports Cont begin -defaultsort cpo +default_sort cpo subsection {* Definitions *} diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Algebraic.thy --- a/src/HOLCF/Algebraic.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Algebraic.thy Tue May 04 20:30:22 2010 +0200 @@ -297,7 +297,7 @@ subsection {* Type constructor for finite deflations *} -defaultsort profinite +default_sort profinite typedef (open) 'a fin_defl = "{d::'a \ 'a. 
finite_deflation d}" by (fast intro: finite_deflation_approx) diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Cfun.thy --- a/src/HOLCF/Cfun.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Cfun.thy Tue May 04 20:30:22 2010 +0200 @@ -9,7 +9,7 @@ imports Pcpodef Ffun Product_Cpo begin -defaultsort cpo +default_sort cpo subsection {* Definition of continuous function type *} @@ -511,7 +511,7 @@ subsection {* Strictified functions *} -defaultsort pcpo +default_sort pcpo definition strictify :: "('a \ 'b) \ 'a \ 'b" where diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/CompactBasis.thy --- a/src/HOLCF/CompactBasis.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/CompactBasis.thy Tue May 04 20:30:22 2010 +0200 @@ -10,7 +10,7 @@ subsection {* Compact bases of bifinite domains *} -defaultsort profinite +default_sort profinite typedef (open) 'a compact_basis = "{x::'a::profinite. compact x}" by (fast intro: compact_approx) @@ -237,12 +237,12 @@ where "fold_pd g f t = fold1 f (g ` Rep_pd_basis t)" lemma fold_pd_PDUnit: - assumes "ab_semigroup_idem_mult f" + assumes "class.ab_semigroup_idem_mult f" shows "fold_pd g f (PDUnit x) = g x" unfolding fold_pd_def Rep_PDUnit by simp lemma fold_pd_PDPlus: - assumes "ab_semigroup_idem_mult f" + assumes "class.ab_semigroup_idem_mult f" shows "fold_pd g f (PDPlus t u) = f (fold_pd g f t) (fold_pd g f u)" proof - interpret ab_semigroup_idem_mult f by fact diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Cont.thy --- a/src/HOLCF/Cont.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Cont.thy Tue May 04 20:30:22 2010 +0200 @@ -14,7 +14,7 @@ of default class po *} -defaultsort po +default_sort po subsection {* Definitions *} diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/ConvexPD.thy --- a/src/HOLCF/ConvexPD.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/ConvexPD.thy Tue May 04 20:30:22 2010 +0200 @@ -412,7 +412,7 @@ (\x y. \ f. x\f +\ y\f)" lemma ACI_convex_bind: - "ab_semigroup_idem_mult (\x y. \ f. x\f +\ y\f)" + "class.ab_semigroup_idem_mult (\x y. \ f. 
x\f +\ y\f)" apply unfold_locales apply (simp add: convex_plus_assoc) apply (simp add: convex_plus_commute) diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Cprod.thy --- a/src/HOLCF/Cprod.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Cprod.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports Bifinite begin -defaultsort cpo +default_sort cpo subsection {* Continuous case function for unit type *} diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Deflation.thy --- a/src/HOLCF/Deflation.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Deflation.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports Cfun begin -defaultsort cpo +default_sort cpo subsection {* Continuous deflations *} diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Domain.thy --- a/src/HOLCF/Domain.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Domain.thy Tue May 04 20:30:22 2010 +0200 @@ -16,7 +16,7 @@ ("Tools/Domain/domain_extender.ML") begin -defaultsort pcpo +default_sort pcpo subsection {* Casedist *} diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/FOCUS/Fstream.thy --- a/src/HOLCF/FOCUS/Fstream.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/FOCUS/Fstream.thy Tue May 04 20:30:22 2010 +0200 @@ -12,7 +12,7 @@ imports "../ex/Stream" begin -defaultsort type +default_sort type types 'a fstream = "'a lift stream" diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/FOCUS/Fstreams.thy --- a/src/HOLCF/FOCUS/Fstreams.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/FOCUS/Fstreams.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ theory Fstreams imports "../ex/Stream" begin -defaultsort type +default_sort type types 'a fstream = "('a lift) stream" diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Fix.thy --- a/src/HOLCF/Fix.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Fix.thy Tue May 04 20:30:22 2010 +0200 @@ -9,7 +9,7 @@ imports Cfun begin -defaultsort pcpo +default_sort pcpo subsection {* Iteration *} diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Fixrec.thy --- a/src/HOLCF/Fixrec.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Fixrec.thy Tue May 04 20:30:22 2010 +0200 @@ -13,7 +13,7 @@ subsection {* Maybe monad type *} -defaultsort cpo +default_sort cpo pcpodef (open) 'a maybe = "UNIV::(one ++ 'a u) set" by simp_all @@ -463,7 +463,7 @@ subsection {* Match functions for built-in types *} -defaultsort pcpo +default_sort pcpo definition match_UU :: "'a \ 'c maybe \ 'c maybe" diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/HOLCF.thy --- a/src/HOLCF/HOLCF.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/HOLCF.thy Tue May 04 20:30:22 2010 +0200 @@ -12,7 +12,7 @@ Sum_Cpo begin -defaultsort pcpo +default_sort pcpo text {* Legacy theorem names *} diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/IOA/Storage/Correctness.thy --- a/src/HOLCF/IOA/Storage/Correctness.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/IOA/Storage/Correctness.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports SimCorrectness Spec Impl begin -defaultsort type +default_sort type definition sim_relation :: "((nat * bool) * (nat set * bool)) set" where diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/IOA/meta_theory/Abstraction.thy --- a/src/HOLCF/IOA/meta_theory/Abstraction.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/IOA/meta_theory/Abstraction.thy Tue May 04 20:30:22 2010 +0200 @@ -9,7 +9,7 @@ uses ("automaton.ML") begin -defaultsort type +default_sort type definition cex_abs :: "('s1 => 's2) => ('a,'s1)execution => ('a,'s2)execution" where diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/IOA/meta_theory/Automata.thy --- 
a/src/HOLCF/IOA/meta_theory/Automata.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/IOA/meta_theory/Automata.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports Asig begin -defaultsort type +default_sort type types ('a, 's) transition = "'s * 'a * 's" diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/IOA/meta_theory/CompoTraces.thy --- a/src/HOLCF/IOA/meta_theory/CompoTraces.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/IOA/meta_theory/CompoTraces.thy Tue May 04 20:30:22 2010 +0200 @@ -67,7 +67,7 @@ "Finite (mksch A B$tr$x$y) --> Finite tr" -declaration {* fn _ => Simplifier.map_ss (fn ss => ss setmksym (K NONE)) *} +declaration {* fn _ => Simplifier.map_ss (fn ss => ss setmksym (K (K NONE))) *} subsection "mksch rewrite rules" @@ -967,7 +967,7 @@ done -declaration {* fn _ => Simplifier.map_ss (fn ss => ss setmksym (SOME o symmetric_fun)) *} +declaration {* fn _ => Simplifier.map_ss (fn ss => ss setmksym (K (SOME o symmetric_fun))) *} end diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/IOA/meta_theory/LiveIOA.thy --- a/src/HOLCF/IOA/meta_theory/LiveIOA.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/IOA/meta_theory/LiveIOA.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports TLS begin -defaultsort type +default_sort type types ('a, 's) live_ioa = "('a,'s)ioa * ('a,'s)ioa_temp" diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/IOA/meta_theory/Pred.thy --- a/src/HOLCF/IOA/meta_theory/Pred.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/IOA/meta_theory/Pred.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports Main begin -defaultsort type +default_sort type types 'a predicate = "'a => bool" diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/IOA/meta_theory/RefMappings.thy --- a/src/HOLCF/IOA/meta_theory/RefMappings.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/IOA/meta_theory/RefMappings.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports Traces begin -defaultsort type +default_sort type definition move :: "[('a,'s)ioa,('a,'s)pairs,'s,'a,'s] => bool" where diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/IOA/meta_theory/Sequence.thy --- a/src/HOLCF/IOA/meta_theory/Sequence.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/IOA/meta_theory/Sequence.thy Tue May 04 20:30:22 2010 +0200 @@ -8,9 +8,9 @@ imports Seq begin -defaultsort type +default_sort type -types 'a Seq = "'a::type lift seq" +types 'a Seq = "'a lift seq" consts diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/IOA/meta_theory/Simulations.thy --- a/src/HOLCF/IOA/meta_theory/Simulations.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/IOA/meta_theory/Simulations.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports RefCorrectness begin -defaultsort type +default_sort type definition is_simulation :: "[('s1 * 's2)set,('a,'s1)ioa,('a,'s2)ioa] => bool" where diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/IOA/meta_theory/TL.thy --- a/src/HOLCF/IOA/meta_theory/TL.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/IOA/meta_theory/TL.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports Pred Sequence begin -defaultsort type +default_sort type types 'a temporal = "'a Seq predicate" diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/IOA/meta_theory/TLS.thy --- a/src/HOLCF/IOA/meta_theory/TLS.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/IOA/meta_theory/TLS.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports IOA TL begin -defaultsort type +default_sort type types ('a, 's) ioa_temp = "('a option,'s)transition temporal" diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/IOA/meta_theory/Traces.thy --- 
a/src/HOLCF/IOA/meta_theory/Traces.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/IOA/meta_theory/Traces.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports Sequence Automata begin -defaultsort type +default_sort type types ('a,'s)pairs = "('a * 's) Seq" diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Lift.thy --- a/src/HOLCF/Lift.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Lift.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports Discrete Up Countable begin -defaultsort type +default_sort type pcpodef 'a lift = "UNIV :: 'a discr u set" by simp_all diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/LowerPD.thy --- a/src/HOLCF/LowerPD.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/LowerPD.thy Tue May 04 20:30:22 2010 +0200 @@ -393,7 +393,7 @@ (\x y. \ f. x\f +\ y\f)" lemma ACI_lower_bind: - "ab_semigroup_idem_mult (\x y. \ f. x\f +\ y\f)" + "class.ab_semigroup_idem_mult (\x y. \ f. x\f +\ y\f)" apply unfold_locales apply (simp add: lower_plus_assoc) apply (simp add: lower_plus_commute) diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Product_Cpo.thy --- a/src/HOLCF/Product_Cpo.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Product_Cpo.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports Adm begin -defaultsort cpo +default_sort cpo subsection {* Unit type is a pcpo *} diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Representable.thy --- a/src/HOLCF/Representable.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Representable.thy Tue May 04 20:30:22 2010 +0200 @@ -42,7 +42,7 @@ @{term rep}, unless specified otherwise. *} -defaultsort rep +default_sort rep subsection {* Representations of types *} diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Sprod.thy --- a/src/HOLCF/Sprod.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Sprod.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports Bifinite begin -defaultsort pcpo +default_sort pcpo subsection {* Definition of strict product type *} diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Ssum.thy --- a/src/HOLCF/Ssum.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Ssum.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports Tr begin -defaultsort pcpo +default_sort pcpo subsection {* Definition of strict sum type *} diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Tools/Domain/domain_isomorphism.ML --- a/src/HOLCF/Tools/Domain/domain_isomorphism.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Tools/Domain/domain_isomorphism.ML Tue May 04 20:30:22 2010 +0200 @@ -189,7 +189,7 @@ (K (beta_tac 1)); val tuple_unfold_thm = (@{thm def_cont_fix_eq} OF [tuple_fixdef_thm, cont_thm]) - |> Local_Defs.unfold (ProofContext.init thy) @{thms split_conv}; + |> Local_Defs.unfold (ProofContext.init_global thy) @{thms split_conv}; fun mk_unfold_thms [] thm = [] | mk_unfold_thms (n::[]) thm = [(n, thm)] @@ -380,7 +380,7 @@ fun read_typ thy str sorts = let - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy |> fold (Variable.declare_typ o TFree) sorts; val T = Syntax.read_typ ctxt str; in (T, Term.add_tfreesT T sorts) end; diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Tools/Domain/domain_theorems.ML --- a/src/HOLCF/Tools/Domain/domain_theorems.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Tools/Domain/domain_theorems.ML Tue May 04 20:30:22 2010 +0200 @@ -71,7 +71,7 @@ end; fun legacy_infer_term thy t = - let val ctxt = ProofContext.set_mode ProofContext.mode_schematic (ProofContext.init thy) + let val ctxt = ProofContext.set_mode ProofContext.mode_schematic (ProofContext.init_global thy) in singleton 
(Syntax.check_terms ctxt) (intern_term thy t) end; fun pg'' thy defs t tacs = @@ -347,7 +347,7 @@ (* ----- theorems concerning finiteness and induction ----------------------- *) - val global_ctxt = ProofContext.init thy; + val global_ctxt = ProofContext.init_global thy; val _ = trace " Proving ind..."; val ind = @@ -422,7 +422,7 @@ bot :: map (fn (c,_) => Long_Name.base_name c) cons; in adms @ flat (map2 one_eq bottoms eqs) end; -val inducts = Project_Rule.projections (ProofContext.init thy) ind; +val inducts = Project_Rule.projections (ProofContext.init_global thy) ind; fun ind_rule (dname, rule) = ((Binding.empty, [rule]), [Rule_Cases.case_names case_ns, Induct.induct_type dname]); @@ -470,7 +470,7 @@ local fun legacy_infer_term thy t = - singleton (Syntax.check_terms (ProofContext.init thy)) (intern_term thy t); + singleton (Syntax.check_terms (ProofContext.init_global thy)) (intern_term thy t); fun legacy_infer_prop thy t = legacy_infer_term thy (TypeInfer.constrain propT t); fun infer_props thy = map (apsnd (legacy_infer_prop thy)); fun add_defs_i x = PureThy.add_defs false (map Thm.no_attributes x); diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Tools/fixrec.ML --- a/src/HOLCF/Tools/fixrec.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Tools/fixrec.ML Tue May 04 20:30:22 2010 +0200 @@ -337,10 +337,10 @@ (* proves a block of pattern matching equations as theorems, using unfold *) fun make_simps ctxt (unfold_thm, eqns : (Attrib.binding * term) list) = let - val tacs = - [rtac (unfold_thm RS @{thm ssubst_lhs}) 1, - asm_simp_tac (simpset_of ctxt) 1]; - fun prove_term t = Goal.prove ctxt [] [] t (K (EVERY tacs)); + val ss = Simplifier.context ctxt (FixrecSimpData.get (Context.Proof ctxt)); + val rule = unfold_thm RS @{thm ssubst_lhs}; + val tac = rtac rule 1 THEN asm_simp_tac ss 1; + fun prove_term t = Goal.prove ctxt [] [] t (K tac); fun prove_eqn (bind, eqn_t) = (bind, prove_term eqn_t); in map prove_eqn eqns diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Tools/pcpodef.ML --- a/src/HOLCF/Tools/pcpodef.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Tools/pcpodef.ML Tue May 04 20:30:22 2010 +0200 @@ -170,7 +170,7 @@ (*rhs*) val tmp_ctxt = - ProofContext.init thy + ProofContext.init_global thy |> fold (Variable.declare_typ o TFree) raw_args; val set = prep_term tmp_ctxt raw_set; val tmp_ctxt' = tmp_ctxt |> Variable.declare_term set; @@ -207,7 +207,7 @@ val ((_, (_, below_ldef)), lthy4) = lthy3 |> Specification.definition (NONE, ((Binding.prefix_name "below_" (Binding.suffix_name "_def" name), []), below_eqn)); - val ctxt_thy = ProofContext.init (ProofContext.theory_of lthy4); + val ctxt_thy = ProofContext.init_global (ProofContext.theory_of lthy4); val below_def = singleton (ProofContext.export lthy4 ctxt_thy) below_ldef; val thy5 = lthy4 |> Class.prove_instantiation_instance @@ -322,24 +322,24 @@ fun gen_cpodef_proof prep_term prep_constraint ((def, name), (b, raw_args, mx), set, opt_morphs) thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val args = map (apsnd (prep_constraint ctxt)) raw_args; val (goal1, goal2, make_result) = prepare_cpodef prep_term def name (b, args, mx) set opt_morphs thy; fun after_qed [[th1, th2]] = ProofContext.theory (snd o make_result th1 th2) | after_qed _ = raise Fail "cpodef_proof"; - in Proof.theorem_i NONE after_qed [[(goal1, []), (goal2, [])]] ctxt end; + in Proof.theorem NONE after_qed [[(goal1, []), (goal2, [])]] ctxt end; fun gen_pcpodef_proof prep_term prep_constraint ((def, name), (b, raw_args, mx), set, 
opt_morphs) thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val args = map (apsnd (prep_constraint ctxt)) raw_args; val (goal1, goal2, make_result) = prepare_pcpodef prep_term def name (b, args, mx) set opt_morphs thy; fun after_qed [[th1, th2]] = ProofContext.theory (snd o make_result th1 th2) | after_qed _ = raise Fail "pcpodef_proof"; - in Proof.theorem_i NONE after_qed [[(goal1, []), (goal2, [])]] ctxt end; + in Proof.theorem NONE after_qed [[(goal1, []), (goal2, [])]] ctxt end; in diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Tools/repdef.ML --- a/src/HOLCF/Tools/repdef.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Tools/repdef.ML Tue May 04 20:30:22 2010 +0200 @@ -65,7 +65,7 @@ (*rhs*) val tmp_ctxt = - ProofContext.init thy + ProofContext.init_global thy |> fold (Variable.declare_typ o TFree) raw_args; val defl = prep_term tmp_ctxt raw_defl; val tmp_ctxt = tmp_ctxt |> Variable.declare_constraints defl; @@ -119,7 +119,7 @@ Specification.definition (NONE, (prj_bind, prj_eqn)) lthy; val ((_, (_, approx_ldef)), lthy) = Specification.definition (NONE, (approx_bind, approx_eqn)) lthy; - val ctxt_thy = ProofContext.init (ProofContext.theory_of lthy); + val ctxt_thy = ProofContext.init_global (ProofContext.theory_of lthy); val emb_def = singleton (ProofContext.export lthy ctxt_thy) emb_ldef; val prj_def = singleton (ProofContext.export lthy ctxt_thy) prj_ldef; val approx_def = singleton (ProofContext.export lthy ctxt_thy) approx_ldef; @@ -161,7 +161,7 @@ fun repdef_cmd ((def, name), (b, raw_args, mx), A, morphs) thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val args = map (apsnd (Typedecl.read_constraint ctxt)) raw_args; in snd (gen_add_repdef Syntax.read_term def name (b, args, mx) A morphs thy) end; diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Tr.thy --- a/src/HOLCF/Tr.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Tr.thy Tue May 04 20:30:22 2010 +0200 @@ -62,7 +62,7 @@ subsection {* Case analysis *} -defaultsort pcpo +default_sort pcpo definition trifte :: "'c \ 'c \ tr \ 'c" where diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Universal.thy --- a/src/HOLCF/Universal.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Universal.thy Tue May 04 20:30:22 2010 +0200 @@ -340,7 +340,7 @@ subsection {* Universality of \emph{udom} *} -defaultsort bifinite +default_sort bifinite subsubsection {* Choosing a maximal element from a finite set *} diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/Up.thy --- a/src/HOLCF/Up.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/Up.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports Bifinite begin -defaultsort cpo +default_sort cpo subsection {* Definition of new type for lifting *} diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/UpperPD.thy --- a/src/HOLCF/UpperPD.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/UpperPD.thy Tue May 04 20:30:22 2010 +0200 @@ -388,7 +388,7 @@ (\x y. \ f. x\f +\ y\f)" lemma ACI_upper_bind: - "ab_semigroup_idem_mult (\x y. \ f. x\f +\ y\f)" + "class.ab_semigroup_idem_mult (\x y. \ f. 
x\f +\ y\f)" apply unfold_locales apply (simp add: upper_plus_assoc) apply (simp add: upper_plus_commute) diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/ex/Domain_Proofs.thy --- a/src/HOLCF/ex/Domain_Proofs.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/ex/Domain_Proofs.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports HOLCF begin -defaultsort rep +default_sort rep (* diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/ex/Letrec.thy --- a/src/HOLCF/ex/Letrec.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/ex/Letrec.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports HOLCF begin -defaultsort pcpo +default_sort pcpo definition CLetrec :: "('a \ 'a \ 'b) \ 'b" where diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/ex/New_Domain.thy --- a/src/HOLCF/ex/New_Domain.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/ex/New_Domain.thy Tue May 04 20:30:22 2010 +0200 @@ -13,7 +13,7 @@ i.e. types in class @{text rep}. *} -defaultsort rep +default_sort rep text {* Provided that @{text rep} is the default sort, the @{text new_domain} diff -r aace7a969410 -r 8629ac3efb19 src/HOLCF/ex/Powerdomain_ex.thy --- a/src/HOLCF/ex/Powerdomain_ex.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/HOLCF/ex/Powerdomain_ex.thy Tue May 04 20:30:22 2010 +0200 @@ -8,7 +8,7 @@ imports HOLCF begin -defaultsort bifinite +default_sort bifinite subsection {* Monadic sorting example *} diff -r aace7a969410 -r 8629ac3efb19 src/LCF/LCF.thy --- a/src/LCF/LCF.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/LCF/LCF.thy Tue May 04 20:30:22 2010 +0200 @@ -14,7 +14,7 @@ subsection {* Natural Deduction Rules for LCF *} classes cpo < "term" -defaultsort cpo +default_sort cpo typedecl tr typedecl void diff -r aace7a969410 -r 8629ac3efb19 src/Provers/clasimp.ML --- a/src/Provers/clasimp.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Provers/clasimp.ML Tue May 04 20:30:22 2010 +0200 @@ -70,8 +70,14 @@ fun get_css context = (Classical.get_cs context, Simplifier.get_ss context); fun map_css f context = - let val (cs', ss') = f (get_css context) - in context |> Classical.map_cs (K cs') |> Simplifier.map_ss (K ss') end; + let + val (cs, ss) = get_css context; + val (cs', ss') = f (cs, Simplifier.context (Context.proof_of context) ss); + in + context + |> Classical.map_cs (K cs') + |> Simplifier.map_ss (K (Simplifier.inherit_context ss ss')) + end; fun clasimpset_of ctxt = (Classical.claset_of ctxt, Simplifier.simpset_of ctxt); diff -r aace7a969410 -r 8629ac3efb19 src/Provers/classical.ML --- a/src/Provers/classical.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Provers/classical.ML Tue May 04 20:30:22 2010 +0200 @@ -208,8 +208,11 @@ fun dup_intr th = zero_var_indexes (th RS classical); fun dup_elim th = - rule_by_tactic (TRYALL (etac revcut_rl)) - ((th RSN (2, revcut_rl)) |> Thm.assumption 2 |> Seq.hd); + let + val rl = (th RSN (2, revcut_rl)) |> Thm.assumption 2 |> Seq.hd; + val ctxt = ProofContext.init_global (Thm.theory_of_thm rl); + in rule_by_tactic ctxt (TRYALL (etac revcut_rl)) rl end; + (**** Classical rule sets ****) @@ -853,7 +856,7 @@ fun global_claset_of thy = let val (cs, ctxt_cs) = GlobalClaset.get thy - in context_cs (ProofContext.init thy) cs (ctxt_cs) end; + in context_cs (ProofContext.init_global thy) cs (ctxt_cs) end; (* context dependent components *) diff -r aace7a969410 -r 8629ac3efb19 src/Provers/hypsubst.ML --- a/src/Provers/hypsubst.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Provers/hypsubst.ML Tue May 04 20:30:22 2010 +0200 @@ -156,7 +156,7 @@ let val (k, _) = eq_var bnd true Bi val hyp_subst_ss = 
Simplifier.global_context (Thm.theory_of_thm st) empty_ss - setmksimps (mk_eqs bnd) + setmksimps (K (mk_eqs bnd)) in EVERY [rotate_tac k i, asm_lr_simp_tac hyp_subst_ss i, etac thin_rl i, rotate_tac (~k) i] end handle THM _ => no_tac | EQ_VAR => no_tac) i st diff -r aace7a969410 -r 8629ac3efb19 src/Provers/quantifier1.ML --- a/src/Provers/quantifier1.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Provers/quantifier1.ML Tue May 04 20:30:22 2010 +0200 @@ -113,7 +113,7 @@ in exqu [] end; fun prove_conv tac thy tu = - Goal.prove (ProofContext.init thy) [] [] (Logic.mk_equals tu) + Goal.prove (ProofContext.init_global thy) [] [] (Logic.mk_equals tu) (K (rtac iff_reflection 1 THEN tac)); fun qcomm_tac qcomm qI i = REPEAT_DETERM (rtac qcomm i THEN rtac qI i) diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/args.ML --- a/src/Pure/Isar/args.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/args.ML Tue May 04 20:30:22 2010 +0200 @@ -205,7 +205,7 @@ >> (fn Type (c, _) => c | TFree (a, _) => a | _ => ""); fun const strict = - Scan.peek (fn ctxt => named_term (ProofContext.read_const (Context.proof_of ctxt) strict)) + Scan.peek (fn ctxt => named_term (ProofContext.read_const (Context.proof_of ctxt) strict dummyT)) >> (fn Const (c, _) => c | Free (x, _) => x | _ => ""); fun const_proper strict = diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/calculation.ML --- a/src/Pure/Isar/calculation.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/calculation.ML Tue May 04 20:30:22 2010 +0200 @@ -13,11 +13,11 @@ val sym_add: attribute val sym_del: attribute val symmetric: attribute - val also: (Facts.ref * Attrib.src list) list option -> bool -> Proof.state -> Proof.state Seq.seq - val also_i: thm list option -> bool -> Proof.state -> Proof.state Seq.seq - val finally: (Facts.ref * Attrib.src list) list option -> bool -> + val also: thm list option -> bool -> Proof.state -> Proof.state Seq.seq + val also_cmd: (Facts.ref * Attrib.src list) list option -> bool -> Proof.state -> Proof.state Seq.seq + val finally: thm list option -> bool -> Proof.state -> Proof.state Seq.seq + val finally_cmd: (Facts.ref * Attrib.src list) list option -> bool -> Proof.state -> Proof.state Seq.seq - val finally_i: thm list option -> bool -> Proof.state -> Proof.state Seq.seq val moreover: bool -> Proof.state -> Proof.state val ultimately: bool -> Proof.state -> Proof.state end; @@ -148,10 +148,10 @@ state |> maintain_calculation final calc)) end; -val also = calculate Proof.get_thmss false; -val also_i = calculate (K I) false; -val finally = calculate Proof.get_thmss true; -val finally_i = calculate (K I) true; +val also = calculate (K I) false; +val also_cmd = calculate Proof.get_thmss_cmd false; +val finally = calculate (K I) true; +val finally_cmd = calculate Proof.get_thmss_cmd true; (* moreover and ultimately *) diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/class.ML --- a/src/Pure/Isar/class.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/class.ML Tue May 04 20:30:22 2010 +0200 @@ -32,7 +32,7 @@ fun calculate thy class sups base_sort param_map assm_axiom = let - val empty_ctxt = ProofContext.init thy; + val empty_ctxt = ProofContext.init_global thy; (* instantiation of canonical interpretation *) val aT = TFree (Name.aT, base_sort); @@ -100,10 +100,14 @@ (* reading and processing class specifications *) -fun prep_class_elems prep_decl thy sups proto_base_sort raw_elems = +fun prep_class_elems prep_decl thy sups raw_elems = let (* user space type system: only permits 'a type variable, improves towards 'a *) + 
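(*  calculation.ML above is the first of several modules in this changeset
    to retire the "_i" suffix: the plain names (also, finally) now take
    checked thm lists, while the parsing variants gain a "_cmd" suffix
    (also_cmd, finally_cmd).  A minimal sketch of a caller written against
    the new names, assuming internal tool code at this revision; ths is a
    placeholder thm list, facts a placeholder fact-reference list, and int
    the usual interactive flag:

      val seq  = Calculation.also (SOME ths) int state;
      val seq' = Calculation.also_cmd (SOME facts) int state;

    Former callers of also_i now call also; callers of the old string-level
    also switch to also_cmd.  *)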
val algebra = Sign.classes_of thy; + val inter_sort = curry (Sorts.inter_sort algebra); + val proto_base_sort = if null sups then Sign.defaultS thy + else fold inter_sort (map (base_sort thy) sups) []; val base_constraints = (map o apsnd) (map_type_tfree (K (TVar ((Name.aT, 0), proto_base_sort))) o fst o snd) (these_operations thy sups); @@ -111,17 +115,17 @@ if v = Name.aT then T else error ("No type variable other than " ^ Name.aT ^ " allowed in class specification") | T => T); - fun singleton_fixate thy algebra Ts = + fun singleton_fixate Ts = let fun extract f = (fold o fold_atyps) f Ts []; val tfrees = extract (fn TFree (v, sort) => insert (op =) (v, sort) | _ => I); val inferred_sort = extract - (fn TVar (_, sort) => curry (Sorts.inter_sort algebra) sort | _ => I); + (fn TVar (_, sort) => inter_sort sort | _ => I); val fixate_sort = if null tfrees then inferred_sort else case tfrees of [(_, a_sort)] => if Sorts.sort_le algebra (a_sort, inferred_sort) - then Sorts.inter_sort algebra (a_sort, inferred_sort) + then inter_sort a_sort inferred_sort else error ("Type inference imposes additional sort constraint " ^ Syntax.string_of_sort_global thy inferred_sort ^ " of type parameter " ^ Name.aT ^ " of sort " @@ -136,10 +140,10 @@ val init_class_body = fold (ProofContext.add_const_constraint o apsnd SOME) base_constraints #> redeclare_operations thy sups #> add_typ_check 10 "reject_bcd_etc" reject_bcd_etc - #> add_typ_check ~10 "singleton_fixate" (singleton_fixate thy (Sign.classes_of thy)); + #> add_typ_check ~10 "singleton_fixate" singleton_fixate; val raw_supexpr = (map (fn sup => (sup, (("", false), Expression.Positional []))) sups, []); - val ((raw_supparams, _, inferred_elems), _) = ProofContext.init thy + val ((raw_supparams, _, inferred_elems), _) = ProofContext.init_global thy |> prep_decl raw_supexpr init_class_body raw_elems; fun fold_element_types f (Element.Fixes fxs) = fold (fn (_, SOME T, _) => f T) fxs | fold_element_types f (Element.Constrains cnstrs) = fold (f o snd) cnstrs @@ -183,15 +187,14 @@ then error ("Duplicate parameter(s) in superclasses: " ^ (commas o map quote o duplicates (op =)) raw_supparam_names) else (); - val given_basesort = fold inter_sort (map (base_sort thy) sups) []; (* infer types and base sort *) val (base_sort, supparam_names, supexpr, inferred_elems) = - prep_class_elems thy sups given_basesort raw_elems; + prep_class_elems thy sups raw_elems; val sup_sort = inter_sort base_sort sups; (* process elements as class specification *) - val class_ctxt = begin sups base_sort (ProofContext.init thy); + val class_ctxt = begin sups base_sort (ProofContext.init_global thy); val ((_, _, syntax_elems), _) = class_ctxt |> Expression.cert_declaration supexpr I inferred_elems; fun check_vars e vs = if null vs @@ -273,22 +276,21 @@ #> pair (param_map, params, assm_axiom))) end; -fun gen_class prep_class_spec bname raw_supclasses raw_elems thy = +fun gen_class prep_class_spec b raw_supclasses raw_elems thy = let - val class = Sign.full_name thy bname; + val class = Sign.full_name thy b; val (((sups, supparam_names), (supsort, base_sort, supexpr)), (elems, global_syntax)) = prep_class_spec thy raw_supclasses raw_elems; in thy - |> Expression.add_locale bname Binding.empty supexpr elems + |> Expression.add_locale b (Binding.qualify true "class" b) supexpr elems |> snd |> Local_Theory.exit_global - |> adjungate_axclass bname class base_sort sups supsort supparam_names global_syntax + |> adjungate_axclass b class base_sort sups supsort supparam_names global_syntax ||> 
Theory.checkpoint |-> (fn (param_map, params, assm_axiom) => `(fn thy => calculate thy class sups base_sort param_map assm_axiom) #-> (fn (base_morph, eq_morph, export_morph, axiom, assm_intro, of_class) => - Locale.add_registration (class, base_morph $> eq_morph) NONE export_morph - (*FIXME should not modify base_morph, although admissible*) + Locale.add_registration (class, base_morph $> eq_morph (*FIXME duplication*)) (SOME (eq_morph, true)) export_morph #> register class sups params base_sort base_morph export_morph axiom assm_intro of_class)) |> Theory_Target.init (SOME class) |> pair class @@ -338,7 +340,7 @@ val subclass = gen_subclass (K I) user_proof; fun prove_subclass tac = gen_subclass (K I) (tactic_proof tac); -val subclass_cmd = gen_subclass (ProofContext.read_class o ProofContext.init) user_proof; +val subclass_cmd = gen_subclass (ProofContext.read_class o ProofContext.init_global) user_proof; end; (*local*) diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/class_target.ML --- a/src/Pure/Isar/class_target.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/class_target.ML Tue May 04 20:30:22 2010 +0200 @@ -157,13 +157,13 @@ fun print_classes thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val algebra = Sign.classes_of thy; val arities = Symtab.empty |> Symtab.fold (fn (tyco, arities) => fold (fn (class, _) => Symtab.map_default (class, []) (insert (op =) tyco)) arities) - ((#arities o Sorts.rep_algebra) algebra); + (Sorts.arities_of algebra); val the_arities = these o Symtab.lookup arities; fun mk_arity class tyco = let @@ -209,6 +209,9 @@ (eq_morph, true) (export_morphism thy cls) thy; in fold amend (heritage thy [class]) thy end; +(*fun activate_defs class thms thy = Locale.amend_registration (class, base_morphism thy class) + (Element.eq_morphism thy thms, true) (export_morphism thy class) thy;*) + fun register_operation class (c, (t, some_def)) thy = let val base_sort = base_sort thy class; @@ -369,8 +372,8 @@ ProofContext.theory ((fold o fold) AxClass.add_classrel results); in thy - |> ProofContext.init - |> Proof.theorem_i NONE after_qed [[(mk_prop thy classrel, [])]] + |> ProofContext.init_global + |> Proof.theorem NONE after_qed [[(mk_prop thy classrel, [])]] end; in @@ -418,7 +421,7 @@ fun read_multi_arity thy (raw_tycos, raw_sorts, raw_sort) = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val all_arities = map (fn raw_tyco => ProofContext.read_arity ctxt (raw_tyco, raw_sorts, raw_sort)) raw_tycos; val tycos = map #1 all_arities; @@ -511,7 +514,7 @@ in thy |> Theory.checkpoint - |> ProofContext.init + |> ProofContext.init_global |> Instantiation.put (mk_instantiation ((tycos, vs, sort), params)) |> fold (Variable.declare_typ o TFree) vs |> fold (Variable.declare_names o Free o snd) params @@ -539,7 +542,7 @@ end; val instantiation_instance = gen_instantiation_instance (fn after_qed => fn ts => - Proof.theorem_i NONE (after_qed o map the_single) (map (fn t => [(t, [])]) ts)); + Proof.theorem NONE (after_qed o map the_single) (map (fn t => [(t, [])]) ts)); fun prove_instantiation_instance tac = gen_instantiation_instance (fn after_qed => fn ts => fn lthy => after_qed (map (fn t => Goal.prove lthy [] [] t @@ -551,7 +554,7 @@ fun prove_instantiation_exit_result f tac x lthy = let val morph = ProofContext.export_morphism lthy - (ProofContext.init (ProofContext.theory_of lthy)); + (ProofContext.init_global (ProofContext.theory_of lthy)); val y = f morph x; in lthy @@ -594,8 +597,8 @@ ((fold o 
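(*  In the gen_class hunk above, the second argument of Expression.add_locale
    changes from Binding.empty to

      Binding.qualify true "class" b

    which attaches the mandatory "class" qualifier to the locale predicate
    generated for a class; compare the class.ab_semigroup_idem_mult
    occurrence earlier in this changeset.  *)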
fold) AxClass.add_arity results); in thy - |> ProofContext.init - |> Proof.theorem_i NONE after_qed (map (fn t => [(t, [])]) arities) + |> ProofContext.init_global + |> Proof.theorem NONE after_qed (map (fn t => [(t, [])]) arities) end; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/code.ML --- a/src/Pure/Isar/code.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/code.ML Tue May 04 20:30:22 2010 +0200 @@ -337,7 +337,7 @@ fun cert_signature thy = Logic.varifyT_global o Type.cert_typ (build_tsig thy) o Type.no_tvars; fun read_signature thy = cert_signature thy o Type.strip_sorts - o Syntax.parse_typ (ProofContext.init thy); + o Syntax.parse_typ (ProofContext.init_global thy); fun expand_signature thy = Type.cert_typ_mode Type.mode_syntax (Sign.tsig_of thy); @@ -554,7 +554,7 @@ fun assert_eqn thy = error_thm (gen_assert_eqn thy true); -fun meta_rewrite thy = Local_Defs.meta_rewrite_rule (ProofContext.init thy); +fun meta_rewrite thy = Local_Defs.meta_rewrite_rule (ProofContext.init_global thy); fun mk_eqn thy = error_thm (gen_assert_eqn thy false) o apfst (meta_rewrite thy); @@ -778,7 +778,7 @@ val _ = if c = const_abs_eqn thy abs_thm then () else error ("Wrong head of abstract code equation,\nexpected constant " ^ string_of_const thy c ^ "\n" ^ Display.string_of_thm_global thy abs_thm); - in Abstract (Thm.freezeT abs_thm, tyco) end; + in Abstract (Thm.legacy_freezeT abs_thm, tyco) end; fun constrain_cert thy sorts (Equations (cert_thm, propers)) = let @@ -941,7 +941,7 @@ fun print_codesetup thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val exec = the_exec thy; fun pretty_equations const thms = (Pretty.block o Pretty.fbreaks) ( diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/constdefs.ML --- a/src/Pure/Isar/constdefs.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/constdefs.ML Tue May 04 20:30:22 2010 +0200 @@ -26,7 +26,7 @@ fun err msg ts = error (cat_lines (msg :: map (Syntax.string_of_term_global thy) ts)); - val thy_ctxt = ProofContext.init thy; + val thy_ctxt = ProofContext.init_global thy; val struct_ctxt = #2 (ProofContext.add_fixes structs thy_ctxt); val ((d, mx), var_ctxt) = (case raw_decl of @@ -62,7 +62,7 @@ fun gen_constdefs prep_vars prep_prop prep_att (raw_structs, specs) thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val (structs, _) = prep_vars (map (fn (x, T) => (x, T, Structure)) raw_structs) ctxt; val (decls, thy') = fold_map (gen_constdef prep_vars prep_prop prep_att structs) specs thy; in Pretty.writeln (Proof_Display.pretty_consts ctxt (K true) decls); thy' end; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/element.ML --- a/src/Pure/Isar/element.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/element.ML Tue May 04 20:30:22 2010 +0200 @@ -283,10 +283,10 @@ in fun witness_proof after_qed wit_propss = - gen_witness_proof (Proof.theorem_i NONE) (fn wits => fn _ => after_qed wits) + gen_witness_proof (Proof.theorem NONE) (fn wits => fn _ => after_qed wits) wit_propss []; -val witness_proof_eqs = gen_witness_proof (Proof.theorem_i NONE); +val witness_proof_eqs = gen_witness_proof (Proof.theorem NONE); fun witness_local_proof after_qed cmd wit_propss goal_ctxt int = gen_witness_proof (fn after_qed' => fn propss => diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/expression.ML --- a/src/Pure/Isar/expression.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/expression.ML Tue May 04 20:30:22 2010 +0200 @@ -642,7 +642,7 @@ |> Sign.declare_const ((Binding.conceal 
binding, predT), NoSyn) |> snd |> PureThy.add_defs false [((Binding.conceal (Thm.def_binding binding), Logic.mk_equals (head, body)), [])]; - val defs_ctxt = ProofContext.init defs_thy |> Variable.declare_term head; + val defs_ctxt = ProofContext.init_global defs_thy |> Variable.declare_term head; val cert = Thm.cterm_of defs_thy; @@ -729,7 +729,7 @@ error ("Duplicate definition of locale " ^ quote name); val ((fixed, deps, body_elems), (parms, ctxt')) = - prep_decl raw_import I raw_body (ProofContext.init thy); + prep_decl raw_import I raw_body (ProofContext.init_global thy); val text as (((_, exts'), _), defs) = eval ctxt' deps body_elems; val predicate_binding = @@ -795,7 +795,7 @@ fun gen_interpretation prep_expr parse_prop prep_attr expression equations theory = let - val ((propss, deps, export), expr_ctxt) = ProofContext.init theory + val ((propss, deps, export), expr_ctxt) = ProofContext.init_global theory |> prep_expr expression; val eqns = map (parse_prop expr_ctxt o snd) equations |> Syntax.check_terms expr_ctxt; @@ -809,7 +809,8 @@ val eqn_attrss = map2 (fn attrs => fn eqn => ((apsnd o map) (Attrib.attribute_i thy) attrs, [([eqn], [])])) attrss eqns; fun meta_rewrite thy = - map (Local_Defs.meta_rewrite_rule (ProofContext.init thy) #> Drule.abs_def) o maps snd; + map (Local_Defs.meta_rewrite_rule (ProofContext.init_global thy) #> Drule.abs_def) o + maps snd; in thy |> PureThy.note_thmss Thm.lemmaK eqn_attrss diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/isar_cmd.ML --- a/src/Pure/Isar/isar_cmd.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/isar_cmd.ML Tue May 04 20:30:22 2010 +0200 @@ -219,10 +219,10 @@ fun goal opt_chain goal stmt int = opt_chain #> goal NONE (K I) stmt int; -val have = goal I Proof.have; -val hence = goal Proof.chain Proof.have; -val show = goal I Proof.show; -val thus = goal Proof.chain Proof.show; +val have = goal I Proof.have_cmd; +val hence = goal Proof.chain Proof.have_cmd; +val show = goal I Proof.show_cmd; +val thus = goal Proof.chain Proof.show_cmd; (* local endings *) @@ -393,7 +393,7 @@ let val thy = Toplevel.theory_of state; val {classes = (space, algebra), ...} = Type.rep_tsig (Sign.tsig_of thy); - val {classes, ...} = Sorts.rep_algebra algebra; + val classes = Sorts.classes_of algebra; fun entry (c, (i, (_, cs))) = (i, {name = Name_Space.extern space c, ID = c, parents = cs, dir = "", unfold = true, path = ""}); @@ -403,7 +403,7 @@ in Present.display_graph gr end); fun thm_deps args = Toplevel.unknown_theory o Toplevel.keep (fn state => - Thm_Deps.thm_deps (Proof.get_thmss (Toplevel.enter_proof_body state) args)); + Thm_Deps.thm_deps (Proof.get_thmss_cmd (Toplevel.enter_proof_body state) args)); (* find unused theorems *) @@ -437,12 +437,12 @@ local fun string_of_stmts state args = - Proof.get_thmss state args + Proof.get_thmss_cmd state args |> map (Element.pretty_statement (Proof.context_of state) Thm.theoremK) |> Pretty.chunks2 |> Pretty.string_of; fun string_of_thms state args = - Pretty.string_of (Display.pretty_thms (Proof.context_of state) (Proof.get_thmss state args)); + Pretty.string_of (Display.pretty_thms (Proof.context_of state) (Proof.get_thmss_cmd state args)); fun string_of_prfs full state arg = Pretty.string_of @@ -460,7 +460,7 @@ end | SOME args => Pretty.chunks (map (Proof_Syntax.pretty_proof_of (Proof.context_of state) full) - (Proof.get_thmss state args))); + (Proof.get_thmss_cmd state args))); fun string_of_prop state s = let diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/isar_syn.ML --- 
a/src/Pure/Isar/isar_syn.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/isar_syn.ML Tue May 04 20:30:22 2010 +0200 @@ -96,8 +96,9 @@ >> (Toplevel.theory o AxClass.axiomatize_classrel_cmd)); val _ = - OuterSyntax.command "defaultsort" "declare default sort" K.thy_decl - (P.sort >> (Toplevel.theory o Sign.add_defsort)); + OuterSyntax.local_theory "default_sort" "declare default sort for explicit type variables" + K.thy_decl + (P.sort >> (fn s => fn lthy => Local_Theory.set_defsort (Syntax.read_sort lthy s) lthy)); (* types *) @@ -224,22 +225,22 @@ >> (fn (mode, args) => Specification.abbreviation_cmd mode args)); val _ = - OuterSyntax.local_theory "type_notation" "add notation for type constructors" K.thy_decl + OuterSyntax.local_theory "type_notation" "add concrete syntax for type constructors" K.thy_decl (opt_mode -- P.and_list1 (P.xname -- P.mixfix) >> (fn (mode, args) => Specification.type_notation_cmd true mode args)); val _ = - OuterSyntax.local_theory "no_type_notation" "delete notation for type constructors" K.thy_decl + OuterSyntax.local_theory "no_type_notation" "delete concrete syntax for type constructors" K.thy_decl (opt_mode -- P.and_list1 (P.xname -- P.mixfix) >> (fn (mode, args) => Specification.type_notation_cmd false mode args)); val _ = - OuterSyntax.local_theory "notation" "add notation for constants / fixed variables" K.thy_decl + OuterSyntax.local_theory "notation" "add concrete syntax for constants / fixed variables" K.thy_decl (opt_mode -- P.and_list1 (P.xname -- SpecParse.locale_mixfix) >> (fn (mode, args) => Specification.notation_cmd true mode args)); val _ = - OuterSyntax.local_theory "no_notation" "delete notation for constants / fixed variables" K.thy_decl + OuterSyntax.local_theory "no_notation" "delete concrete syntax for constants / fixed variables" K.thy_decl (opt_mode -- P.and_list1 (P.xname -- SpecParse.locale_mixfix) >> (fn (mode, args) => Specification.notation_cmd false mode args)); @@ -510,6 +511,13 @@ val _ = gen_theorem true Thm.corollaryK; val _ = + OuterSyntax.local_theory_to_proof "example_proof" + "example proof body, without any result" K.thy_schematic_goal + (Scan.succeed + (Specification.schematic_theorem_cmd "" NONE (K I) + Attrib.empty_binding [] (Element.Shows []) false #> Proof.enter_forward)); + +val _ = OuterSyntax.command "have" "state local goal" (K.tag_proof K.prf_goal) (SpecParse.statement >> ((Toplevel.print oo Toplevel.proof') o IsarCmd.have)); @@ -542,27 +550,27 @@ val _ = OuterSyntax.command "from" "forward chaining from given facts" (K.tag_proof K.prf_chain) - (facts >> (Toplevel.print oo (Toplevel.proof o Proof.from_thmss))); + (facts >> (Toplevel.print oo (Toplevel.proof o Proof.from_thmss_cmd))); val _ = OuterSyntax.command "with" "forward chaining from given and current facts" (K.tag_proof K.prf_chain) - (facts >> (Toplevel.print oo (Toplevel.proof o Proof.with_thmss))); + (facts >> (Toplevel.print oo (Toplevel.proof o Proof.with_thmss_cmd))); val _ = OuterSyntax.command "note" "define facts" (K.tag_proof K.prf_decl) - (SpecParse.name_facts >> (Toplevel.print oo (Toplevel.proof o Proof.note_thmss))); + (SpecParse.name_facts >> (Toplevel.print oo (Toplevel.proof o Proof.note_thmss_cmd))); val _ = OuterSyntax.command "using" "augment goal facts" (K.tag_proof K.prf_decl) - (facts >> (Toplevel.print oo (Toplevel.proof o Proof.using))); + (facts >> (Toplevel.print oo (Toplevel.proof o Proof.using_cmd))); val _ = OuterSyntax.command "unfolding" "unfold definitions in goal and facts" (K.tag_proof K.prf_decl) - (facts >> 
(Toplevel.print oo (Toplevel.proof o Proof.unfolding))); + (facts >> (Toplevel.print oo (Toplevel.proof o Proof.unfolding_cmd))); (* proof context *) @@ -570,17 +578,17 @@ val _ = OuterSyntax.command "fix" "fix local variables (Skolem constants)" (K.tag_proof K.prf_asm) - (P.fixes >> (Toplevel.print oo (Toplevel.proof o Proof.fix))); + (P.fixes >> (Toplevel.print oo (Toplevel.proof o Proof.fix_cmd))); val _ = OuterSyntax.command "assume" "assume propositions" (K.tag_proof K.prf_asm) - (SpecParse.statement >> (Toplevel.print oo (Toplevel.proof o Proof.assume))); + (SpecParse.statement >> (Toplevel.print oo (Toplevel.proof o Proof.assume_cmd))); val _ = OuterSyntax.command "presume" "assume propositions, to be established later" (K.tag_proof K.prf_asm) - (SpecParse.statement >> (Toplevel.print oo (Toplevel.proof o Proof.presume))); + (SpecParse.statement >> (Toplevel.print oo (Toplevel.proof o Proof.presume_cmd))); val _ = OuterSyntax.command "def" "local definition" @@ -588,24 +596,30 @@ (P.and_list1 (SpecParse.opt_thm_name ":" -- ((P.binding -- P.opt_mixfix) -- ((P.$$$ "\\" || P.$$$ "==") |-- P.!!! P.termp))) - >> (Toplevel.print oo (Toplevel.proof o Proof.def))); + >> (Toplevel.print oo (Toplevel.proof o Proof.def_cmd))); val _ = OuterSyntax.command "obtain" "generalized existence" (K.tag_proof K.prf_asm_goal) (P.parname -- Scan.optional (P.fixes --| P.where_) [] -- SpecParse.statement - >> (fn ((x, y), z) => Toplevel.print o Toplevel.proof' (Obtain.obtain x y z))); + >> (fn ((x, y), z) => Toplevel.print o Toplevel.proof' (Obtain.obtain_cmd x y z))); val _ = OuterSyntax.command "guess" "wild guessing (unstructured)" (K.tag_proof K.prf_asm_goal) - (Scan.optional P.fixes [] >> (Toplevel.print oo (Toplevel.proof' o Obtain.guess))); + (Scan.optional P.fixes [] >> (Toplevel.print oo (Toplevel.proof' o Obtain.guess_cmd))); val _ = OuterSyntax.command "let" "bind text variables" (K.tag_proof K.prf_decl) (P.and_list1 (P.and_list1 P.term -- (P.$$$ "=" |-- P.term)) - >> (Toplevel.print oo (Toplevel.proof o Proof.let_bind))); + >> (Toplevel.print oo (Toplevel.proof o Proof.let_bind_cmd))); + +val _ = + OuterSyntax.command "write" "add concrete syntax for constants / fixed variables" + (K.tag_proof K.prf_decl) + (opt_mode -- P.and_list1 (P.xname -- SpecParse.locale_mixfix) + >> (fn (mode, args) => Toplevel.print o Toplevel.proof (Proof.write_cmd mode args))); val case_spec = (P.$$$ "(" |-- P.!!! 
(P.xname -- Scan.repeat1 (P.maybe P.name) --| P.$$$ ")") || @@ -614,7 +628,7 @@ val _ = OuterSyntax.command "case" "invoke local context" (K.tag_proof K.prf_asm) - (case_spec >> (Toplevel.print oo (Toplevel.proof o Proof.invoke_case))); + (case_spec >> (Toplevel.print oo (Toplevel.proof o Proof.invoke_case_cmd))); (* proof structure *) @@ -711,12 +725,12 @@ val _ = OuterSyntax.command "also" "combine calculation and current facts" (K.tag_proof K.prf_decl) - (calc_args >> (Toplevel.proofs' o Calculation.also)); + (calc_args >> (Toplevel.proofs' o Calculation.also_cmd)); val _ = OuterSyntax.command "finally" "combine calculation and current facts, exhibit result" (K.tag_proof K.prf_chain) - (calc_args >> (Toplevel.proofs' o Calculation.finally)); + (calc_args >> (Toplevel.proofs' o Calculation.finally_cmd)); val _ = OuterSyntax.command "moreover" "augment calculation by current facts" diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/local_theory.ML --- a/src/Pure/Isar/local_theory.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/local_theory.ML Tue May 04 20:30:22 2010 +0200 @@ -40,6 +40,7 @@ local_theory -> (string * thm list) list * local_theory val declaration: bool -> declaration -> local_theory -> local_theory val syntax_declaration: bool -> declaration -> local_theory -> local_theory + val set_defsort: sort -> local_theory -> local_theory val type_notation: bool -> Syntax.mode -> (typ * mixfix) list -> local_theory -> local_theory val notation: bool -> Syntax.mode -> (term * mixfix) list -> local_theory -> local_theory val class_alias: binding -> class -> local_theory -> local_theory @@ -180,10 +181,11 @@ Morphism.binding_morphism (Name_Space.transform_binding (naming_of lthy)); fun target_morphism lthy = standard_morphism lthy (target_of lthy); -fun global_morphism lthy = standard_morphism lthy (ProofContext.init (ProofContext.theory_of lthy)); +fun global_morphism lthy = + standard_morphism lthy (ProofContext.init_global (ProofContext.theory_of lthy)); -(* basic operations *) +(* primitive operations *) fun operation f lthy = f (#operations (get_lthy lthy)) lthy; fun operation1 f x = operation (fn ops => f ops x); @@ -196,9 +198,16 @@ val declaration = checkpoint ooo operation2 #declaration; val syntax_declaration = checkpoint ooo operation2 #syntax_declaration; + + +(** basic derived operations **) + val notes = notes_kind ""; fun note (a, ths) = notes [(a, [(ths, [])])] #>> the_single; +fun set_defsort S = + syntax_declaration false (K (Context.mapping (Sign.set_defsort S) (ProofContext.set_defsort S))); + (* notation *) @@ -224,6 +233,9 @@ val const_alias = alias Sign.const_alias ProofContext.const_alias; + +(** init and exit **) + (* init *) fun init group theory_prefix operations target = @@ -259,7 +271,7 @@ fun exit_result_global f (x, lthy) = let val thy = exit_global lthy; - val thy_ctxt = ProofContext.init thy; + val thy_ctxt = ProofContext.init_global thy; val phi = standard_morphism lthy thy_ctxt; in (f phi x, thy) end; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/locale.ML --- a/src/Pure/Isar/locale.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/locale.ML Tue May 04 20:30:22 2010 +0200 @@ -309,7 +309,7 @@ fun init name thy = activate_all name thy Element.init (Element.transfer_morphism o Context.theory_of) - ([], Context.Proof (ProofContext.init thy)) |-> put_idents |> Context.proof_of; + ([], Context.Proof (ProofContext.init_global thy)) |-> put_idents |> Context.proof_of; fun print_locale thy show_facts raw_name = let @@ -412,7 +412,7 @@ fun 
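(*  local_theory.ML above adds set_defsort as a derived operation: a syntax
    declaration that applies Sign.set_defsort to the background theory and
    ProofContext.set_defsort to the context.  The new 'default_sort' command
    declared in isar_syn.ML above goes through it.  A minimal sketch,
    assuming an arbitrary local theory value lthy at this revision; the sort
    string "pcpo" is a placeholder:

      lthy |> Local_Theory.set_defsort (Syntax.read_sort lthy "pcpo")

    which mirrors the parser translation
    P.sort >> (fn s => fn lthy => Local_Theory.set_defsort (Syntax.read_sort lthy s) lthy).  *)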
pretty_reg thy (name, morph) = let val name' = extern thy name; - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; fun prt_qual (qual, mand) = Pretty.str (qual ^ (if mand then "!" else "?")); fun prt_quals qs = Pretty.separate "." (map prt_qual qs) |> Pretty.block; val prt_term = Pretty.quote o Syntax.pretty_term ctxt; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/object_logic.ML --- a/src/Pure/Isar/object_logic.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/object_logic.ML Tue May 04 20:30:22 2010 +0200 @@ -186,7 +186,7 @@ fun atomize_prems ct = if Logic.has_meta_prems (Thm.term_of ct) then Conv.params_conv ~1 (K (Conv.prems_conv ~1 atomize)) - (ProofContext.init (Thm.theory_of_cterm ct)) ct + (ProofContext.init_global (Thm.theory_of_cterm ct)) ct else Conv.all_conv ct; val atomize_prems_tac = CONVERSION atomize_prems; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/obtain.ML --- a/src/Pure/Isar/obtain.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/obtain.ML Tue May 04 20:30:22 2010 +0200 @@ -39,14 +39,14 @@ signature OBTAIN = sig val thatN: string - val obtain: string -> (binding * string option * mixfix) list -> + val obtain: string -> (binding * typ option * mixfix) list -> + (Thm.binding * (term * term list) list) list -> bool -> Proof.state -> Proof.state + val obtain_cmd: string -> (binding * string option * mixfix) list -> (Attrib.binding * (string * string list) list) list -> bool -> Proof.state -> Proof.state - val obtain_i: string -> (binding * typ option * mixfix) list -> - (Thm.binding * (term * term list) list) list -> bool -> Proof.state -> Proof.state val result: (Proof.context -> tactic) -> thm list -> Proof.context -> ((string * cterm) list * thm list) * Proof.context - val guess: (binding * string option * mixfix) list -> bool -> Proof.state -> Proof.state - val guess_i: (binding * typ option * mixfix) list -> bool -> Proof.state -> Proof.state + val guess: (binding * typ option * mixfix) list -> bool -> Proof.state -> Proof.state + val guess_cmd: (binding * string option * mixfix) list -> bool -> Proof.state -> Proof.state end; structure Obtain: OBTAIN = @@ -148,26 +148,26 @@ fun after_qed _ = Proof.local_qed (NONE, false) #> `Proof.the_fact #-> (fn rule => - Proof.fix_i vars - #> Proof.assm_i (obtain_export fix_ctxt rule (map (cert o Free) parms)) asms); + Proof.fix vars + #> Proof.assm (obtain_export fix_ctxt rule (map (cert o Free) parms)) asms); in state |> Proof.enter_forward - |> Proof.have_i NONE (K I) [(Thm.empty_binding, [(obtain_prop, [])])] int + |> Proof.have NONE (K I) [(Thm.empty_binding, [(obtain_prop, [])])] int |> Proof.proof (SOME Method.succeed_text) |> Seq.hd - |> Proof.fix_i [(Binding.name thesisN, NONE, NoSyn)] - |> Proof.assume_i + |> Proof.fix [(Binding.name thesisN, NONE, NoSyn)] + |> Proof.assume [((Binding.name that_name, [Context_Rules.intro_query NONE]), [(that_prop, [])])] |> `Proof.the_facts ||> Proof.chain_facts chain_facts - ||> Proof.show_i NONE after_qed [(Thm.empty_binding, [(thesis, [])])] false + ||> Proof.show NONE after_qed [(Thm.empty_binding, [(thesis, [])])] false |-> Proof.refine_insert end; in -val obtain = gen_obtain Attrib.attribute ProofContext.read_vars ProofContext.read_propp; -val obtain_i = gen_obtain (K I) ProofContext.cert_vars ProofContext.cert_propp; +val obtain = gen_obtain (K I) ProofContext.cert_vars ProofContext.cert_propp; +val obtain_cmd = gen_obtain Attrib.attribute ProofContext.read_vars ProofContext.read_propp; end; @@ -290,8 +290,8 @@ in state' |> 
Proof.map_context (K ctxt') - |> Proof.fix_i (map (fn ((x, T), mx) => (Binding.name x, SOME T, mx)) parms) - |> `Proof.context_of |-> (fn fix_ctxt => Proof.assm_i + |> Proof.fix (map (fn ((x, T), mx) => (Binding.name x, SOME T, mx)) parms) + |> `Proof.context_of |-> (fn fix_ctxt => Proof.assm (obtain_export fix_ctxt rule (map cert ts)) [(Thm.empty_binding, asms)]) |> Proof.bind_terms Auto_Bind.no_facts end; @@ -307,7 +307,7 @@ state |> Proof.enter_forward |> Proof.begin_block - |> Proof.fix_i [(Binding.name Auto_Bind.thesisN, NONE, NoSyn)] + |> Proof.fix [(Binding.name Auto_Bind.thesisN, NONE, NoSyn)] |> Proof.chain_facts chain_facts |> Proof.local_goal print_result (K I) (apsnd (rpair I)) "guess" before_qed after_qed [(Thm.empty_binding, [Logic.mk_term goal, goal])] @@ -316,8 +316,8 @@ in -val guess = gen_guess ProofContext.read_vars; -val guess_i = gen_guess ProofContext.cert_vars; +val guess = gen_guess ProofContext.cert_vars; +val guess_cmd = gen_guess ProofContext.read_vars; end; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/overloading.ML --- a/src/Pure/Isar/overloading.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/overloading.ML Tue May 04 20:30:22 2010 +0200 @@ -67,8 +67,8 @@ fun improve_term_check ts ctxt = let - val { primary_constraints, secondary_constraints, improve, subst, - consider_abbrevs, passed, ... } = ImprovableSyntax.get ctxt; + val { secondary_constraints, improve, subst, consider_abbrevs, passed, ... } = + ImprovableSyntax.get ctxt; val tsig = (Sign.tsig_of o ProofContext.theory_of) ctxt; val is_abbrev = consider_abbrevs andalso ProofContext.abbrev_mode ctxt; val passed_or_abbrev = passed orelse is_abbrev; @@ -156,7 +156,7 @@ in thy |> Theory.checkpoint - |> ProofContext.init + |> ProofContext.init_global |> OverloadingData.put overloading |> fold (fn ((_, ty), (v, _)) => Variable.declare_names (Free (v, ty))) overloading |> add_improvable_syntax diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/proof.ML --- a/src/Pure/Isar/proof.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/proof.ML Tue May 04 20:30:22 2010 +0200 @@ -41,37 +41,37 @@ val raw_goal: state -> {context: context, facts: thm list, goal: thm} val goal: state -> {context: context, facts: thm list, goal: thm} val simple_goal: state -> {context: context, goal: thm} - val match_bind: (string list * string) list -> state -> state - val match_bind_i: (term list * term) list -> state -> state - val let_bind: (string list * string) list -> state -> state - val let_bind_i: (term list * term) list -> state -> state - val fix: (binding * string option * mixfix) list -> state -> state - val fix_i: (binding * typ option * mixfix) list -> state -> state + val let_bind: (term list * term) list -> state -> state + val let_bind_cmd: (string list * string) list -> state -> state + val write: Syntax.mode -> (term * mixfix) list -> state -> state + val write_cmd: Syntax.mode -> (string * mixfix) list -> state -> state + val fix: (binding * typ option * mixfix) list -> state -> state + val fix_cmd: (binding * string option * mixfix) list -> state -> state val assm: Assumption.export -> + (Thm.binding * (term * term list) list) list -> state -> state + val assm_cmd: Assumption.export -> (Attrib.binding * (string * string list) list) list -> state -> state - val assm_i: Assumption.export -> - (Thm.binding * (term * term list) list) list -> state -> state - val assume: (Attrib.binding * (string * string list) list) list -> state -> state - val assume_i: (Thm.binding * (term * term list) list) list -> state 
-> state - val presume: (Attrib.binding * (string * string list) list) list -> state -> state - val presume_i: (Thm.binding * (term * term list) list) list -> state -> state - val def: (Attrib.binding * ((binding * mixfix) * (string * string list))) list -> state -> state - val def_i: (Thm.binding * ((binding * mixfix) * (term * term list))) list -> state -> state + val assume: (Thm.binding * (term * term list) list) list -> state -> state + val assume_cmd: (Attrib.binding * (string * string list) list) list -> state -> state + val presume: (Thm.binding * (term * term list) list) list -> state -> state + val presume_cmd: (Attrib.binding * (string * string list) list) list -> state -> state + val def: (Thm.binding * ((binding * mixfix) * (term * term list))) list -> state -> state + val def_cmd: (Attrib.binding * ((binding * mixfix) * (string * string list))) list -> state -> state val chain: state -> state val chain_facts: thm list -> state -> state - val get_thmss: state -> (Facts.ref * Attrib.src list) list -> thm list - val note_thmss: (Attrib.binding * (Facts.ref * Attrib.src list) list) list -> state -> state - val note_thmss_i: (Thm.binding * (thm list * attribute list) list) list -> state -> state - val from_thmss: ((Facts.ref * Attrib.src list) list) list -> state -> state - val from_thmss_i: ((thm list * attribute list) list) list -> state -> state - val with_thmss: ((Facts.ref * Attrib.src list) list) list -> state -> state - val with_thmss_i: ((thm list * attribute list) list) list -> state -> state - val using: ((Facts.ref * Attrib.src list) list) list -> state -> state - val using_i: ((thm list * attribute list) list) list -> state -> state - val unfolding: ((Facts.ref * Attrib.src list) list) list -> state -> state - val unfolding_i: ((thm list * attribute list) list) list -> state -> state - val invoke_case: string * string option list * Attrib.src list -> state -> state - val invoke_case_i: string * string option list * attribute list -> state -> state + val get_thmss_cmd: state -> (Facts.ref * Attrib.src list) list -> thm list + val note_thmss: (Thm.binding * (thm list * attribute list) list) list -> state -> state + val note_thmss_cmd: (Attrib.binding * (Facts.ref * Attrib.src list) list) list -> state -> state + val from_thmss: ((thm list * attribute list) list) list -> state -> state + val from_thmss_cmd: ((Facts.ref * Attrib.src list) list) list -> state -> state + val with_thmss: ((thm list * attribute list) list) list -> state -> state + val with_thmss_cmd: ((Facts.ref * Attrib.src list) list) list -> state -> state + val using: ((thm list * attribute list) list) list -> state -> state + val using_cmd: ((Facts.ref * Attrib.src list) list) list -> state -> state + val unfolding: ((thm list * attribute list) list) list -> state -> state + val unfolding_cmd: ((Facts.ref * Attrib.src list) list) list -> state -> state + val invoke_case: string * string option list * attribute list -> state -> state + val invoke_case_cmd: string * string option list * Attrib.src list -> state -> state val begin_block: state -> state val next_block: state -> state val end_block: state -> state @@ -87,9 +87,9 @@ ((binding * 'a list) * 'b) list -> state -> state val local_qed: Method.text option * bool -> state -> state val theorem: Method.text option -> (thm list list -> context -> context) -> + (term * term list) list list -> context -> state + val theorem_cmd: Method.text option -> (thm list list -> context -> context) -> (string * string list) list list -> context -> state - val 
theorem_i: Method.text option -> (thm list list -> context -> context) -> - (term * term list) list list -> context -> state val global_qed: Method.text option * bool -> state -> context val local_terminal_proof: Method.text * Method.text option -> state -> state val local_default_proof: state -> state @@ -102,13 +102,13 @@ val global_skip_proof: bool -> state -> context val global_done_proof: state -> context val have: Method.text option -> (thm list list -> state -> state) -> + (Thm.binding * (term * term list) list) list -> bool -> state -> state + val have_cmd: Method.text option -> (thm list list -> state -> state) -> (Attrib.binding * (string * string list) list) list -> bool -> state -> state - val have_i: Method.text option -> (thm list list -> state -> state) -> - (Thm.binding * (term * term list) list) list -> bool -> state -> state val show: Method.text option -> (thm list list -> state -> state) -> + (Thm.binding * (term * term list) list) list -> bool -> state -> state + val show_cmd: Method.text option -> (thm list list -> state -> state) -> (Attrib.binding * (string * string list) list) list -> bool -> state -> state - val show_i: Method.text option -> (thm list list -> state -> state) -> - (Thm.binding * (term * term list) list) list -> bool -> state -> state val schematic_goal: state -> bool val is_relevant: state -> bool val local_future_proof: (state -> ('a * state) Future.future) -> @@ -523,22 +523,40 @@ (** context elements **) -(* bindings *) +(* let bindings *) local fun gen_bind bind args state = state |> assert_forward - |> map_context (bind args #> snd) + |> map_context (bind true args #> snd) |> put_facts NONE; in -val match_bind = gen_bind (ProofContext.match_bind false); -val match_bind_i = gen_bind (ProofContext.match_bind_i false); -val let_bind = gen_bind (ProofContext.match_bind true); -val let_bind_i = gen_bind (ProofContext.match_bind_i true); +val let_bind = gen_bind ProofContext.match_bind_i; +val let_bind_cmd = gen_bind ProofContext.match_bind; + +end; + + +(* concrete syntax *) + +local + +fun gen_write prep_arg mode args = + assert_forward + #> map_context (fn ctxt => ctxt |> ProofContext.notation true mode (map (prep_arg ctxt) args)) + #> put_facts NONE; + +in + +val write = gen_write (K I); + +val write_cmd = + gen_write (fn ctxt => fn (c, mx) => + (ProofContext.read_const ctxt false (Syntax.mixfixT mx) c, mx)); end; @@ -554,8 +572,8 @@ in -val fix = gen_fix (fn ctxt => fn args => fst (ProofContext.read_vars args ctxt)); -val fix_i = gen_fix (K I); +val fix = gen_fix (K I); +val fix_cmd = gen_fix (fn ctxt => fn args => fst (ProofContext.read_vars args ctxt)); end; @@ -572,12 +590,12 @@ in -val assm = gen_assume ProofContext.add_assms Attrib.attribute; -val assm_i = gen_assume ProofContext.add_assms_i (K I); -val assume = assm Assumption.assume_export; -val assume_i = assm_i Assumption.assume_export; -val presume = assm Assumption.presume_export; -val presume_i = assm_i Assumption.presume_export; +val assm = gen_assume ProofContext.add_assms_i (K I); +val assm_cmd = gen_assume ProofContext.add_assms Attrib.attribute; +val assume = assm Assumption.assume_export; +val assume_cmd = assm_cmd Assumption.assume_export; +val presume = assm Assumption.presume_export; +val presume_cmd = assm_cmd Assumption.presume_export; end; @@ -605,8 +623,8 @@ in -val def = gen_def Attrib.attribute ProofContext.read_vars ProofContext.match_bind; -val def_i = gen_def (K I) ProofContext.cert_vars ProofContext.match_bind_i; +val def = gen_def (K I) ProofContext.cert_vars 
ProofContext.match_bind_i; +val def_cmd = gen_def Attrib.attribute ProofContext.read_vars ProofContext.match_bind; end; @@ -646,18 +664,18 @@ in -val note_thmss = gen_thmss (K []) I #2 Attrib.attribute ProofContext.get_fact; -val note_thmss_i = gen_thmss (K []) I #2 (K I) (K I); +val note_thmss = gen_thmss (K []) I #2 (K I) (K I); +val note_thmss_cmd = gen_thmss (K []) I #2 Attrib.attribute ProofContext.get_fact; -val from_thmss = gen_thmss (K []) chain #2 Attrib.attribute ProofContext.get_fact o no_binding; -val from_thmss_i = gen_thmss (K []) chain #2 (K I) (K I) o no_binding; +val from_thmss = gen_thmss (K []) chain #2 (K I) (K I) o no_binding; +val from_thmss_cmd = gen_thmss (K []) chain #2 Attrib.attribute ProofContext.get_fact o no_binding; -val with_thmss = gen_thmss the_facts chain #2 Attrib.attribute ProofContext.get_fact o no_binding; -val with_thmss_i = gen_thmss the_facts chain #2 (K I) (K I) o no_binding; +val with_thmss = gen_thmss the_facts chain #2 (K I) (K I) o no_binding; +val with_thmss_cmd = gen_thmss the_facts chain #2 Attrib.attribute ProofContext.get_fact o no_binding; val local_results = gen_thmss (K []) I I (K I) (K I) o map (apsnd Thm.simple_fact); -fun get_thmss state srcs = the_facts (note_thmss [((Binding.empty, []), srcs)] state); +fun get_thmss_cmd state srcs = the_facts (note_thmss_cmd [((Binding.empty, []), srcs)] state); end; @@ -686,10 +704,10 @@ in -val using = gen_using append_using (K (K I)) Attrib.attribute ProofContext.get_fact; -val using_i = gen_using append_using (K (K I)) (K I) (K I); -val unfolding = gen_using unfold_using unfold_goals Attrib.attribute ProofContext.get_fact; -val unfolding_i = gen_using unfold_using unfold_goals (K I) (K I); +val using = gen_using append_using (K (K I)) (K I) (K I); +val using_cmd = gen_using append_using (K (K I)) Attrib.attribute ProofContext.get_fact; +val unfolding = gen_using unfold_using unfold_goals (K I) (K I); +val unfolding_cmd = gen_using unfold_using unfold_goals Attrib.attribute ProofContext.get_fact; end; @@ -709,15 +727,15 @@ val assumptions = asms |> map (fn (a, ts) => ((qualified_binding a, atts), map (rpair []) ts)); in state' - |> assume_i assumptions + |> assume assumptions |> bind_terms Auto_Bind.no_facts - |> `the_facts |-> (fn thms => note_thmss_i [((Binding.name name, []), [(thms, [])])]) + |> `the_facts |-> (fn thms => note_thmss [((Binding.name name, []), [(thms, [])])]) end; in -val invoke_case = gen_invoke_case Attrib.attribute; -val invoke_case_i = gen_invoke_case (K I); +val invoke_case = gen_invoke_case (K I); +val invoke_case_cmd = gen_invoke_case Attrib.attribute; end; @@ -790,15 +808,16 @@ local -fun implicit_vars dest add props = +val is_var = + can (dest_TVar o Logic.dest_type o Logic.dest_term) orf + can (dest_Var o Logic.dest_term); + +fun implicit_vars props = let - val (explicit_vars, props') = take_prefix (can dest) props |>> map dest; - val vars = rev (subtract (op =) explicit_vars (fold add props [])); - val _ = - if null vars then () - else warning ("Goal statement contains unbound schematic variable(s): " ^ - commas_quote (map (Term.string_of_vname o fst) vars)); - in (rev vars, props') end; + val (var_props, _) = take_prefix is_var props; + val explicit_vars = fold Term.add_vars var_props []; + val vars = filter_out (member (op =) explicit_vars) (fold Term.add_vars props []); + in map (Logic.mk_term o Var) vars end; fun refine_terms n = refine (Method.Basic (K (RAW_METHOD @@ -823,11 +842,8 @@ |> map_context_result (fn ctxt => swap (prepp (ctxt, raw_propp))); val 
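(*  The Proof.ML signature above completes the renaming scheme: each former
    "_i" operation (fix_i, assume_i, have_i, show_i, theorem_i, ...) becomes
    the unsuffixed name working on checked terms and thms, and the former
    string-level operation becomes the corresponding "_cmd".  A minimal
    sketch of a caller written against the new names, assuming internal
    tool code at this revision; the binding "x", the goal term and the int
    flag are placeholders:

      state
      |> Proof.fix [(Binding.name "x", NONE, NoSyn)]
      |> Proof.have NONE (K I) [(Thm.empty_binding, [(goal, [])])] int

    This matches the calls already adapted in obtain.ML above.  *)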
props = flat propss; - val (_, props') = - implicit_vars (dest_TVar o Logic.dest_type o Logic.dest_term) Term.add_tvars props; - val (vars, _) = implicit_vars (dest_Var o Logic.dest_term) Term.add_vars props'; - - val propss' = map (Logic.mk_term o Var) vars :: propss; + val vars = implicit_vars props; + val propss' = vars :: propss; val goal_propss = filter_out null propss'; val goal = cert (Logic.mk_conjunction_balanced (map Logic.mk_conjunction_balanced goal_propss)) @@ -851,11 +867,10 @@ fun generic_qed after_ctxt state = let - val (goal_ctxt, {statement, goal, after_qed, ...}) = current_goal state; + val (goal_ctxt, {statement = (_, stmt, _), goal, after_qed, ...}) = current_goal state; val outer_state = state |> close_block; val outer_ctxt = context_of outer_state; - val ((_, pos), stmt, _) = statement; val props = flat (tl stmt) |> Variable.exportT_terms goal_ctxt outer_ctxt; @@ -903,8 +918,8 @@ init ctxt |> generic_goal (prepp #> ProofContext.auto_fixes) "" before_qed (K I, after_qed) propp; -val theorem = global_goal ProofContext.bind_propp_schematic; -val theorem_i = global_goal ProofContext.bind_propp_schematic_i; +val theorem = global_goal ProofContext.bind_propp_schematic_i; +val theorem_cmd = global_goal ProofContext.bind_propp_schematic; fun global_qeds txt = end_proof true txt @@ -976,10 +991,10 @@ in -val have = gen_have Attrib.attribute ProofContext.bind_propp; -val have_i = gen_have (K I) ProofContext.bind_propp_i; -val show = gen_show Attrib.attribute ProofContext.bind_propp; -val show_i = gen_show (K I) ProofContext.bind_propp_i; +val have = gen_have (K I) ProofContext.bind_propp_i; +val have_cmd = gen_have Attrib.attribute ProofContext.bind_propp; +val show = gen_show (K I) ProofContext.bind_propp_i; +val show_cmd = gen_show Attrib.attribute ProofContext.bind_propp; end; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/proof_context.ML --- a/src/Pure/Isar/proof_context.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/proof_context.ML Tue May 04 20:30:22 2010 +0200 @@ -9,7 +9,7 @@ signature PROOF_CONTEXT = sig val theory_of: Proof.context -> theory - val init: theory -> Proof.context + val init_global: theory -> Proof.context type mode val mode_default: mode val mode_stmt: mode @@ -28,6 +28,7 @@ val full_name: Proof.context -> binding -> string val syn_of: Proof.context -> Syntax.syntax val tsig_of: Proof.context -> Type.tsig + val set_defsort: sort -> Proof.context -> Proof.context val default_sort: Proof.context -> indexname -> sort val consts_of: Proof.context -> Consts.T val the_const_constraint: Proof.context -> string -> typ @@ -54,13 +55,13 @@ val cert_typ_abbrev: Proof.context -> typ -> typ val get_skolem: Proof.context -> string -> string val revert_skolem: Proof.context -> string -> string - val infer_type: Proof.context -> string -> typ + val infer_type: Proof.context -> string * typ -> typ val inferred_param: string -> Proof.context -> typ * Proof.context val inferred_fixes: Proof.context -> (string * typ) list * Proof.context val read_type_name: Proof.context -> bool -> string -> typ val read_type_name_proper: Proof.context -> bool -> string -> typ val read_const_proper: Proof.context -> bool -> string -> term - val read_const: Proof.context -> bool -> string -> term + val read_const: Proof.context -> bool -> typ -> string -> term val allow_dummies: Proof.context -> Proof.context val check_tvar: Proof.context -> indexname * sort -> indexname * sort val check_tfree: Proof.context -> string * sort -> string * sort @@ -178,17 +179,17 @@ datatype ctxt = 
Ctxt of - {mode: mode, (*inner syntax mode*) - naming: Name_Space.naming, (*local naming conventions*) - syntax: Local_Syntax.T, (*local syntax*) - tsigs: Type.tsig * Type.tsig, (*local/global type signature -- local name space only*) - consts: Consts.T * Consts.T, (*local/global consts -- local name space/abbrevs only*) - facts: Facts.T, (*local facts*) + {mode: mode, (*inner syntax mode*) + naming: Name_Space.naming, (*local naming conventions*) + syntax: Local_Syntax.T, (*local syntax*) + tsig: Type.tsig * Type.tsig, (*local/global type signature -- local name space / defsort only*) + consts: Consts.T * Consts.T, (*local/global consts -- local name space / abbrevs only*) + facts: Facts.T, (*local facts*) cases: (string * (Rule_Cases.T * bool)) list}; (*named case contexts*) -fun make_ctxt (mode, naming, syntax, tsigs, consts, facts, cases) = +fun make_ctxt (mode, naming, syntax, tsig, consts, facts, cases) = Ctxt {mode = mode, naming = naming, syntax = syntax, - tsigs = tsigs, consts = consts, facts = facts, cases = cases}; + tsig = tsig, consts = consts, facts = facts, cases = cases}; val local_naming = Name_Space.default_naming |> Name_Space.add_path "local"; @@ -204,39 +205,39 @@ fun rep_context ctxt = ContextData.get ctxt |> (fn Ctxt args => args); fun map_context f = - ContextData.map (fn Ctxt {mode, naming, syntax, tsigs, consts, facts, cases} => - make_ctxt (f (mode, naming, syntax, tsigs, consts, facts, cases))); + ContextData.map (fn Ctxt {mode, naming, syntax, tsig, consts, facts, cases} => + make_ctxt (f (mode, naming, syntax, tsig, consts, facts, cases))); -fun set_mode mode = map_context (fn (_, naming, syntax, tsigs, consts, facts, cases) => - (mode, naming, syntax, tsigs, consts, facts, cases)); +fun set_mode mode = map_context (fn (_, naming, syntax, tsig, consts, facts, cases) => + (mode, naming, syntax, tsig, consts, facts, cases)); fun map_mode f = - map_context (fn (Mode {stmt, pattern, schematic, abbrev}, naming, syntax, tsigs, consts, facts, cases) => - (make_mode (f (stmt, pattern, schematic, abbrev)), naming, syntax, tsigs, consts, facts, cases)); + map_context (fn (Mode {stmt, pattern, schematic, abbrev}, naming, syntax, tsig, consts, facts, cases) => + (make_mode (f (stmt, pattern, schematic, abbrev)), naming, syntax, tsig, consts, facts, cases)); fun map_naming f = - map_context (fn (mode, naming, syntax, tsigs, consts, facts, cases) => - (mode, f naming, syntax, tsigs, consts, facts, cases)); + map_context (fn (mode, naming, syntax, tsig, consts, facts, cases) => + (mode, f naming, syntax, tsig, consts, facts, cases)); fun map_syntax f = - map_context (fn (mode, naming, syntax, tsigs, consts, facts, cases) => - (mode, naming, f syntax, tsigs, consts, facts, cases)); + map_context (fn (mode, naming, syntax, tsig, consts, facts, cases) => + (mode, naming, f syntax, tsig, consts, facts, cases)); -fun map_tsigs f = - map_context (fn (mode, naming, syntax, tsigs, consts, facts, cases) => - (mode, naming, syntax, f tsigs, consts, facts, cases)); +fun map_tsig f = + map_context (fn (mode, naming, syntax, tsig, consts, facts, cases) => + (mode, naming, syntax, f tsig, consts, facts, cases)); fun map_consts f = - map_context (fn (mode, naming, syntax, tsigs, consts, facts, cases) => - (mode, naming, syntax, tsigs, f consts, facts, cases)); + map_context (fn (mode, naming, syntax, tsig, consts, facts, cases) => + (mode, naming, syntax, tsig, f consts, facts, cases)); fun map_facts f = - map_context (fn (mode, naming, syntax, tsigs, consts, facts, cases) => - (mode, 
naming, syntax, tsigs, consts, f facts, cases)); + map_context (fn (mode, naming, syntax, tsig, consts, facts, cases) => + (mode, naming, syntax, tsig, consts, f facts, cases)); fun map_cases f = - map_context (fn (mode, naming, syntax, tsigs, consts, facts, cases) => - (mode, naming, syntax, tsigs, consts, facts, f cases)); + map_context (fn (mode, naming, syntax, tsig, consts, facts, cases) => + (mode, naming, syntax, tsig, consts, facts, f cases)); val get_mode = #mode o rep_context; val restore_mode = set_mode o get_mode; @@ -254,7 +255,8 @@ val set_syntax_mode = map_syntax o Local_Syntax.set_mode; val restore_syntax_mode = map_syntax o Local_Syntax.restore_mode o syntax_of; -val tsig_of = #1 o #tsigs o rep_context; +val tsig_of = #1 o #tsig o rep_context; +val set_defsort = map_tsig o apfst o Type.set_defsort; fun default_sort ctxt = the_default (Type.defaultS (tsig_of ctxt)) o Variable.def_sort ctxt; val consts_of = #1 o #consts o rep_context; @@ -268,10 +270,10 @@ fun transfer_syntax thy ctxt = ctxt |> map_syntax (Local_Syntax.rebuild thy) |> - map_tsigs (fn tsigs as (local_tsig, global_tsig) => + map_tsig (fn tsig as (local_tsig, global_tsig) => let val thy_tsig = Sign.tsig_of thy in - if Type.eq_tsig (thy_tsig, global_tsig) then tsigs - else (Type.merge_tsigs (Syntax.pp ctxt) (local_tsig, thy_tsig), thy_tsig) + if Type.eq_tsig (thy_tsig, global_tsig) then tsig + else (Type.merge_tsig (Syntax.pp ctxt) (local_tsig, thy_tsig), thy_tsig) end) |> map_consts (fn consts as (local_consts, global_consts) => let val thy_consts = Sign.consts_of thy in @@ -436,11 +438,10 @@ (* inferred types of parameters *) fun infer_type ctxt x = - Term.fastype_of (singleton (Syntax.check_terms (set_mode mode_schematic ctxt)) - (Free (x, dummyT))); + Term.fastype_of (singleton (Syntax.check_terms (set_mode mode_schematic ctxt)) (Free x)); fun inferred_param x ctxt = - let val T = infer_type ctxt x + let val T = infer_type ctxt (x, dummyT) in (T, ctxt |> Variable.declare_term (Free (x, T))) end; fun inferred_fixes ctxt = @@ -503,13 +504,16 @@ fun read_const_proper ctxt strict = prep_const_proper ctxt strict o token_content; -fun read_const ctxt strict text = - let val (c, pos) = token_content text in +fun read_const ctxt strict ty text = + let + val (c, pos) = token_content text; + val _ = no_skolem false c; + in (case (lookup_skolem ctxt c, Variable.is_const ctxt c) of (SOME x, false) => (Position.report (Markup.name x (if can Name.dest_skolem x then Markup.skolem else Markup.free)) pos; - Free (x, infer_type ctxt x)) + Free (x, infer_type ctxt (x, ty))) | _ => prep_const_proper ctxt strict (c, pos)) end; @@ -608,21 +612,19 @@ (* types *) -fun get_sort ctxt raw_env = +fun get_sort ctxt raw_text = let val tsig = tsig_of ctxt; - fun eq ((xi, S), (xi', S')) = - Term.eq_ix (xi, xi') andalso Type.eq_sort tsig (S, S'); - val env = distinct eq raw_env; + val text = distinct (op =) (map (apsnd (Type.minimize_sort tsig)) raw_text); val _ = - (case duplicates (eq_fst (op =)) env of + (case duplicates (eq_fst (op =)) text of [] => () | dups => error ("Inconsistent sort constraints for type variable(s) " ^ commas_quote (map (Term.string_of_vname' o fst) dups))); fun lookup xi = - (case AList.lookup (op =) env xi of + (case AList.lookup (op =) text xi of NONE => NONE | SOME S => if S = dummyS then NONE else SOME S); @@ -738,8 +740,8 @@ let val (syms, pos) = Syntax.parse_token Markup.sort text; val S = Syntax.standard_parse_sort ctxt (syn_of ctxt) (syms, pos) - handle ERROR msg => cat_error msg ("Failed to parse sort" ^ 
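(*  proof_context.ML above gives read_const an additional expected-type
    argument (and infer_type now takes a string * typ pair); callers without
    a constraint pass dummyT, as args.ML does earlier in this changeset.
    A minimal sketch, assuming a proof context ctxt at this revision; the
    constant name "plus" and the strict flag false are placeholders:

      ProofContext.read_const ctxt false dummyT "plus"

    For fixed variables the given type is fed into infer_type, as the Free
    branch of read_const above shows.  *)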
Position.str_of pos) - in S end; + handle ERROR msg => cat_error msg ("Failed to parse sort" ^ Position.str_of pos) + in Type.minimize_sort (tsig_of ctxt) S end; fun parse_typ ctxt text = let @@ -1142,8 +1144,8 @@ (* aliases *) -fun class_alias b c ctxt = (map_tsigs o apfst) (Type.class_alias (naming_of ctxt) b c) ctxt; -fun type_alias b c ctxt = (map_tsigs o apfst) (Type.type_alias (naming_of ctxt) b c) ctxt; +fun class_alias b c ctxt = (map_tsig o apfst) (Type.class_alias (naming_of ctxt) b c) ctxt; +fun type_alias b c ctxt = (map_tsig o apfst) (Type.type_alias (naming_of ctxt) b c) ctxt; fun const_alias b c ctxt = (map_consts o apfst) (Consts.alias (naming_of ctxt) b c) ctxt; @@ -1175,16 +1177,6 @@ (* fixes *) -local - -fun prep_mixfix (x, T, mx) = - if mx <> NoSyn andalso mx <> Structure andalso - (can Name.dest_internal x orelse can Name.dest_skolem x) then - error ("Illegal mixfix syntax for internal/skolem constant " ^ quote x) - else (Local_Syntax.Fixed, (x, T, mx)); - -in - fun add_fixes raw_vars ctxt = let val (vars, _) = cert_vars raw_vars ctxt; @@ -1192,13 +1184,11 @@ val ctxt'' = ctxt' |> fold_map declare_var (map2 (fn x' => fn (_, T, mx) => (x', T, mx)) xs' vars) - |-> (map_syntax o Local_Syntax.add_syntax (theory_of ctxt) o map prep_mixfix); + |-> (map_syntax o Local_Syntax.add_syntax (theory_of ctxt) o map (pair Local_Syntax.Fixed)); val _ = (vars ~~ xs') |> List.app (fn ((b, _, _), x') => Context_Position.report_visible ctxt (Markup.fixed_decl x') (Binding.pos_of b)); in (xs', ctxt'') end; -end; - (* fixes vs. frees *) diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/proof_display.ML --- a/src/Pure/Isar/proof_display.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/proof_display.ML Tue May 04 20:30:22 2010 +0200 @@ -48,7 +48,7 @@ fun pretty_theorems_diff verbose prev_thys thy = let - val pretty_fact = ProofContext.pretty_fact (ProofContext.init thy); + val pretty_fact = ProofContext.pretty_fact (ProofContext.init_global thy); val facts = PureThy.facts_of thy; val thmss = Facts.dest_static (map PureThy.facts_of prev_thys) facts diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/rule_cases.ML --- a/src/Pure/Isar/rule_cases.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/rule_cases.ML Tue May 04 20:30:22 2010 +0200 @@ -368,7 +368,7 @@ map (Name.internal o Name.clean o fst) (Logic.strip_params prem) in Logic.list_rename_params (xs, prem) end; fun rename_prems prop = - let val (As, C) = Logic.strip_horn (Thm.prop_of rule) + let val (As, C) = Logic.strip_horn prop in Logic.list_implies (map rename As, C) end; in Thm.equal_elim (Thm.reflexive (Drule.cterm_fun rename_prems (Thm.cprop_of rule))) rule end; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/skip_proof.ML --- a/src/Pure/Isar/skip_proof.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/skip_proof.ML Tue May 04 20:30:22 2010 +0200 @@ -39,6 +39,6 @@ else tac args st); fun prove_global thy xs asms prop tac = - Drule.export_without_context (prove (ProofContext.init thy) xs asms prop tac); + Drule.export_without_context (prove (ProofContext.init_global thy) xs asms prop tac); end; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/specification.ML --- a/src/Pure/Isar/specification.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/specification.ML Tue May 04 20:30:22 2010 +0200 @@ -169,7 +169,7 @@ fun gen_axioms do_print prep raw_vars raw_specs thy = let - val ((vars, specs), _) = prep raw_vars raw_specs (ProofContext.init thy); + val ((vars, specs), _) = prep raw_vars raw_specs (ProofContext.init_global 
thy); val xs = map (fn ((b, T), _) => (Name.of_binding b, T)) vars; (*consts*) @@ -284,7 +284,7 @@ val type_notation_cmd = gen_type_notation (fn ctxt => ProofContext.read_type_name ctxt false); val notation = gen_notation (K I); -val notation_cmd = gen_notation (fn ctxt => ProofContext.read_const ctxt false); +val notation_cmd = gen_notation (fn ctxt => ProofContext.read_const ctxt false dummyT); end; @@ -403,7 +403,7 @@ goal_ctxt |> ProofContext.note_thmss "" [((Binding.name Auto_Bind.assmsN, []), [(prems, [])])] |> snd - |> Proof.theorem_i before_qed after_qed' (map snd stmt) + |> Proof.theorem before_qed after_qed' (map snd stmt) |> (case facts of NONE => I | SOME ths => Proof.refine_insert ths) |> tap (fn state => not schematic andalso Proof.schematic_goal state andalso error "Illegal schematic goal statement") diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/theory_target.ML --- a/src/Pure/Isar/theory_target.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/theory_target.ML Tue May 04 20:30:22 2010 +0200 @@ -114,7 +114,7 @@ fun import_export_proof ctxt (name, raw_th) = let val thy = ProofContext.theory_of ctxt; - val thy_ctxt = ProofContext.init thy; + val thy_ctxt = ProofContext.init_global thy; val certT = Thm.ctyp_of thy; val cert = Thm.cterm_of thy; @@ -213,7 +213,7 @@ fun abbrev (ta as Target {target, is_locale, is_class, ...}) prmode ((b, mx), t) lthy = let - val thy_ctxt = ProofContext.init (ProofContext.theory_of lthy); + val thy_ctxt = ProofContext.init_global (ProofContext.theory_of lthy); val target_ctxt = Local_Theory.target_of lthy; val (mx1, mx2, mx3) = fork_mixfix ta mx; @@ -286,7 +286,7 @@ fun define ta ((b, mx), ((name, atts), rhs)) lthy = let val thy = ProofContext.theory_of lthy; - val thy_ctxt = ProofContext.init thy; + val thy_ctxt = ProofContext.init_global thy; val name' = Thm.def_binding_optional b name; @@ -342,7 +342,7 @@ fun init_ctxt (Target {target, is_locale, is_class, instantiation, overloading}) = if not (null (#1 instantiation)) then Class_Target.init_instantiation instantiation else if not (null overloading) then Overloading.init overloading - else if not is_locale then ProofContext.init + else if not is_locale then ProofContext.init_global else if not is_class then Locale.init target else Class_Target.init target; @@ -364,7 +364,7 @@ fun gen_overloading prep_const raw_ops thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val ops = raw_ops |> map (fn (name, const, checked) => (name, Term.dest_Const (prep_const ctxt const), checked)); in thy |> init_lthy_ctxt (make_target "" false false ([], [], []) ops) end; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/toplevel.ML --- a/src/Pure/Isar/toplevel.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/toplevel.ML Tue May 04 20:30:22 2010 +0200 @@ -661,6 +661,7 @@ if immediate orelse null proof_trs orelse OuterKeyword.is_schematic_goal (name_of tr) orelse + exists (OuterKeyword.is_qed_global o name_of) proof_trs orelse not (can proof_of st') orelse Proof.is_relevant (proof_of st') then diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Isar/typedecl.ML --- a/src/Pure/Isar/typedecl.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Isar/typedecl.ML Tue May 04 20:30:22 2010 +0200 @@ -82,10 +82,12 @@ (* type abbreviations *) +local + fun gen_abbrev prep_typ (b, vs, mx) raw_rhs lthy = let val Type (name, _) = global_type lthy (b, map (rpair dummyS) vs); - val rhs = prep_typ lthy raw_rhs + val rhs = prep_typ b lthy raw_rhs handle ERROR msg => cat_error msg ("in type 
abbreviation " ^ quote (Binding.str_of b)); in lthy @@ -94,8 +96,23 @@ |> pair name end; -val abbrev = gen_abbrev ProofContext.cert_typ_syntax; -val abbrev_cmd = gen_abbrev ProofContext.read_typ_syntax; +fun read_abbrev b ctxt raw_rhs = + let + val rhs = ProofContext.read_typ_syntax (ctxt |> ProofContext.set_defsort []) raw_rhs; + val ignored = Term.fold_atyps_sorts (fn (_, []) => I | (T, _) => insert (op =) T) rhs []; + val _ = + if null ignored then () + else warning ("Ignoring sort constraints in type variables(s): " ^ + commas_quote (map (Syntax.string_of_typ ctxt) (rev ignored)) ^ + "\nin type abbreviation " ^ quote (Binding.str_of b)); + in rhs end; + +in + +val abbrev = gen_abbrev (K ProofContext.cert_typ_syntax); +val abbrev_cmd = gen_abbrev read_abbrev; + +end; fun abbrev_global decl rhs = Theory_Target.init NONE diff -r aace7a969410 -r 8629ac3efb19 src/Pure/ML/ml_thms.ML --- a/src/Pure/ML/ml_thms.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/ML/ml_thms.ML Tue May 04 20:30:22 2010 +0200 @@ -64,7 +64,7 @@ fun after_qed res goal_ctxt = put_thms (i, map prep_result (ProofContext.export goal_ctxt ctxt (flat res))) goal_ctxt; val ctxt' = ctxt - |> Proof.theorem_i NONE after_qed propss + |> Proof.theorem NONE after_qed propss |> Proof.global_terminal_proof methods; val (a, background') = background |> ML_Antiquote.variant "lemma" ||> put_thms (i, the_thms ctxt' i); diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Proof/extraction.ML --- a/src/Pure/Proof/extraction.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Proof/extraction.ML Tue May 04 20:30:22 2010 +0200 @@ -136,8 +136,7 @@ | strip_abs n (Abs (_, _, t)) = strip_abs (n-1) t | strip_abs _ _ = error "strip_abs: not an abstraction"; -fun prf_subst_TVars tye = - map_proof_terms (subst_TVars tye) (typ_subst_TVars tye); +val prf_subst_TVars = map_proof_types o typ_subst_TVars; fun relevant_vars types prop = List.foldr (fn (Var ((a, _), T), vs) => (case strip_type T of diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Proof/proof_syntax.ML --- a/src/Pure/Proof/proof_syntax.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Proof/proof_syntax.ML Tue May 04 20:30:22 2010 +0200 @@ -45,7 +45,7 @@ thy |> Theory.copy |> Sign.root_path - |> Sign.add_defsort_i [] + |> Sign.set_defsort [] |> Sign.add_types [(Binding.name "proof", 0, NoSyn)] |> fold (snd oo Sign.declare_const) [((Binding.name "Appt", [proofT, aT] ---> proofT), Mixfix ("(1_ %/ _)", [4, 5], 4)), @@ -206,7 +206,7 @@ |> add_proof_syntax |> add_proof_atom_consts (map (Long_Name.append "axm") axm_names @ map (Long_Name.append "thm") thm_names) - |> ProofContext.init + |> ProofContext.init_global |> ProofContext.allow_dummies |> ProofContext.set_mode ProofContext.mode_schematic; in diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Proof/reconstruct.ML --- a/src/Pure/Proof/reconstruct.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Proof/reconstruct.ML Tue May 04 20:30:22 2010 +0200 @@ -376,8 +376,7 @@ val varify = map_type_tfree (fn p as (a, S) => if member (op =) tfrees p then TVar ((a, ~1), S) else TFree p) in - (maxidx', prfs', map_proof_terms (subst_TVars tye o - map_types varify) (typ_subst_TVars tye o varify) prf) + (maxidx', prfs', map_proof_types (typ_subst_TVars tye o varify) prf) end | expand maxidx prfs prf = (maxidx, prfs, prf); diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Syntax/syn_trans.ML --- a/src/Pure/Syntax/syn_trans.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Syntax/syn_trans.ML Tue May 04 20:30:22 2010 +0200 @@ -567,14 +567,7 @@ Term.list_comb (term_of ast, map term_of 
asts) | term_of (ast as Ast.Appl _) = raise Ast.AST ("ast_to_term: malformed ast", [ast]); - val free_fixed = Term.map_aterms - (fn t as Const (c, T) => - (case try Lexicon.unmark_fixed c of - NONE => t - | SOME x => Free (x, T)) - | t => t); - - val exn_results = map (Exn.capture (term_of #> free_fixed)) asts; + val exn_results = map (Exn.capture term_of) asts; val exns = map_filter Exn.get_exn exn_results; val results = map_filter Exn.get_result exn_results in (case (results, exns) of ([], exn :: _) => reraise exn | _ => results) end; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Syntax/syntax.ML --- a/src/Pure/Syntax/syntax.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Syntax/syntax.ML Tue May 04 20:30:22 2010 +0200 @@ -850,10 +850,10 @@ val read_term = singleton o read_terms; val read_prop = singleton o read_props; -val read_sort_global = read_sort o ProofContext.init; -val read_typ_global = read_typ o ProofContext.init; -val read_term_global = read_term o ProofContext.init; -val read_prop_global = read_prop o ProofContext.init; +val read_sort_global = read_sort o ProofContext.init_global; +val read_typ_global = read_typ o ProofContext.init_global; +val read_term_global = read_term o ProofContext.init_global; +val read_prop_global = read_prop o ProofContext.init_global; (* pretty = uncheck + unparse *) @@ -876,7 +876,7 @@ structure PrettyGlobal = Proof_Data(type T = bool fun init _ = false); val is_pretty_global = PrettyGlobal.get; val set_pretty_global = PrettyGlobal.put; -val init_pretty_global = set_pretty_global true o ProofContext.init; +val init_pretty_global = set_pretty_global true o ProofContext.init_global; val pretty_term_global = pretty_term o init_pretty_global; val pretty_typ_global = pretty_typ o init_pretty_global; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Syntax/type_ext.ML --- a/src/Pure/Syntax/type_ext.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Syntax/type_ext.ML Tue May 04 20:30:22 2010 +0200 @@ -110,8 +110,7 @@ fun decode_term get_sort map_const map_free tm = let - val sort_env = term_sorts tm; - val decodeT = typ_of_term (get_sort sort_env); + val decodeT = typ_of_term (get_sort (term_sorts tm)); fun decode (Const ("_constrain", _) $ t $ typ) = type_constraint (decodeT typ) (decode t) @@ -121,11 +120,14 @@ | decode (Abs (x, T, t)) = Abs (x, T, decode t) | decode (t $ u) = decode t $ decode u | decode (Const (a, T)) = - let val c = - (case try Lexicon.unmark_const a of - SOME c => c - | NONE => snd (map_const a)) - in Const (c, T) end + (case try Lexicon.unmark_fixed a of + SOME x => Free (x, T) + | NONE => + let val c = + (case try Lexicon.unmark_const a of + SOME c => c + | NONE => snd (map_const a)) + in Const (c, T) end) | decode (Free (a, T)) = (case (map_free a, map_const a) of (SOME x, _) => Free (x, T) diff -r aace7a969410 -r 8629ac3efb19 src/Pure/Thy/thy_output.ML --- a/src/Pure/Thy/thy_output.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/Thy/thy_output.ML Tue May 04 20:30:22 2010 +0200 @@ -453,7 +453,7 @@ fun pretty_term_typ ctxt (style, t) = let val t' = style t - in pretty_term ctxt (TypeInfer.constrain (Term.fastype_of t) t) end; + in pretty_term ctxt (TypeInfer.constrain (Term.fastype_of t') t') end; fun pretty_term_typeof ctxt (style, t) = Syntax.pretty_typ ctxt (Term.fastype_of (style t)); @@ -574,7 +574,7 @@ val prop_src = (case Args.dest_src source of ((a, arg :: _), pos) => Args.src ((a, [arg]), pos)); val _ = context - |> Proof.theorem_i NONE (K I) [[(prop, [])]] + |> Proof.theorem NONE (K I) [[(prop, [])]] |> 
Proof.global_terminal_proof methods; in output (maybe_pretty_source (pretty_term context) prop_src [prop]) end); diff -r aace7a969410 -r 8629ac3efb19 src/Pure/axclass.ML --- a/src/Pure/axclass.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/axclass.ML Tue May 04 20:30:22 2010 +0200 @@ -2,43 +2,40 @@ Author: Markus Wenzel, TU Muenchen Type classes defined as predicates, associated with a record of -parameters. +parameters. Proven class relations and type arities. *) signature AX_CLASS = sig - val define_class: binding * class list -> string list -> - (Thm.binding * term list) list -> theory -> class * theory + type info = {def: thm, intro: thm, axioms: thm list, params: (string * typ) list} + val get_info: theory -> class -> info + val class_of_param: theory -> string -> class option + val instance_name: string * class -> string + val thynames_of_arity: theory -> class * string -> string list + val param_of_inst: theory -> string * string -> string + val inst_of_param: theory -> string -> (string * string) option + val unoverload: theory -> thm -> thm + val overload: theory -> thm -> thm + val unoverload_conv: theory -> conv + val overload_conv: theory -> conv + val lookup_inst_param: Consts.T -> ((string * string) * 'a) list -> string * typ -> 'a option + val unoverload_const: theory -> string * typ -> string + val cert_classrel: theory -> class * class -> class * class + val read_classrel: theory -> xstring * xstring -> class * class + val declare_overloaded: string * typ -> theory -> term * theory + val define_overloaded: binding -> string * term -> theory -> thm * theory val add_classrel: thm -> theory -> theory val add_arity: thm -> theory -> theory val prove_classrel: class * class -> tactic -> theory -> theory val prove_arity: string * sort list * sort -> tactic -> theory -> theory - val get_info: theory -> class -> - {def: thm, intro: thm, axioms: thm list, params: (string * typ) list} - val class_intros: theory -> thm list - val class_of_param: theory -> string -> class option - val cert_classrel: theory -> class * class -> class * class - val read_classrel: theory -> xstring * xstring -> class * class + val define_class: binding * class list -> string list -> + (Thm.binding * term list) list -> theory -> class * theory val axiomatize_class: binding * class list -> theory -> theory val axiomatize_class_cmd: binding * xstring list -> theory -> theory val axiomatize_classrel: (class * class) list -> theory -> theory val axiomatize_classrel_cmd: (xstring * xstring) list -> theory -> theory val axiomatize_arity: arity -> theory -> theory val axiomatize_arity_cmd: xstring * string list * string -> theory -> theory - val instance_name: string * class -> string - val declare_overloaded: string * typ -> theory -> term * theory - val define_overloaded: binding -> string * term -> theory -> thm * theory - val unoverload: theory -> thm -> thm - val overload: theory -> thm -> thm - val unoverload_conv: theory -> conv - val overload_conv: theory -> conv - val unoverload_const: theory -> string * typ -> string - val lookup_inst_param: Consts.T -> ((string * string) * 'a) list -> string * typ -> 'a option - val param_of_inst: theory -> string * string -> string - val inst_of_param: theory -> string -> (string * string) option - val thynames_of_arity: theory -> class * string -> string list - val introN: string - val axiomsN: string end; structure AxClass: AX_CLASS = @@ -46,6 +43,18 @@ (** theory data **) +(* axclass info *) + +type info = + {def: thm, + intro: thm, + axioms: thm list, + params: 
(string * typ) list}; + +fun make_axclass (def, intro, axioms, params): info = + {def = def, intro = intro, axioms = axioms, params = params}; + + (* class parameters (canonical order) *) type param = string * class; @@ -57,190 +66,259 @@ " for " ^ Pretty.string_of_sort pp [c] ^ (if c = c' then "" else " and " ^ Pretty.string_of_sort pp [c']))); -fun merge_params _ ([], qs) = qs - | merge_params pp (ps, qs) = - fold_rev (fn q => if member (op =) ps q then I else add_param pp q) qs ps; + +(* setup data *) + +datatype data = Data of + {axclasses: info Symtab.table, + params: param list, + proven_classrels: thm Symreltab.table, + proven_arities: ((class * sort list) * (thm * string)) list Symtab.table, + (*arity theorems with theory name*) + inst_params: + (string * thm) Symtab.table Symtab.table * + (*constant name ~> type constructor ~> (constant name, equation)*) + (string * string) Symtab.table (*constant name ~> (constant name, type constructor)*), + diff_classrels: (class * class) list}; + +fun make_data + (axclasses, params, proven_classrels, proven_arities, inst_params, diff_classrels) = + Data {axclasses = axclasses, params = params, proven_classrels = proven_classrels, + proven_arities = proven_arities, inst_params = inst_params, + diff_classrels = diff_classrels}; + +fun diff_table tab1 tab2 = + Symreltab.fold (fn (x, _) => if Symreltab.defined tab2 x then I else cons x) tab1 []; + +structure Data = Theory_Data_PP +( + type T = data; + val empty = + make_data (Symtab.empty, [], Symreltab.empty, Symtab.empty, (Symtab.empty, Symtab.empty), []); + val extend = I; + fun merge pp + (Data {axclasses = axclasses1, params = params1, proven_classrels = proven_classrels1, + proven_arities = proven_arities1, inst_params = inst_params1, + diff_classrels = diff_classrels1}, + Data {axclasses = axclasses2, params = params2, proven_classrels = proven_classrels2, + proven_arities = proven_arities2, inst_params = inst_params2, + diff_classrels = diff_classrels2}) = + let + val axclasses' = Symtab.merge (K true) (axclasses1, axclasses2); + val params' = + if null params1 then params2 + else fold_rev (fn p => if member (op =) params1 p then I else add_param pp p) params2 params1; + + (*transitive closure of classrels and arity completion is done in Theory.at_begin hook*) + val proven_classrels' = Symreltab.join (K #1) (proven_classrels1, proven_classrels2); + val proven_arities' = + Symtab.join (K (Library.merge (eq_fst op =))) (proven_arities1, proven_arities2); + + val diff_classrels' = + diff_table proven_classrels1 proven_classrels2 @ + diff_table proven_classrels2 proven_classrels1 @ + diff_classrels1 @ diff_classrels2; + + val inst_params' = + (Symtab.join (K (Symtab.merge (K true))) (#1 inst_params1, #1 inst_params2), + Symtab.merge (K true) (#2 inst_params1, #2 inst_params2)); + in + make_data + (axclasses', params', proven_classrels', proven_arities', inst_params', diff_classrels') + end; +); + +fun map_data f = + Data.map (fn Data {axclasses, params, proven_classrels, proven_arities, inst_params, diff_classrels} => + make_data (f (axclasses, params, proven_classrels, proven_arities, inst_params, diff_classrels))); + +fun map_axclasses f = + map_data (fn (axclasses, params, proven_classrels, proven_arities, inst_params, diff_classrels) => + (f axclasses, params, proven_classrels, proven_arities, inst_params, diff_classrels)); + +fun map_params f = + map_data (fn (axclasses, params, proven_classrels, proven_arities, inst_params, diff_classrels) => + (axclasses, f params, proven_classrels, 
proven_arities, inst_params, diff_classrels)); + +fun map_proven_classrels f = + map_data (fn (axclasses, params, proven_classrels, proven_arities, inst_params, diff_classrels) => + (axclasses, params, f proven_classrels, proven_arities, inst_params, diff_classrels)); + +fun map_proven_arities f = + map_data (fn (axclasses, params, proven_classrels, proven_arities, inst_params, diff_classrels) => + (axclasses, params, proven_classrels, f proven_arities, inst_params, diff_classrels)); + +fun map_inst_params f = + map_data (fn (axclasses, params, proven_classrels, proven_arities, inst_params, diff_classrels) => + (axclasses, params, proven_classrels, proven_arities, f inst_params, diff_classrels)); + +val clear_diff_classrels = + map_data (fn (axclasses, params, proven_classrels, proven_arities, inst_params, _) => + (axclasses, params, proven_classrels, proven_arities, inst_params, [])); + +val rep_data = Data.get #> (fn Data args => args); + +val axclasses_of = #axclasses o rep_data; +val params_of = #params o rep_data; +val proven_classrels_of = #proven_classrels o rep_data; +val proven_arities_of = #proven_arities o rep_data; +val inst_params_of = #inst_params o rep_data; +val diff_classrels_of = #diff_classrels o rep_data; -(* axclasses *) - -val introN = "intro"; -val superN = "super"; -val axiomsN = "axioms"; +(* axclasses with parameters *) -datatype axclass = AxClass of - {def: thm, - intro: thm, - axioms: thm list, - params: (string * typ) list}; +fun get_info thy c = + (case Symtab.lookup (axclasses_of thy) c of + SOME info => info + | NONE => error ("No such axclass: " ^ quote c)); -type axclasses = axclass Symtab.table * param list; +fun all_params_of thy S = + let val params = params_of thy; + in fold (fn (x, c) => if Sign.subsort thy (S, [c]) then cons x else I) params [] end; -fun make_axclass ((def, intro, axioms), params) = AxClass - {def = def, intro = intro, axioms = axioms, params = params}; - -fun merge_axclasses pp ((tab1, params1), (tab2, params2)) : axclasses = - (Symtab.merge (K true) (tab1, tab2), merge_params pp (params1, params2)); +fun class_of_param thy = AList.lookup (op =) (params_of thy); -(* instances *) +(* maintain instances *) val classrel_prefix = "classrel_"; val arity_prefix = "arity_"; -type instances = - ((class * class) * thm) list * (*classrel theorems*) - ((class * sort list) * (thm * string)) list Symtab.table; (*arity theorems with theory name*) - -fun merge_instances ((classrel1, arities1): instances, (classrel2, arities2)) = - (merge (eq_fst op =) (classrel1, classrel2), - Symtab.join (K (merge (eq_fst op =))) (arities1, arities2)); - - -(* instance parameters *) - -type inst_params = - (string * thm) Symtab.table Symtab.table - (*constant name ~> type constructor ~> (constant name, equation)*) - * (string * string) Symtab.table; (*constant name ~> (constant name, type constructor)*) - -fun merge_inst_params ((const_param1, param_const1), (const_param2, param_const2)) = - (Symtab.join (K (Symtab.merge (K true))) (const_param1, const_param2), - Symtab.merge (K true) (param_const1, param_const2)); - - -(* setup data *) - -structure AxClassData = Theory_Data_PP -( - type T = axclasses * (instances * inst_params); - val empty = ((Symtab.empty, []), (([], Symtab.empty), (Symtab.empty, Symtab.empty))); - val extend = I; - fun merge pp ((axclasses1, (instances1, inst_params1)), (axclasses2, (instances2, inst_params2))) = - (merge_axclasses pp (axclasses1, axclasses2), - (merge_instances (instances1, instances2), merge_inst_params (inst_params1, 
inst_params2))); -); +fun instance_name (a, c) = Long_Name.base_name c ^ "_" ^ Long_Name.base_name a; -(* maintain axclasses *) - -val get_axclasses = #1 o AxClassData.get; -val map_axclasses = AxClassData.map o apfst; - -val lookup_def = Symtab.lookup o #1 o get_axclasses; - -fun get_info thy c = - (case lookup_def thy c of - SOME (AxClass info) => info - | NONE => error ("No such axclass: " ^ quote c)); +infix 0 RSO; -fun class_intros thy = - let - fun add_intro c = - (case lookup_def thy c of SOME (AxClass {intro, ...}) => cons intro | _ => I); - val classes = Sign.all_classes thy; - in map (Thm.class_triv thy) classes @ fold add_intro classes [] end; - - -fun get_params thy pred = - let val params = #2 (get_axclasses thy); - in fold (fn (x, c) => if pred c then cons x else I) params [] end; - -fun all_params_of thy S = get_params thy (fn c => Sign.subsort thy (S, [c])); - -fun class_of_param thy = AList.lookup (op =) (#2 (get_axclasses thy)); - - -(* maintain instances *) - -fun instance_name (a, c) = Long_Name.base_name c ^ "_" ^ Long_Name.base_name a; - -val get_instances = #1 o #2 o AxClassData.get; -val map_instances = AxClassData.map o apsnd o apfst; - +fun (SOME a) RSO (SOME b) = SOME (a RS b) + | x RSO NONE = x + | NONE RSO y = y; fun the_classrel thy (c1, c2) = - (case AList.lookup (op =) (#1 (get_instances thy)) (c1, c2) of - SOME th => Thm.transfer thy th + (case Symreltab.lookup (proven_classrels_of thy) (c1, c2) of + SOME thm => Thm.transfer thy thm | NONE => error ("Unproven class relation " ^ - Syntax.string_of_classrel (ProofContext.init thy) [c1, c2])); + Syntax.string_of_classrel (ProofContext.init_global thy) [c1, c2])); + +fun put_trancl_classrel ((c1, c2), th) thy = + let + val classes = Sorts.classes_of (Sign.classes_of thy); + val classrels = proven_classrels_of thy; + + fun reflcl_classrel (c1', c2') = + if c1' = c2' then NONE else SOME (the_classrel thy (c1', c2')); + fun gen_classrel (c1_pred, c2_succ) = + let + val th' = + the ((reflcl_classrel (c1_pred, c1) RSO SOME th) RSO reflcl_classrel (c2, c2_succ)) + |> Drule.instantiate' [SOME (ctyp_of thy (TVar ((Name.aT, 0), [])))] [] + |> Thm.close_derivation; + in ((c1_pred, c2_succ), th') end; -fun put_classrel arg = map_instances (fn (classrel, arities) => - (insert (eq_fst op =) arg classrel, arities)); + val new_classrels = + Library.map_product pair (c1 :: Graph.imm_preds classes c1) (c2 :: Graph.imm_succs classes c2) + |> filter_out ((op =) orf Symreltab.defined classrels) + |> map gen_classrel; + val needed = not (null new_classrels); + in + (needed, + if needed then map_proven_classrels (fold Symreltab.update new_classrels) thy + else thy) + end; + +fun complete_classrels thy = + let + val classrels = proven_classrels_of thy; + val diff_classrels = diff_classrels_of thy; + val (needed, thy') = (false, thy) |> + fold (fn rel => fn (needed, thy) => + put_trancl_classrel (rel, Symreltab.lookup classrels rel |> the) thy + |>> (fn b => needed orelse b)) + diff_classrels; + in + if null diff_classrels then NONE + else SOME (clear_diff_classrels thy') + end; fun the_arity thy a (c, Ss) = - (case AList.lookup (op =) (Symtab.lookup_list (#2 (get_instances thy)) a) (c, Ss) of - SOME (th, _) => Thm.transfer thy th + (case AList.lookup (op =) (Symtab.lookup_list (proven_arities_of thy) a) (c, Ss) of + SOME (thm, _) => Thm.transfer thy thm | NONE => error ("Unproven type arity " ^ - Syntax.string_of_arity (ProofContext.init thy) (a, Ss, [c]))); + Syntax.string_of_arity (ProofContext.init_global thy) (a, Ss, [c]))); fun 
thynames_of_arity thy (c, a) = - Symtab.lookup_list (#2 (get_instances thy)) a + Symtab.lookup_list (proven_arities_of thy) a |> map_filter (fn ((c', _), (_, name)) => if c = c' then SOME name else NONE) |> rev; -fun insert_arity_completions thy (t, ((c, Ss), (th, thy_name))) arities = +fun insert_arity_completions thy t ((c, Ss), ((th, thy_name))) (finished, arities) = let val algebra = Sign.classes_of thy; + val ars = Symtab.lookup_list arities t; val super_class_completions = Sign.super_classes thy c - |> filter_out (fn c1 => exists (fn ((c2, Ss2), _) => c1 = c2 - andalso Sorts.sorts_le algebra (Ss2, Ss)) (Symtab.lookup_list arities t)); - val completions = map (fn c1 => (Sorts.classrel_derivation algebra - (fn (th, c2) => fn c3 => th RS the_classrel thy (c2, c3)) (th, c) c1 - |> Thm.close_derivation, c1)) super_class_completions; - val arities' = fold (fn (th1, c1) => Symtab.cons_list (t, ((c1, Ss), (th1, thy_name)))) - completions arities; - in (null completions, arities') end; + |> filter_out (fn c1 => exists (fn ((c2, Ss2), _) => + c1 = c2 andalso Sorts.sorts_le algebra (Ss2, Ss)) ars); + + val names = Name.invents Name.context Name.aT (length Ss); + val std_vars = map (fn a => SOME (ctyp_of thy (TVar ((a, 0), [])))) names; + + val completions = super_class_completions |> map (fn c1 => + let + val th1 = + (th RS the_classrel thy (c, c1)) + |> Drule.instantiate' std_vars [] + |> Thm.close_derivation; + in ((th1, thy_name), c1) end); + + val finished' = finished andalso null completions; + val arities' = fold (fn (th, c1) => Symtab.cons_list (t, ((c1, Ss), th))) completions arities; + in (finished', arities') end; fun put_arity ((t, Ss, c), th) thy = - let - val arity' = (t, ((c, Ss), (th, Context.theory_name thy))); - in + let val ar = ((c, Ss), (th, Context.theory_name thy)) in thy - |> map_instances (fn (classrel, arities) => (classrel, - arities - |> Symtab.insert_list (eq_fst op =) arity' - |> insert_arity_completions thy arity' - |> snd)) + |> map_proven_arities + (Symtab.insert_list (eq_fst op =) (t, ar) #> + curry (insert_arity_completions thy t ar) true #> #2) end; fun complete_arities thy = let - val arities = snd (get_instances thy); - val (finished, arities') = arities - |> fold_map (insert_arity_completions thy) (Symtab.dest_list arities); + val arities = proven_arities_of thy; + val (finished, arities') = + Symtab.fold (fn (t, ars) => fold (insert_arity_completions thy t) ars) arities (true, arities); in - if forall I finished then NONE - else SOME (thy |> map_instances (fn (classrel, _) => (classrel, arities'))) + if finished then NONE + else SOME (map_proven_arities (K arities') thy) end; -val _ = Context.>> (Context.map_theory (Theory.at_begin complete_arities)); +val _ = Context.>> (Context.map_theory + (Theory.at_begin complete_classrels #> Theory.at_begin complete_arities)); + +val the_classrel_prf = Thm.proof_of oo the_classrel; +val the_arity_prf = Thm.proof_of ooo the_arity; (* maintain instance parameters *) -val get_inst_params = #2 o #2 o AxClassData.get; -val map_inst_params = AxClassData.map o apsnd o apsnd; - fun get_inst_param thy (c, tyco) = - case Symtab.lookup ((the_default Symtab.empty o Symtab.lookup (fst (get_inst_params thy))) c) tyco - of SOME c' => c' - | NONE => error ("No instance parameter for constant " ^ quote c - ^ " on type constructor " ^ quote tyco); + (case Symtab.lookup (the_default Symtab.empty (Symtab.lookup (#1 (inst_params_of thy)) c)) tyco of + SOME c' => c' + | NONE => error ("No instance parameter for constant " ^ quote c ^ " on 
type " ^ quote tyco)); -fun add_inst_param (c, tyco) inst = (map_inst_params o apfst - o Symtab.map_default (c, Symtab.empty)) (Symtab.update_new (tyco, inst)) - #> (map_inst_params o apsnd) (Symtab.update_new (fst inst, (c, tyco))); +fun add_inst_param (c, tyco) inst = + (map_inst_params o apfst o Symtab.map_default (c, Symtab.empty)) (Symtab.update_new (tyco, inst)) + #> (map_inst_params o apsnd) (Symtab.update_new (#1 inst, (c, tyco))); -val inst_of_param = Symtab.lookup o snd o get_inst_params; -val param_of_inst = fst oo get_inst_param; +val inst_of_param = Symtab.lookup o #2 o inst_params_of; +val param_of_inst = #1 oo get_inst_param; -fun inst_thms thy = (Symtab.fold (Symtab.fold (cons o snd o snd) o snd) o fst) - (get_inst_params thy) []; +fun inst_thms thy = + Symtab.fold (Symtab.fold (cons o #2 o #2) o #2) (#1 (inst_params_of thy)) []; -fun get_inst_tyco consts = try (fst o dest_Type o the_single o Consts.typargs consts); +fun get_inst_tyco consts = try (#1 o dest_Type o the_single o Consts.typargs consts); fun unoverload thy = MetaSimplifier.simplify true (inst_thms thy); fun overload thy = MetaSimplifier.simplify true (map Thm.symmetric (inst_thms thy)); @@ -248,18 +326,20 @@ fun unoverload_conv thy = MetaSimplifier.rewrite true (inst_thms thy); fun overload_conv thy = MetaSimplifier.rewrite true (map Thm.symmetric (inst_thms thy)); -fun lookup_inst_param consts params (c, T) = case get_inst_tyco consts (c, T) - of SOME tyco => AList.lookup (op =) params (c, tyco) - | NONE => NONE; +fun lookup_inst_param consts params (c, T) = + (case get_inst_tyco consts (c, T) of + SOME tyco => AList.lookup (op =) params (c, tyco) + | NONE => NONE); fun unoverload_const thy (c_ty as (c, _)) = - if is_some (class_of_param thy c) - then case get_inst_tyco (Sign.consts_of thy) c_ty - of SOME tyco => try (param_of_inst thy) (c, tyco) |> the_default c - | NONE => c + if is_some (class_of_param thy c) then + (case get_inst_tyco (Sign.consts_of thy) c_ty of + SOME tyco => try (param_of_inst thy) (c, tyco) |> the_default c + | NONE => c) else c; + (** instances **) (* class relations *) @@ -277,7 +357,7 @@ in (c1, c2) end; fun read_classrel thy raw_rel = - cert_classrel thy (pairself (ProofContext.read_class (ProofContext.init thy)) raw_rel) + cert_classrel thy (pairself (ProofContext.read_class (ProofContext.init_global thy)) raw_rel) handle TYPE (msg, _, _) => error msg; @@ -297,7 +377,7 @@ | NONE => error ("Not a class parameter: " ^ quote c)); val tyco = inst_tyco_of thy (c, T); val name_inst = instance_name (tyco, class) ^ "_inst"; - val c' = Long_Name.base_name c ^ "_" ^ Long_Name.base_name tyco; + val c' = instance_name (tyco, c); val T' = Type.strip_sorts T; in thy @@ -309,7 +389,7 @@ #>> apsnd Thm.varifyT_global #-> (fn (_, thm) => add_inst_param (c, tyco) (c'', thm) #> PureThy.add_thm ((Binding.conceal (Binding.name c'), thm), []) - #> snd + #> #2 #> pair (Const (c, T)))) ||> Sign.restore_naming thy end; @@ -320,8 +400,7 @@ val tyco = inst_tyco_of thy (c, T); val (c', eq) = get_inst_param thy (c, tyco); val prop = Logic.mk_equals (Const (c', T), t); - val b' = Thm.def_binding_optional - (Binding.name (Long_Name.base_name c ^ "_" ^ Long_Name.base_name tyco)) b; + val b' = Thm.def_binding_optional (Binding.name (instance_name (tyco, c))) b; in thy |> Thm.add_def false false (b', prop) @@ -331,6 +410,8 @@ (* primitive rules *) +val shyps_topped = forall null o #shyps o Thm.rep_thm; + fun add_classrel raw_th thy = let val th = Thm.strip_shyps (Thm.transfer thy raw_th); @@ -338,10 +419,14 @@ fun 
err () = raise THM ("add_classrel: malformed class relation", 0, [th]); val rel = Logic.dest_classrel prop handle TERM _ => err (); val (c1, c2) = cert_classrel thy rel handle TYPE _ => err (); + val th' = th + |> Drule.instantiate' [SOME (ctyp_of thy (TVar ((Name.aT, 0), [c1])))] [] + |> Thm.unconstrain_allTs; + val _ = shyps_topped th' orelse raise Fail "add_classrel: nontop shyps after unconstrain"; in thy |> Sign.primitive_classrel (c1, c2) - |> put_classrel ((c1, c2), Thm.close_derivation (Drule.unconstrainTs th)) + |> (#2 oo put_trancl_classrel) ((c1, c2), th') |> perhaps complete_arities end; @@ -351,17 +436,24 @@ val prop = Thm.plain_prop_of th; fun err () = raise THM ("add_arity: malformed type arity", 0, [th]); val (t, Ss, c) = Logic.dest_arity prop handle TERM _ => err (); - val T = Type (t, map TFree (Name.names Name.context Name.aT Ss)); + + val args = Name.names Name.context Name.aT Ss; + val T = Type (t, map TFree args); + val std_vars = map (fn (a, S) => SOME (ctyp_of thy (TVar ((a, 0), S)))) args; + val missing_params = Sign.complete_sort thy [c] |> maps (these o Option.map #params o try (get_info thy)) |> filter_out (fn (const, _) => can (get_inst_param thy) (const, t)) |> (map o apsnd o map_atyps) (K T); - val _ = map (Sign.certify_sort thy) Ss = Ss orelse err (); + val th' = th + |> Drule.instantiate' std_vars [] + |> Thm.unconstrain_allTs; + val _ = shyps_topped th' orelse raise Fail "add_arity: nontop shyps after unconstrain"; in thy - |> fold (snd oo declare_overloaded) missing_params + |> fold (#2 oo declare_overloaded) missing_params |> Sign.primitive_arity (t, Ss, [c]) - |> put_arity ((t, Ss, c), Thm.close_derivation (Drule.unconstrainTs th)) + |> put_arity ((t, Ss, c), th') end; @@ -369,7 +461,7 @@ fun prove_classrel raw_rel tac thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val (c1, c2) = cert_classrel thy raw_rel; val th = Goal.prove ctxt [] [] (Logic.mk_classrel (c1, c2)) (K tac) handle ERROR msg => cat_error msg ("The error(s) above occurred while trying to prove class relation " ^ @@ -383,7 +475,7 @@ fun prove_arity raw_arity tac thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val arity = ProofContext.cert_arity ctxt raw_arity; val names = map (prefix arity_prefix) (Logic.name_arities arity); val props = Logic.mk_arities arity; @@ -417,7 +509,7 @@ fun define_class (bclass, raw_super) raw_params raw_specs thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val pp = Syntax.pp ctxt; @@ -483,25 +575,24 @@ def_thy |> Sign.qualified_path true bconst |> PureThy.note_thmss "" - [((Binding.name introN, []), [([Drule.export_without_context raw_intro], [])]), - ((Binding.name superN, []), [(map Drule.export_without_context raw_classrel, [])]), - ((Binding.name axiomsN, []), + [((Binding.name "intro", []), [([Drule.export_without_context raw_intro], [])]), + ((Binding.name "super", []), [(map Drule.export_without_context raw_classrel, [])]), + ((Binding.name "axioms", []), [(map (fn th => Drule.export_without_context (class_triv RS th)) raw_axioms, [])])] ||> Sign.restore_naming def_thy; (* result *) - val axclass = make_axclass ((def, intro, axioms), params); + val axclass = make_axclass (def, intro, axioms, params); val result_thy = facts_thy - |> fold put_classrel (map (pair class) super ~~ classrel) + |> fold (#2 oo put_trancl_classrel) (map (pair class) super ~~ classrel) |> Sign.qualified_path false bconst - |> PureThy.note_thmss "" (name_atts ~~ 
map Thm.simple_fact (unflat axiomss axioms)) |> snd + |> PureThy.note_thmss "" (name_atts ~~ map Thm.simple_fact (unflat axiomss axioms)) |> #2 |> Sign.restore_naming facts_thy - |> map_axclasses (fn (axclasses, parameters) => - (Symtab.update (class, axclass) axclasses, - fold (fn (x, _) => add_param pp (x, class)) params parameters)); + |> map_axclasses (Symtab.update (class, axclass)) + |> map_params (fold (fn (x, _) => add_param pp (x, class)) params); in (class, result_thy) end; @@ -511,8 +602,7 @@ local -(* old-style axioms *) - +(*old-style axioms*) fun add_axiom (b, prop) = Thm.add_axiom (b, prop) #-> (fn (_, thm) => PureThy.add_thm ((b, Drule.export_without_context thm), [])); @@ -533,7 +623,7 @@ (map (prefix classrel_prefix o Logic.name_classrel)) add_classrel; fun ax_arity prep = - axiomatize (prep o ProofContext.init) Logic.mk_arities + axiomatize (prep o ProofContext.init_global) Logic.mk_arities (map (prefix arity_prefix) o Logic.name_arities) add_arity; fun class_const c = @@ -553,7 +643,7 @@ in val axiomatize_class = ax_class Sign.certify_class cert_classrel; -val axiomatize_class_cmd = ax_class (ProofContext.read_class o ProofContext.init) read_classrel; +val axiomatize_class_cmd = ax_class (ProofContext.read_class o ProofContext.init_global) read_classrel; val axiomatize_classrel = ax_classrel cert_classrel; val axiomatize_classrel_cmd = ax_classrel read_classrel; val axiomatize_arity = ax_arity ProofContext.cert_arity; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/codegen.ML --- a/src/Pure/codegen.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/codegen.ML Tue May 04 20:30:22 2010 +0200 @@ -822,7 +822,7 @@ val generate_code_i = gen_generate_code Sign.cert_term; val generate_code = - gen_generate_code (Syntax.read_term o ProofContext.allow_dummies o ProofContext.init); + gen_generate_code (Syntax.read_term o ProofContext.allow_dummies o ProofContext.init_global); (**** Reflection ****) @@ -908,7 +908,7 @@ fun eval_term thy t = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val e = let val _ = (null (Term.add_tvars t []) andalso null (Term.add_tfrees t [])) orelse diff -r aace7a969410 -r 8629ac3efb19 src/Pure/context.ML --- a/src/Pure/context.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/context.ML Tue May 04 20:30:22 2010 +0200 @@ -20,7 +20,7 @@ structure ProofContext: sig val theory_of: Proof.context -> theory - val init: theory -> Proof.context + val init_global: theory -> Proof.context end end; @@ -481,7 +481,7 @@ structure ProofContext = struct val theory_of = theory_of_proof; - fun init thy = Proof.Context (init_data thy, check_thy thy); + fun init_global thy = Proof.Context (init_data thy, check_thy thy); end; structure Proof_Data = @@ -529,7 +529,7 @@ fun proof_map f = the_proof o f o Proof; val theory_of = cases I ProofContext.theory_of; -val proof_of = cases ProofContext.init I; +val proof_of = cases ProofContext.init_global I; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/display.ML --- a/src/Pure/display.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/display.ML Tue May 04 20:30:22 2010 +0200 @@ -182,7 +182,8 @@ val extern_const = Name_Space.extern (#1 constants); val {classes, default, types, ...} = Type.rep_tsig tsig; val (class_space, class_algebra) = classes; - val {classes, arities} = Sorts.rep_algebra class_algebra; + val classes = Sorts.classes_of class_algebra; + val arities = Sorts.arities_of class_algebra; val clsses = Name_Space.dest_table (class_space, Symtab.make (Graph.dest classes)); val tdecls = 
Name_Space.dest_table types; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/drule.ML --- a/src/Pure/drule.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/drule.ML Tue May 04 20:30:22 2010 +0200 @@ -106,7 +106,6 @@ val dummy_thm: thm val sort_constraintI: thm val sort_constraint_eq: thm - val unconstrainTs: thm -> thm val with_subgoal: int -> (thm -> thm) -> thm -> thm val comp_no_flatten: thm * int -> int -> thm -> thm val rename_bvars: (string * string) list -> thm -> thm @@ -204,12 +203,6 @@ (** Standardization of rules **) -(* type classes and sorts *) - -fun unconstrainTs th = - fold (Thm.unconstrainT o Thm.ctyp_of (Thm.theory_of_thm th) o TVar) - (Thm.fold_terms Term.add_tvars th []) th; - (*Generalization over a list of variables*) val forall_intr_list = fold_rev forall_intr; @@ -314,7 +307,7 @@ Similar code in type/freeze_thaw*) fun legacy_freeze_thaw_robust th = - let val fth = Thm.freezeT th + let val fth = Thm.legacy_freezeT th val thy = Thm.theory_of_thm fth val {prop, tpairs, ...} = rep_thm fth in @@ -336,7 +329,7 @@ (*Basic version of the function above. No option to rename Vars apart in thaw. The Frees created from Vars have nice names.*) fun legacy_freeze_thaw th = - let val fth = Thm.freezeT th + let val fth = Thm.legacy_freezeT th val thy = Thm.theory_of_thm fth val {prop, tpairs, ...} = rep_thm fth in diff -r aace7a969410 -r 8629ac3efb19 src/Pure/goal.ML --- a/src/Pure/goal.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/goal.ML Tue May 04 20:30:22 2010 +0200 @@ -137,7 +137,8 @@ Thm.adjust_maxidx_thm ~1 #> Drule.implies_intr_list assms #> Drule.forall_intr_list fixes #> - Thm.generalize (map #1 tfrees, []) 0); + Thm.generalize (map #1 tfrees, []) 0 #> + Thm.strip_shyps); val local_result = Thm.future global_result global_prop |> Thm.instantiate (instT, []) @@ -211,7 +212,7 @@ fun prove ctxt xs asms prop tac = hd (prove_common true ctxt xs asms [prop] tac); fun prove_global thy xs asms prop tac = - Drule.export_without_context (prove (ProofContext.init thy) xs asms prop tac); + Drule.export_without_context (prove (ProofContext.init_global thy) xs asms prop tac); diff -r aace7a969410 -r 8629ac3efb19 src/Pure/meta_simplifier.ML --- a/src/Pure/meta_simplifier.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/meta_simplifier.ML Tue May 04 20:30:22 2010 +0200 @@ -51,10 +51,10 @@ val addsimprocs: simpset * simproc list -> simpset val delsimprocs: simpset * simproc list -> simpset val mksimps: simpset -> thm -> thm list - val setmksimps: simpset * (thm -> thm list) -> simpset - val setmkcong: simpset * (thm -> thm) -> simpset - val setmksym: simpset * (thm -> thm option) -> simpset - val setmkeqTrue: simpset * (thm -> thm option) -> simpset + val setmksimps: simpset * (simpset -> thm -> thm list) -> simpset + val setmkcong: simpset * (simpset -> thm -> thm) -> simpset + val setmksym: simpset * (simpset -> thm -> thm option) -> simpset + val setmkeqTrue: simpset * (simpset -> thm -> thm option) -> simpset val settermless: simpset * (term * term -> bool) -> simpset val setsubgoaler: simpset * (simpset -> int -> tactic) -> simpset val setloop': simpset * (simpset -> int -> tactic) -> simpset @@ -92,10 +92,10 @@ {congs: (string * thm) list * string list, procs: proc Net.net, mk_rews: - {mk: thm -> thm list, - mk_cong: thm -> thm, - mk_sym: thm -> thm option, - mk_eq_True: thm -> thm option, + {mk: simpset -> thm -> thm list, + mk_cong: simpset -> thm -> thm, + mk_sym: simpset -> thm -> thm option, + mk_eq_True: simpset -> thm -> thm option, reorient: theory -> term list -> term -> 
term -> bool}, termless: term * term -> bool, subgoal_tac: simpset -> int -> tactic, @@ -115,6 +115,7 @@ val the_context: simpset -> Proof.context val context: Proof.context -> simpset -> simpset val global_context: theory -> simpset -> simpset + val with_context: Proof.context -> (simpset -> simpset) -> simpset -> simpset val debug_bounds: bool Unsynchronized.ref val set_reorient: (theory -> term list -> term -> term -> bool) -> simpset -> simpset val set_solvers: solver list -> simpset -> simpset @@ -181,13 +182,6 @@ mk_eq_True: turn P into P == True; termless: relation for ordered rewriting;*) -type mk_rews = - {mk: thm -> thm list, - mk_cong: thm -> thm, - mk_sym: thm -> thm option, - mk_eq_True: thm -> thm option, - reorient: theory -> term list -> term -> term -> bool}; - datatype simpset = Simpset of {rules: rrule Net.net, @@ -197,7 +191,12 @@ context: Proof.context option} * {congs: (string * thm) list * string list, procs: proc Net.net, - mk_rews: mk_rews, + mk_rews: + {mk: simpset -> thm -> thm list, + mk_cong: simpset -> thm -> thm, + mk_sym: simpset -> thm -> thm option, + mk_eq_True: simpset -> thm -> thm option, + reorient: theory -> term list -> term -> term -> bool}, termless: term * term -> bool, subgoal_tac: simpset -> int -> tactic, loop_tacs: (string * (simpset -> int -> tactic)) list, @@ -300,7 +299,7 @@ in fun print_term_global ss warn a thy t = - print_term ss warn (K a) t (ProofContext.init thy); + print_term ss warn (K a) t (ProofContext.init_global thy); fun if_enabled (Simpset ({context, ...}, _)) flag f = (case context of @@ -328,7 +327,8 @@ print_term_global ss true a (Thm.theory_of_thm th) (Thm.full_prop_of th); fun cond_warn_thm a (ss as Simpset ({context, ...}, _)) th = - if is_some context then () else warn_thm a ss th; + if (case context of NONE => true | SOME ctxt => Context_Position.is_visible ctxt) + then warn_thm a ss th else (); end; @@ -355,14 +355,18 @@ fun context ctxt = map_simpset1 (fn (rules, prems, bounds, depth, _) => (rules, prems, bounds, depth, SOME ctxt)); -val global_context = context o ProofContext.init; +val global_context = context o ProofContext.init_global; fun activate_context thy ss = let val ctxt = the_context ss; - val ctxt' = Context.raw_transfer (Theory.merge (thy, ProofContext.theory_of ctxt)) ctxt; + val ctxt' = ctxt + |> Context.raw_transfer (Theory.merge (thy, ProofContext.theory_of ctxt)) + |> Context_Position.set_visible false; in context ctxt' ss end; +fun with_context ctxt f ss = inherit_context ss (f (context ctxt ss)); + (* maintain simp rules *) @@ -458,8 +462,8 @@ else (lhs, rhs) end; -fun mk_eq_True (Simpset (_, {mk_rews = {mk_eq_True, ...}, ...})) (thm, name) = - (case mk_eq_True thm of +fun mk_eq_True (ss as Simpset (_, {mk_rews = {mk_eq_True, ...}, ...})) (thm, name) = + (case mk_eq_True ss thm of NONE => [] | SOME eq_True => let @@ -495,7 +499,7 @@ if reorient thy prems rhs lhs then mk_eq_True ss (thm, name) else - (case mk_sym thm of + (case mk_sym ss thm of NONE => [] | SOME thm' => let val (_, _, lhs', elhs', rhs', _) = decomp_simp thm' @@ -503,8 +507,8 @@ else rrule_eq_True (thm, name, lhs, elhs, rhs, ss, thm) end; -fun extract_rews (Simpset (_, {mk_rews = {mk, ...}, ...}), thms) = - maps (fn thm => map (rpair (Thm.get_name_hint thm)) (mk thm)) thms; +fun extract_rews (ss as Simpset (_, {mk_rews = {mk, ...}, ...}), thms) = + maps (fn thm => map (rpair (Thm.get_name_hint thm)) (mk ss thm)) thms; fun extract_safe_rrules (ss, thm) = maps (orient_rrule ss) (extract_rews (ss, [thm])); @@ -588,7 +592,7 @@ if 
is_full_cong thm then NONE else SOME a); in ((xs', weak'), procs, mk_rews, termless, subgoal_tac, loop_tacs, solvers) end); -fun mk_cong (Simpset (_, {mk_rews = {mk_cong = f, ...}, ...})) = f; +fun mk_cong (ss as Simpset (_, {mk_rews = {mk_cong = f, ...}, ...})) = f ss; in @@ -674,7 +678,7 @@ in -fun mksimps (Simpset (_, {mk_rews = {mk, ...}, ...})) = mk; +fun mksimps (ss as Simpset (_, {mk_rews = {mk, ...}, ...})) = mk ss; fun ss setmksimps mk = ss |> map_mk_rews (fn (_, mk_cong, mk_sym, mk_eq_True, reorient) => (mk, mk_cong, mk_sym, mk_eq_True, reorient)); @@ -762,14 +766,14 @@ init_ss mk_rews termless subgoal_tac solvers |> inherit_context ss; -val basic_mk_rews: mk_rews = - {mk = fn th => if can Logic.dest_equals (Thm.concl_of th) then [th] else [], - mk_cong = I, - mk_sym = SOME o Drule.symmetric_fun, - mk_eq_True = K NONE, - reorient = default_reorient}; - -val empty_ss = init_ss basic_mk_rews Term_Ord.termless (K (K no_tac)) ([], []); +val empty_ss = + init_ss + {mk = fn _ => fn th => if can Logic.dest_equals (Thm.concl_of th) then [th] else [], + mk_cong = K I, + mk_sym = K (SOME o Drule.symmetric_fun), + mk_eq_True = K (K NONE), + reorient = default_reorient} + Term_Ord.termless (K (K no_tac)) ([], []); (* merge *) (*NOTE: ignores some fields of 2nd simpset*) @@ -834,7 +838,6 @@ in if msg then trace_thm (fn () => "SUCCEEDED") ss thm' else (); SOME thm'' end handle THM _ => let - val thy = Thm.theory_of_thm thm; val _ $ _ $ prop0 = Thm.prop_of thm; in trace_thm (fn () => "Proved wrong thm (Check subgoaler?)") ss thm'; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/old_goals.ML --- a/src/Pure/old_goals.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/old_goals.ML Tue May 04 20:30:22 2010 +0200 @@ -219,7 +219,7 @@ fun simple_read_term thy T s = let - val ctxt = ProofContext.init thy + val ctxt = ProofContext.init_global thy |> ProofContext.allow_dummies |> ProofContext.set_mode ProofContext.mode_schematic; val parse = if T = propT then Syntax.parse_prop else Syntax.parse_term; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/proofterm.ML --- a/src/Pure/proofterm.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/proofterm.ML Tue May 04 20:30:22 2010 +0200 @@ -58,8 +58,10 @@ val strip_combt: proof -> proof * term option list val strip_combP: proof -> proof * proof list val strip_thm: proof_body -> proof_body - val map_proof_terms_option: (term -> term option) -> (typ -> typ option) -> proof -> proof + val map_proof_terms_same: term Same.operation -> typ Same.operation -> proof Same.operation + val map_proof_types_same: typ Same.operation -> proof Same.operation val map_proof_terms: (term -> term) -> (typ -> typ) -> proof -> proof + val map_proof_types: (typ -> typ) -> proof -> proof val fold_proof_terms: (term -> 'a -> 'a) -> (typ -> 'a -> 'a) -> proof -> 'a -> 'a val maxidx_proof: proof -> int -> int val size_of_proof: proof -> int @@ -80,7 +82,7 @@ val implies_intr_proof: term -> proof -> proof val forall_intr_proof: term -> string -> proof -> proof val varify_proof: term -> (string * sort) list -> proof -> proof - val freezeT: term -> proof -> proof + val legacy_freezeT: term -> proof -> proof val rotate_proof: term list -> term -> int -> proof -> proof val permute_prems_prf: term list -> int -> int -> proof -> proof val generalize: string list * string list -> int -> proof -> proof @@ -106,6 +108,8 @@ val combination: term -> term -> term -> term -> typ -> proof -> proof -> proof val equal_intr: term -> term -> proof -> proof -> proof val equal_elim: term -> term -> proof -> proof -> 
proof + val strip_shyps_proof: Sorts.algebra -> (typ * sort) list -> (typ * sort) list -> + sort list -> proof -> proof val axm_proof: string -> term -> proof val oracle_proof: string -> term -> oracle * proof val promise_proof: theory -> serial -> term -> proof @@ -273,10 +277,8 @@ val mk_Abst = fold_rev (fn (s, T:typ) => fn prf => Abst (s, NONE, prf)); fun mk_AbsP (i, prf) = funpow i (fn prf => AbsP ("H", NONE, prf)) prf; -fun map_proof_terms_option f g = +fun map_proof_same term typ ofclass = let - val term = Same.function f; - val typ = Same.function g; val typs = Same.map typ; fun proof (Abst (s, T, prf)) = @@ -292,22 +294,23 @@ (proof prf1 %% Same.commit proof prf2 handle Same.SAME => prf1 %% proof prf2) | proof (PAxm (a, prop, SOME Ts)) = PAxm (a, prop, SOME (typs Ts)) - | proof (OfClass (T, c)) = OfClass (typ T, c) + | proof (OfClass T_c) = ofclass T_c | proof (Oracle (a, prop, SOME Ts)) = Oracle (a, prop, SOME (typs Ts)) | proof (Promise (i, prop, Ts)) = Promise (i, prop, typs Ts) | proof (PThm (i, ((a, prop, SOME Ts), body))) = PThm (i, ((a, prop, SOME (typs Ts)), body)) | proof _ = raise Same.SAME; - in Same.commit proof end; + in proof end; + +fun map_proof_terms_same term typ = map_proof_same term typ (fn (T, c) => OfClass (typ T, c)); +fun map_proof_types_same typ = map_proof_terms_same (Term_Subst.map_types_same typ) typ; fun same eq f x = let val x' = f x in if eq (x, x') then raise Same.SAME else x' end; -fun map_proof_terms f g = - map_proof_terms_option - (fn t => SOME (same (op =) f t) handle Same.SAME => NONE) - (fn T => SOME (same (op =) g T) handle Same.SAME => NONE); +fun map_proof_terms f g = Same.commit (map_proof_terms_same (same (op =) f) (same (op =) g)); +fun map_proof_types f = Same.commit (map_proof_types_same (same (op =) f)); fun fold_proof_terms f g (Abst (_, SOME T, prf)) = g T #> fold_proof_terms f g prf | fold_proof_terms f g (Abst (_, NONE, prf)) = fold_proof_terms f g prf @@ -652,7 +655,7 @@ in -fun freezeT t prf = +fun legacy_freezeT t prf = let val used = OldTerm.it_term_types OldTerm.add_typ_tfree_names (t, []) and tvars = map #1 (OldTerm.it_term_types OldTerm.add_typ_tvars (t, [])); @@ -696,17 +699,17 @@ (***** generalization *****) fun generalize (tfrees, frees) idx = - map_proof_terms_option - (Term_Subst.generalize_option (tfrees, frees) idx) - (Term_Subst.generalizeT_option tfrees idx); + Same.commit (map_proof_terms_same + (Term_Subst.generalize_same (tfrees, frees) idx) + (Term_Subst.generalizeT_same tfrees idx)); (***** instantiation *****) fun instantiate (instT, inst) = - map_proof_terms_option - (Term_Subst.instantiate_option (instT, map (apsnd remove_types) inst)) - (Term_Subst.instantiateT_option instT); + Same.commit (map_proof_terms_same + (Term_Subst.instantiate_same (instT, map (apsnd remove_types) inst)) + (Term_Subst.instantiateT_same instT)); (***** lifting *****) @@ -757,9 +760,8 @@ end; fun incr_indexes i = - map_proof_terms_option - (Same.capture (Logic.incr_indexes_same ([], i))) - (Same.capture (Logic.incr_tvar_same i)); + Same.commit (map_proof_terms_same + (Logic.incr_indexes_same ([], i)) (Logic.incr_tvar_same i)); (***** proof by assumption *****) @@ -884,6 +886,22 @@ equal_elim_axm %> remove_types A %> remove_types B %% prf1 %% prf2; +(**** sort hypotheses ****) + +fun strip_shyps_proof algebra present witnessed extra_sorts prf = + let + fun get S2 (T, S1) = if Sorts.sort_le algebra (S1, S2) then SOME T else NONE; + val extra = map (fn S => (TFree (Name.aT, S), S)) extra_sorts; + val replacements = present @ extra @ 
witnessed; + fun replace T = + if exists (fn (T', _) => T' = T) present then raise Same.SAME + else + (case get_first (get (Type.sort_of_atyp T)) replacements of + SOME T' => T' + | NONE => raise Fail "strip_shyps_proof: bad type variable in proof term"); + in Same.commit (map_proof_types_same (Term_Subst.map_atypsT_same replace)) prf end; + + (***** axioms and theorems *****) val proofs = Unsynchronized.ref 2; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/sign.ML --- a/src/Pure/sign.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/sign.ML Tue May 04 20:30:22 2010 +0200 @@ -25,6 +25,7 @@ val super_classes: theory -> class -> class list val minimize_sort: theory -> sort -> sort val complete_sort: theory -> sort -> sort + val set_defsort: sort -> theory -> theory val defaultS: theory -> sort val subsort: theory -> sort * sort -> bool val of_sort: theory -> typ * sort -> bool @@ -68,8 +69,6 @@ val cert_prop: theory -> term -> term val no_frees: Pretty.pp -> term -> term val no_vars: Pretty.pp -> term -> term - val add_defsort: string -> theory -> theory - val add_defsort_i: sort -> theory -> theory val add_types: (binding * int * mixfix) list -> theory -> theory val add_nonterminals: binding list -> theory -> theory val add_type_abbrev: binding * string list * typ -> theory -> theory @@ -156,7 +155,7 @@ val naming = Name_Space.default_naming; val syn = Syntax.merge_syntaxes syn1 syn2; - val tsig = Type.merge_tsigs pp (tsig1, tsig2); + val tsig = Type.merge_tsig pp (tsig1, tsig2); val consts = Consts.merge (consts1, consts2); in make_sign (naming, syn, tsig, consts) end; ); @@ -198,6 +197,7 @@ val minimize_sort = Sorts.minimize_sort o classes_of; val complete_sort = Sorts.complete_sort o classes_of; +val set_defsort = map_tsig o Type.set_defsort; val defaultS = Type.defaultS o tsig_of; val subsort = Type.subsort o tsig_of; val of_sort = Type.of_sort o tsig_of; @@ -334,15 +334,6 @@ (** signature extension functions **) (*exception ERROR/TYPE*) -(* add default sort *) - -fun gen_add_defsort prep_sort s thy = - thy |> map_tsig (Type.set_defsort (prep_sort thy s)); - -val add_defsort = gen_add_defsort Syntax.read_sort_global; -val add_defsort_i = gen_add_defsort certify_sort; - - (* add type constructors *) fun add_types types thy = thy |> map_sign (fn (naming, syn, tsig, consts) => @@ -370,7 +361,7 @@ fun gen_syntax change_gram parse_typ mode args thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; fun prep (c, T, mx) = (c, certify_typ_mode Type.mode_syntax thy (parse_typ ctxt T), mx) handle ERROR msg => cat_error msg ("in syntax declaration " ^ quote c); in thy |> map_syn (change_gram (is_logtype thy) mode (map prep args)) end; @@ -407,7 +398,7 @@ fun gen_add_consts parse_typ raw_args thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val prepT = Type.no_tvars o Term.no_dummyT o certify_typ thy o parse_typ ctxt; fun prep (b, raw_T, mx) = let @@ -506,7 +497,7 @@ fun gen_trrules f args thy = thy |> map_syn (fn syn => let val rules = map (Syntax.map_trrule (apfst (intern_type thy))) args - in f (ProofContext.init thy) (is_logtype thy) syn rules syn end); + in f (ProofContext.init_global thy) (is_logtype thy) syn rules syn end); val add_trrules = gen_trrules Syntax.update_trrules; val del_trrules = gen_trrules Syntax.remove_trrules; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/simplifier.ML --- a/src/Pure/simplifier.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/simplifier.ML Tue May 04 20:30:22 2010 +0200 @@ -37,6 +37,7 
@@ val the_context: simpset -> Proof.context val context: Proof.context -> simpset -> simpset val global_context: theory -> simpset -> simpset + val with_context: Proof.context -> (simpset -> simpset) -> simpset -> simpset val simproc_i: theory -> string -> term list -> (theory -> simpset -> term -> thm option) -> simproc val simproc: theory -> string -> string list @@ -108,7 +109,7 @@ ); val get_ss = SimpsetData.get; -val map_ss = SimpsetData.map; +fun map_ss f context = SimpsetData.map (with_context (Context.proof_of context) f) context; (* attributes *) @@ -126,7 +127,7 @@ fun map_simpset f = Context.theory_map (map_ss f); fun change_simpset f = Context.>> (Context.map_theory (map_simpset f)); fun global_simpset_of thy = - MetaSimplifier.context (ProofContext.init thy) (get_ss (Context.Theory thy)); + MetaSimplifier.context (ProofContext.init_global thy) (get_ss (Context.Theory thy)); fun Addsimprocs args = change_simpset (fn ss => ss addsimprocs args); fun Delsimprocs args = change_simpset (fn ss => ss delsimprocs args); @@ -411,7 +412,7 @@ empty_ss setsubgoaler asm_simp_tac setSSolver safe_solver setSolver unsafe_solver - setmksimps mksimps + setmksimps (K mksimps) end)); end; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/sorts.ML --- a/src/Pure/sorts.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/sorts.ML Tue May 04 20:30:22 2010 +0200 @@ -25,9 +25,8 @@ val insert_term: term -> sort OrdList.T -> sort OrdList.T val insert_terms: term list -> sort OrdList.T -> sort OrdList.T type algebra - val rep_algebra: algebra -> - {classes: serial Graph.T, - arities: (class * (class * sort list)) list Symtab.table} + val classes_of: algebra -> serial Graph.T + val arities_of: algebra -> (class * (class * sort list)) list Symtab.table val all_classes: algebra -> class list val super_classes: algebra -> class -> class list val class_less: algebra -> class * class -> bool @@ -116,10 +115,8 @@ {classes: serial Graph.T, arities: (class * (class * sort list)) list Symtab.table}; -fun rep_algebra (Algebra args) = args; - -val classes_of = #classes o rep_algebra; -val arities_of = #arities o rep_algebra; +fun classes_of (Algebra {classes, ...}) = classes; +fun arities_of (Algebra {arities, ...}) = arities; fun make_algebra (classes, arities) = Algebra {classes = classes, arities = arities}; @@ -192,7 +189,7 @@ if can (Graph.get_node (classes_of algebra)) c then c else raise TYPE ("Undeclared class: " ^ quote c, [], []); -fun certify_sort classes = minimize_sort classes o map (certify_class classes); +fun certify_sort classes = map (certify_class classes); diff -r aace7a969410 -r 8629ac3efb19 src/Pure/tactic.ML --- a/src/Pure/tactic.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/tactic.ML Tue May 04 20:30:22 2010 +0200 @@ -7,7 +7,7 @@ signature BASIC_TACTIC = sig val trace_goalno_tac: (int -> tactic) -> int -> tactic - val rule_by_tactic: tactic -> thm -> thm + val rule_by_tactic: Proof.context -> tactic -> thm -> thm val assume_tac: int -> tactic val eq_assume_tac: int -> tactic val compose_tac: (bool * thm * int) -> int -> tactic @@ -86,14 +86,14 @@ Seq.make(fn()=> seqcell)); (*Makes a rule by applying a tactic to an existing rule*) -fun rule_by_tactic tac rl = +fun rule_by_tactic ctxt tac rl = let - val ctxt = Variable.thm_context rl; - val ((_, [st]), ctxt') = Variable.import true [rl] ctxt; + val ctxt' = Variable.declare_thm rl ctxt; + val ((_, [st]), ctxt'') = Variable.import true [rl] ctxt'; in (case Seq.pull (tac st) of NONE => raise THM ("rule_by_tactic", 0, [rl]) - | SOME (st', _) => 
zero_var_indexes (singleton (Variable.export ctxt' ctxt) st')) + | SOME (st', _) => zero_var_indexes (singleton (Variable.export ctxt'' ctxt') st')) end; @@ -188,9 +188,6 @@ let val (_, _, Bi, _) = dest_state (st, i) in Term.rename_wrt_term Bi (Logic.strip_params Bi) end; -(*params of subgoal i as they are printed*) -fun params_of_state i st = rev (innermost_params i st); - (*** Applications of cut_rl ***) diff -r aace7a969410 -r 8629ac3efb19 src/Pure/term_subst.ML --- a/src/Pure/term_subst.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/term_subst.ML Tue May 04 20:30:22 2010 +0200 @@ -13,6 +13,8 @@ val map_atyps_option: (typ -> typ option) -> term -> term option val map_types_option: (typ -> typ option) -> term -> term option val map_aterms_option: (term -> term option) -> term -> term option + val generalizeT_same: string list -> int -> typ Same.operation + val generalize_same: string list * string list -> int -> term Same.operation val generalize: string list * string list -> int -> term -> term val generalizeT: string list -> int -> typ -> typ val generalize_option: string list * string list -> int -> term -> term option @@ -21,12 +23,12 @@ val instantiate_maxidx: ((indexname * sort) * (typ * int)) list * ((indexname * typ) * (term * int)) list -> term -> int -> term * int + val instantiateT: ((indexname * sort) * typ) list -> typ -> typ val instantiate: ((indexname * sort) * typ) list * ((indexname * typ) * term) list -> term -> term - val instantiateT: ((indexname * sort) * typ) list -> typ -> typ - val instantiate_option: ((indexname * sort) * typ) list * ((indexname * typ) * term) list -> - term -> term option - val instantiateT_option: ((indexname * sort) * typ) list -> typ -> typ option + val instantiateT_same: ((indexname * sort) * typ) list -> typ Same.operation + val instantiate_same: ((indexname * sort) * typ) list * ((indexname * typ) * term) list -> + term Same.operation val zero_var_indexes: term -> term val zero_var_indexes_inst: term list -> ((indexname * sort) * typ) list * ((indexname * typ) * term) list @@ -70,8 +72,6 @@ (* generalization of fixed variables *) -local - fun generalizeT_same [] _ _ = raise Same.SAME | generalizeT_same tfrees idx ty = let @@ -99,16 +99,12 @@ | gen (t $ u) = (gen t $ Same.commit gen u handle Same.SAME => t $ gen u); in gen tm end; -in - -fun generalize names i tm = generalize_same names i tm handle Same.SAME => tm; -fun generalizeT names i ty = generalizeT_same names i ty handle Same.SAME => ty; +fun generalize names i tm = Same.commit (generalize_same names i) tm; +fun generalizeT names i ty = Same.commit (generalizeT_same names i) ty; fun generalize_option names i tm = SOME (generalize_same names i tm) handle Same.SAME => NONE; fun generalizeT_option names i ty = SOME (generalizeT_same names i ty) handle Same.SAME => NONE; -end; - (* instantiation of schematic variables (types before terms) -- recomputes maxidx *) @@ -118,7 +114,7 @@ fun no_indexes1 inst = map no_index inst; fun no_indexes2 (inst1, inst2) = (map no_index inst1, map no_index inst2); -fun instantiateT_same maxidx instT ty = +fun instT_same maxidx instT ty = let fun maxify i = if i > ! maxidx then maxidx := i else (); @@ -134,11 +130,11 @@ | subst_typs [] = raise Same.SAME; in subst_typ ty end; -fun instantiate_same maxidx (instT, inst) tm = +fun inst_same maxidx (instT, inst) tm = let fun maxify i = if i > ! 
maxidx then maxidx := i else (); - val substT = instantiateT_same maxidx instT; + val substT = instT_same maxidx instT; fun subst (Const (c, T)) = Const (c, substT T) | subst (Free (x, T)) = Free (x, substT T) | subst (Var ((x, i), T)) = @@ -158,31 +154,23 @@ fun instantiateT_maxidx instT ty i = let val maxidx = Unsynchronized.ref i - in (instantiateT_same maxidx instT ty handle Same.SAME => ty, ! maxidx) end; + in (Same.commit (instT_same maxidx instT) ty, ! maxidx) end; fun instantiate_maxidx insts tm i = let val maxidx = Unsynchronized.ref i - in (instantiate_same maxidx insts tm handle Same.SAME => tm, ! maxidx) end; + in (Same.commit (inst_same maxidx insts) tm, ! maxidx) end; fun instantiateT [] ty = ty - | instantiateT instT ty = - (instantiateT_same (Unsynchronized.ref ~1) (no_indexes1 instT) ty - handle Same.SAME => ty); + | instantiateT instT ty = Same.commit (instT_same (Unsynchronized.ref ~1) (no_indexes1 instT)) ty; fun instantiate ([], []) tm = tm - | instantiate insts tm = - (instantiate_same (Unsynchronized.ref ~1) (no_indexes2 insts) tm - handle Same.SAME => tm); + | instantiate insts tm = Same.commit (inst_same (Unsynchronized.ref ~1) (no_indexes2 insts)) tm; -fun instantiateT_option [] _ = NONE - | instantiateT_option instT ty = - (SOME (instantiateT_same (Unsynchronized.ref ~1) (no_indexes1 instT) ty) - handle Same.SAME => NONE); +fun instantiateT_same [] _ = raise Same.SAME + | instantiateT_same instT ty = instT_same (Unsynchronized.ref ~1) (no_indexes1 instT) ty; -fun instantiate_option ([], []) _ = NONE - | instantiate_option insts tm = - (SOME (instantiate_same (Unsynchronized.ref ~1) (no_indexes2 insts) tm) - handle Same.SAME => NONE); +fun instantiate_same ([], []) _ = raise Same.SAME + | instantiate_same insts tm = inst_same (Unsynchronized.ref ~1) (no_indexes2 insts) tm; end; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/theory.ML --- a/src/Pure/theory.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/theory.ML Tue May 04 20:30:22 2010 +0200 @@ -238,7 +238,7 @@ fun check_def thy unchecked overloaded (b, tm) defs = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val name = Sign.full_name thy b; val ((lhs, rhs), _) = Primitive_Defs.dest_def ctxt Term.is_Const (K false) (K false) tm handle TERM (msg, _) => error msg; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/thm.ML --- a/src/Pure/thm.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/thm.ML Tue May 04 20:30:22 2010 +0200 @@ -69,7 +69,6 @@ val weaken: cterm -> thm -> thm val weaken_sorts: sort list -> cterm -> cterm val extra_shyps: thm -> sort list - val strip_shyps: thm -> thm (*meta rules*) val assume: cterm -> thm @@ -92,8 +91,6 @@ val instantiate: (ctyp * ctyp) list * (cterm * cterm) list -> thm -> thm val instantiate_cterm: (ctyp * ctyp) list * (cterm * cterm) list -> cterm -> cterm val trivial: cterm -> thm - val of_class: ctyp * class -> thm - val unconstrainT: ctyp -> thm -> thm val dest_state: thm * int -> (term * term) list * term list * term * term val lift_rule: cterm -> thm -> thm val incr_indexes: int -> thm -> thm @@ -139,7 +136,11 @@ val adjust_maxidx_thm: int -> thm -> thm val varifyT_global: thm -> thm val varifyT_global': (string * sort) list -> thm -> ((string * sort) * indexname) list * thm - val freezeT: thm -> thm + val of_class: ctyp * class -> thm + val strip_shyps: thm -> thm + val unconstrainT: ctyp -> thm -> thm + val unconstrain_allTs: thm -> thm + val legacy_freezeT: thm -> thm val assumption: int -> thm -> thm Seq.seq val eq_assumption: int -> thm 
-> thm val rotate_rule: int -> int -> thm -> thm @@ -475,26 +476,6 @@ val sorts' = Sorts.union sorts more_sorts; in Cterm {thy_ref = Theory.check_thy thy, t = t, T = T, maxidx = maxidx, sorts = sorts'} end; - - -(** sort contexts of theorems **) - -(*remove extra sorts that are witnessed by type signature information*) -fun strip_shyps (thm as Thm (_, {shyps = [], ...})) = thm - | strip_shyps (thm as Thm (der, {thy_ref, tags, maxidx, shyps, hyps, tpairs, prop})) = - let - val thy = Theory.deref thy_ref; - val present = (fold_terms o fold_types o fold_atyps_sorts) (insert (eq_snd op =)) thm []; - val extra = fold (Sorts.remove_sort o #2) present shyps; - val witnessed = Sign.witness_sorts thy present extra; - val extra' = fold (Sorts.remove_sort o #2) witnessed extra - |> Sorts.minimal_sorts (Sign.classes_of thy); - val shyps' = fold (Sorts.insert_sort o #2) present extra'; - in - Thm (der, {thy_ref = Theory.check_thy thy, tags = tags, maxidx = maxidx, - shyps = shyps', hyps = hyps, tpairs = tpairs, prop = prop}) - end; - (*dangling sort constraints of a thm*) fun extra_shyps (th as Thm (_, {shyps, ...})) = Sorts.subtract (fold_terms Sorts.insert_term th []) shyps; @@ -531,6 +512,9 @@ fun deriv_rule1 f = deriv_rule2 (K f) empty_deriv; fun deriv_rule0 prf = deriv_rule1 I (make_deriv [] [] [] prf); +fun deriv_rule_unconditional f (Deriv {promises, body = PBody {oracles, thms, proof}}) = + make_deriv promises oracles thms (f proof); + (* fulfilled proofs *) @@ -564,14 +548,13 @@ (* future rule *) -fun future_result i orig_thy orig_shyps orig_prop raw_thm = +fun future_result i orig_thy orig_shyps orig_prop thm = let + fun err msg = raise THM ("future_result: " ^ msg, 0, [thm]); + val Thm (Deriv {promises, ...}, {thy_ref, shyps, hyps, tpairs, prop, ...}) = thm; + + val _ = Theory.eq_thy (Theory.deref thy_ref, orig_thy) orelse err "bad theory"; val _ = Theory.check_thy orig_thy; - val thm = strip_shyps (transfer orig_thy raw_thm); - val _ = Theory.check_thy orig_thy; - fun err msg = raise THM ("future_result: " ^ msg, 0, [thm]); - - val Thm (Deriv {promises, ...}, {shyps, hyps, tpairs, prop, ...}) = thm; val _ = prop aconv orig_prop orelse err "bad prop"; val _ = null tpairs orelse err "bad tpairs"; val _ = null hyps orelse err "bad hyps"; @@ -1219,6 +1202,25 @@ else raise THM ("of_class: type not of class " ^ Syntax.string_of_sort_global thy [c], 0, []) end; +(*Remove extra sorts that are witnessed by type signature information*) +fun strip_shyps (thm as Thm (_, {shyps = [], ...})) = thm + | strip_shyps (thm as Thm (der, {thy_ref, tags, maxidx, shyps, hyps, tpairs, prop})) = + let + val thy = Theory.deref thy_ref; + val algebra = Sign.classes_of thy; + + val present = (fold_terms o fold_types o fold_atyps_sorts) (insert (eq_fst op =)) thm []; + val extra = fold (Sorts.remove_sort o #2) present shyps; + val witnessed = Sign.witness_sorts thy present extra; + val extra' = fold (Sorts.remove_sort o #2) witnessed extra + |> Sorts.minimal_sorts algebra; + val shyps' = fold (Sorts.insert_sort o #2) present extra'; + in + Thm (deriv_rule_unconditional (Pt.strip_shyps_proof algebra present witnessed extra') der, + {thy_ref = Theory.check_thy thy, tags = tags, maxidx = maxidx, + shyps = shyps', hyps = hyps, tpairs = tpairs, prop = prop}) + end; + (*Internalize sort constraints of type variable*) fun unconstrainT (Ctyp {thy_ref = thy_ref1, T, ...}) @@ -1240,6 +1242,11 @@ prop = Logic.list_implies (constraints, unconstrain prop)}) end; +fun unconstrain_allTs th = + fold (unconstrainT o ctyp_of 
(theory_of_thm th) o TVar) + (fold_terms Term.add_tvars th []) th; + + (* Replace all TFrees not fixed or in the hyps by new TVars *) fun varifyT_global' fixed (Thm (der, {thy_ref, maxidx, shyps, hyps, tpairs, prop, ...})) = let @@ -1260,14 +1267,14 @@ val varifyT_global = #2 o varifyT_global' []; -(* Replace all TVars by new TFrees *) -fun freezeT (Thm (der, {thy_ref, shyps, hyps, tpairs, prop, ...})) = +(* Replace all TVars by TFrees that are often new *) +fun legacy_freezeT (Thm (der, {thy_ref, shyps, hyps, tpairs, prop, ...})) = let val prop1 = attach_tpairs tpairs prop; val prop2 = Type.legacy_freeze prop1; val (ts, prop3) = Logic.strip_prems (length tpairs, [], prop2); in - Thm (deriv_rule1 (Pt.freezeT prop1) der, + Thm (deriv_rule1 (Pt.legacy_freezeT prop1) der, {thy_ref = thy_ref, tags = [], maxidx = maxidx_of_term prop2, diff -r aace7a969410 -r 8629ac3efb19 src/Pure/type.ML --- a/src/Pure/type.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/type.ML Tue May 04 20:30:22 2010 +0200 @@ -32,6 +32,7 @@ val inter_sort: tsig -> sort * sort -> sort val cert_class: tsig -> class -> class val cert_sort: tsig -> sort -> sort + val minimize_sort: tsig -> sort -> sort val witness_sorts: tsig -> (typ * sort) list -> sort list -> (typ * sort) list type mode val mode_default: mode @@ -52,6 +53,7 @@ val arity_sorts: Pretty.pp -> tsig -> string -> sort -> sort list (*special treatment of type vars*) + val sort_of_atyp: typ -> sort val strip_sorts: typ -> typ val no_tvars: typ -> typ val varify_global: (string * sort) list -> term -> ((string * sort) * indexname) list * term @@ -88,7 +90,7 @@ val hide_type: bool -> string -> tsig -> tsig val add_arity: Pretty.pp -> arity -> tsig -> tsig val add_classrel: Pretty.pp -> class * class -> tsig -> tsig - val merge_tsigs: Pretty.pp -> tsig * tsig -> tsig + val merge_tsig: Pretty.pp -> tsig * tsig -> tsig end; structure Type: TYPE = @@ -159,6 +161,7 @@ fun cert_class (TSig {classes, ...}) = Sorts.certify_class (#2 classes); fun cert_sort (TSig {classes, ...}) = Sorts.certify_sort (#2 classes); +fun minimize_sort (TSig {classes, ...}) = Sorts.minimize_sort (#2 classes); fun witness_sorts (TSig {classes, log_types, ...}) = Sorts.witness_sorts (#2 classes) log_types; @@ -269,6 +272,13 @@ (** special treatment of type vars **) +(* sort_of_atyp *) + +fun sort_of_atyp (TFree (_, S)) = S + | sort_of_atyp (TVar (_, S)) = S + | sort_of_atyp T = raise TYPE ("sort_of_atyp", [T], []); + + (* strip_sorts *) fun strip_sorts (Type (a, Ts)) = Type (a, map strip_sorts Ts) @@ -619,7 +629,7 @@ (* merge type signatures *) -fun merge_tsigs pp (tsig1, tsig2) = +fun merge_tsig pp (tsig1, tsig2) = let val (TSig {classes = (space1, classes1), default = default1, types = types1, log_types = _}) = tsig1; diff -r aace7a969410 -r 8629ac3efb19 src/Pure/variable.ML --- a/src/Pure/variable.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Pure/variable.ML Tue May 04 20:30:22 2010 +0200 @@ -28,7 +28,7 @@ val declare_typ: typ -> Proof.context -> Proof.context val declare_prf: Proofterm.proof -> Proof.context -> Proof.context val declare_thm: thm -> Proof.context -> Proof.context - val thm_context: thm -> Proof.context + val global_thm_context: thm -> Proof.context val variant_frees: Proof.context -> term list -> (string * 'a) list -> (string * 'a) list val bind_term: indexname * term option -> Proof.context -> Proof.context val expand_binds: Proof.context -> term -> term @@ -235,7 +235,7 @@ val declare_prf = Proofterm.fold_proof_terms declare_internal (declare_internal o Logic.mk_type); val 
declare_thm = Thm.fold_terms declare_internal; -fun thm_context th = declare_thm th (ProofContext.init (Thm.theory_of_thm th)); +fun global_thm_context th = declare_thm th (ProofContext.init_global (Thm.theory_of_thm th)); (* renaming term/type frees *) @@ -376,9 +376,9 @@ val (mk_tfrees, frees) = export_inst (declare_prf prf inner) outer; val tfrees = mk_tfrees []; val idx = Proofterm.maxidx_proof prf ~1 + 1; - val gen_term = Term_Subst.generalize_option (tfrees, frees) idx; - val gen_typ = Term_Subst.generalizeT_option tfrees idx; - in Proofterm.map_proof_terms_option gen_term gen_typ prf end; + val gen_term = Term_Subst.generalize_same (tfrees, frees) idx; + val gen_typ = Term_Subst.generalizeT_same tfrees idx; + in Same.commit (Proofterm.map_proof_terms_same gen_term gen_typ) prf end; fun gen_export (mk_tfrees, frees) ths = diff -r aace7a969410 -r 8629ac3efb19 src/Sequents/LK0.thy --- a/src/Sequents/LK0.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/Sequents/LK0.thy Tue May 04 20:30:22 2010 +0200 @@ -15,7 +15,7 @@ global classes "term" -defaultsort "term" +default_sort "term" consts diff -r aace7a969410 -r 8629ac3efb19 src/Sequents/simpdata.ML --- a/src/Sequents/simpdata.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Sequents/simpdata.ML Tue May 04 20:30:22 2010 +0200 @@ -42,13 +42,13 @@ Display.string_of_thm_without_context th)); (*Replace premises x=y, X<->Y by X==Y*) -val mk_meta_prems = - rule_by_tactic +fun mk_meta_prems ctxt = + rule_by_tactic ctxt (REPEAT_FIRST (resolve_tac [@{thm meta_eq_to_obj_eq}, @{thm def_imp_iff}])); (*Congruence rules for = or <-> (instead of ==)*) -fun mk_meta_cong rl = - Drule.export_without_context(mk_meta_eq (mk_meta_prems rl)) +fun mk_meta_cong ss rl = + Drule.export_without_context (mk_meta_eq (mk_meta_prems (Simplifier.the_context ss) rl)) handle THM _ => error("Premises and conclusion of congruence rules must use =-equality or <->"); @@ -71,7 +71,7 @@ setsubgoaler asm_simp_tac setSSolver (mk_solver "safe" safe_solver) setSolver (mk_solver "unsafe" unsafe_solver) - setmksimps (map mk_meta_eq o atomize o gen_all) + setmksimps (K (map mk_meta_eq o atomize o gen_all)) setmkcong mk_meta_cong; val LK_simps = diff -r aace7a969410 -r 8629ac3efb19 src/Tools/Code/code_eval.ML --- a/src/Tools/Code/code_eval.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Tools/Code/code_eval.ML Tue May 04 20:30:22 2010 +0200 @@ -1,4 +1,4 @@ -(* Title: Tools/code/code_eval.ML_ +(* Title: Tools/code/code_eval.ML Author: Florian Haftmann, TU Muenchen Runtime services building on code generation into implementation language SML. 
@@ -9,7 +9,7 @@ val target: string val eval: string option -> string * (unit -> 'a) option Unsynchronized.ref -> ((term -> term) -> 'a -> 'a) -> theory -> term -> string list -> 'a - val evaluation_code: theory -> string list -> string list + val evaluation_code: theory -> string -> string list -> string list -> string * ((string * string) list * (string * string) list) val setup: theory -> theory end; @@ -23,12 +23,14 @@ val eval_struct_name = "Code"; -fun evaluation_code thy tycos consts = +fun evaluation_code thy struct_name_hint tycos consts = let val (consts', (naming, program)) = Code_Thingol.consts_program thy false consts; val tycos' = map (the o Code_Thingol.lookup_tyco naming) tycos; + val struct_name = if struct_name_hint = "" then eval_struct_name + else struct_name_hint; val (ml_code, target_names) = Code_ML.evaluation_code_of thy target - eval_struct_name naming program (consts' @ tycos'); + struct_name naming program (consts' @ tycos'); val (consts'', tycos'') = chop (length consts') target_names; val consts_map = map2 (fn const => fn NONE => error ("Constant " ^ (quote o Code.string_of_const thy) const @@ -45,7 +47,7 @@ fun eval some_target reff postproc thy t args = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; fun evaluator naming program ((_, (_, ty)), t) deps = let val _ = if Code_Thingol.contains_dictvar t then @@ -84,7 +86,8 @@ val (struct_name', ctxt') = if struct_name = "" then ML_Antiquote.variant eval_struct_name ctxt else (struct_name, ctxt); - val acc_code = Lazy.lazy (fn () => evaluation_code (ProofContext.theory_of ctxt) tycos' consts'); + val acc_code = Lazy.lazy + (fn () => evaluation_code (ProofContext.theory_of ctxt) eval_struct_name tycos' consts'); in CodeAntiqData.put ((tycos', consts'), (false, (struct_name', acc_code))) ctxt' end; fun register_const const = register_code [] [const]; @@ -94,19 +97,6 @@ fun print_const const all_struct_name tycos_map consts_map = (Long_Name.append all_struct_name o the o AList.lookup (op =) consts_map) const; -fun print_datatype tyco constrs all_struct_name tycos_map consts_map = - let - val upperize = implode o nth_map 0 Symbol.to_ascii_upper o explode; - fun check_base name name'' = - if upperize (Long_Name.base_name name) = upperize name'' - then () else error ("Name as printed " ^ quote name'' - ^ "\ndiffers from logical base name " ^ quote (Long_Name.base_name name) ^ "; sorry."); - val tyco'' = (the o AList.lookup (op =) tycos_map) tyco; - val constrs'' = map (the o AList.lookup (op =) consts_map) constrs; - val _ = check_base tyco tyco''; - val _ = map2 check_base constrs constrs''; - in "datatype " ^ tyco'' ^ " = datatype " ^ Long_Name.append all_struct_name tyco'' end; - fun print_code is_first print_it ctxt = let val (_, (_, (struct_code_name, acc_code))) = CodeAntiqData.get ctxt; @@ -125,28 +115,119 @@ val background' = register_const const background; in (print_code is_first (print_const const), background') end; -fun ml_code_datatype_antiq (raw_tyco, raw_constrs) background = +end; (*local*) + + +(** reflection support **) + +fun check_datatype thy tyco consts = + let + val constrs = (map fst o snd o Code.get_type thy) tyco; + val missing_constrs = subtract (op =) consts constrs; + val _ = if null missing_constrs then [] + else error ("Missing constructor(s) " ^ commas (map quote missing_constrs) + ^ " for datatype " ^ quote tyco); + val false_constrs = subtract (op =) constrs consts; + val _ = if null false_constrs then [] + else error ("Non-constructor(s) " ^ commas 
(map quote false_constrs) + ^ " for datatype " ^ quote tyco); + in () end; + +fun add_eval_tyco (tyco, tyco') thy = + let + val k = Sign.arity_number thy tyco; + fun pr pr' fxy [] = tyco' + | pr pr' fxy [ty] = + Code_Printer.concat [pr' Code_Printer.BR ty, tyco'] + | pr pr' fxy tys = + Code_Printer.concat [Code_Printer.enum "," "(" ")" (map (pr' Code_Printer.BR) tys), tyco'] + in + thy + |> Code_Target.add_syntax_tyco target tyco (SOME (k, pr)) + end; + +fun add_eval_constr (const, const') thy = let - val thy = ProofContext.theory_of background; - val tyco = Sign.intern_type thy raw_tyco; - val constrs = map (Code.check_const thy) raw_constrs; - val constrs' = (map fst o snd o Code.get_type thy) tyco; - val _ = if eq_set (op =) (constrs, constrs') then () - else error ("Type " ^ quote tyco ^ ": given constructors diverge from real constructors") - val is_first = is_first_occ background; - val background' = register_datatype tyco constrs background; - in (print_code is_first (print_datatype tyco constrs), background') end; + val k = Code.args_number thy const; + fun pr pr' fxy ts = Code_Printer.brackify fxy + (const' :: the_list (Code_ML.print_tuple pr' Code_Printer.BR (map fst ts))); + in + thy + |> Code_Target.add_syntax_const target const (SOME (Code_Printer.simple_const_syntax (k, pr))) + end; + +fun add_eval_const (const, const') = Code_Target.add_syntax_const target + const (SOME (Code_Printer.simple_const_syntax (0, (K o K o K) const'))); -end; (*local*) +fun process (code_body, (tyco_map, (constr_map, const_map))) module_name NONE thy = + let + val pr = Code_Printer.str o Long_Name.append module_name; + in + thy + |> Code_Target.add_reserved target module_name + |> Context.theory_map (ML_Context.exec (fn () => ML_Context.eval true Position.none code_body)) + |> fold (add_eval_tyco o apsnd pr) tyco_map + |> fold (add_eval_constr o apsnd pr) constr_map + |> fold (add_eval_const o apsnd pr) const_map + end + | process (code_body, _) _ (SOME file_name) thy = + let + val preamble = "(* Generated from " ^ Path.implode (ThyLoad.thy_path (Context.theory_name thy)) + ^ "; DO NOT EDIT! 
*)"; + val _ = File.write (Path.explode file_name) (preamble ^ "\n\n" ^ code_body); + in + thy + end; + +fun gen_code_reflect prep_type prep_const raw_datatypes raw_functions module_name some_file thy = + let + val datatypes = map (fn (raw_tyco, raw_cos) => + (prep_type thy raw_tyco, map (prep_const thy) raw_cos)) raw_datatypes; + val _ = map (uncurry (check_datatype thy)) datatypes; + val tycos = map fst datatypes; + val constrs = maps snd datatypes; + val functions = map (prep_const thy) raw_functions; + val result = evaluation_code thy module_name tycos (constrs @ functions) + |> (apsnd o apsnd) (chop (length constrs)); + in + thy + |> process result module_name some_file + end; + +val code_reflect = gen_code_reflect Code_Target.cert_tyco Code.check_const; +val code_reflect_cmd = gen_code_reflect Code_Target.read_tyco Code.read_const; (** Isar setup **) val _ = ML_Context.add_antiq "code" (fn _ => Args.term >> ml_code_antiq); -val _ = ML_Context.add_antiq "code_datatype" (fn _ => - (Args.type_name true --| Scan.lift (Args.$$$ "=") - -- (Args.term ::: Scan.repeat (Scan.lift (Args.$$$ "|") |-- Args.term))) - >> ml_code_datatype_antiq); + +local + +structure P = OuterParse +and K = OuterKeyword + +val datatypesK = "datatypes"; +val functionsK = "functions"; +val fileK = "file"; +val andK = "and" + +val _ = List.app K.keyword [datatypesK, functionsK]; + +val parse_datatype = (P.name --| P.$$$ "=" -- (P.term ::: (Scan.repeat (P.$$$ "|" |-- P.term)))); + +in + +val _ = + OuterSyntax.command "code_reflect" "enrich runtime environment with generated code" + K.thy_decl (P.name -- Scan.optional (P.$$$ datatypesK |-- (parse_datatype + ::: Scan.repeat (P.$$$ andK |-- parse_datatype))) [] + -- Scan.optional (P.$$$ functionsK |-- Scan.repeat1 P.name) [] + -- Scan.option (P.$$$ fileK |-- P.name) + >> (fn (((module_name, raw_datatypes), raw_functions), some_file) => Toplevel.theory + (code_reflect_cmd raw_datatypes raw_functions module_name some_file))); + +end; (*local*) val setup = Code_Target.extend_target (target, (Code_ML.target_SML, K I)); diff -r aace7a969410 -r 8629ac3efb19 src/Tools/Code/code_haskell.ML --- a/src/Tools/Code/code_haskell.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Tools/Code/code_haskell.ML Tue May 04 20:30:22 2010 +0200 @@ -109,10 +109,9 @@ let val (p, vars') = print_bind tyvars some_thm NOBR pat vars; in semicolon [p, str "->", print_term tyvars some_thm vars' NOBR body] end; - in brackify_block fxy - (concat [str "case", print_term tyvars some_thm vars NOBR t, str "of", str "{"]) + in Pretty.block_enclose + (concat [str "(case", print_term tyvars some_thm vars NOBR t, str "of", str "{"], str "})") (map print_select clauses) - (str "}") end | print_case tyvars some_thm vars fxy ((_, []), _) = (brackify fxy o Pretty.breaks o map str) ["error", "\"empty case\""]; @@ -309,10 +308,10 @@ fun serialize_haskell module_prefix raw_module_name string_classes labelled_name raw_reserved includes raw_module_alias - syntax_class syntax_tyco syntax_const (code_of_pretty, code_writeln) program cs destination = + syntax_class syntax_tyco syntax_const (code_of_pretty, code_writeln) program stmt_names destination = let - val stmt_names = Code_Target.stmt_names_of_destination destination; - val module_name = if null stmt_names then raw_module_name else SOME "Code"; + val presentation_stmt_names = Code_Target.stmt_names_of_destination destination; + val module_name = if null presentation_stmt_names then raw_module_name else SOME "Code"; val reserved = fold (insert (op =) o fst) includes 
raw_reserved; val (deresolver, hs_program) = haskell_program_of_program labelled_name module_name module_prefix reserved raw_module_alias program; @@ -365,13 +364,13 @@ ); in print_module module_name' content end; fun serialize_module2 (_, (_, (stmts, _))) = Pretty.chunks2 (map_filter - (fn (name, (_, SOME stmt)) => if null stmt_names - orelse member (op =) stmt_names name + (fn (name, (_, SOME stmt)) => if null presentation_stmt_names + orelse member (op =) presentation_stmt_names name then SOME (print_stmt false (name, stmt)) else NONE | (_, (_, NONE)) => NONE) stmts); val serialize_module = - if null stmt_names then serialize_module1 else pair "" o serialize_module2; + if null presentation_stmt_names then serialize_module1 else pair "" o serialize_module2; fun check_destination destination = (File.check destination; destination); fun write_module destination (modlname, content) = diff -r aace7a969410 -r 8629ac3efb19 src/Tools/Code/code_ml.ML --- a/src/Tools/Code/code_ml.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Tools/Code/code_ml.ML Tue May 04 20:30:22 2010 +0200 @@ -1,4 +1,4 @@ -(* Title: Tools/code/code_ml.ML_ +(* Title: Tools/code/code_ml.ML Author: Florian Haftmann, TU Muenchen Serializer for SML and OCaml. @@ -9,6 +9,8 @@ val target_SML: string val evaluation_code_of: theory -> string -> string -> Code_Thingol.naming -> Code_Thingol.program -> string list -> string * string option list + val print_tuple: (Code_Printer.fixity -> 'a -> Pretty.T) + -> Code_Printer.fixity -> 'a list -> Pretty.T option val setup: theory -> theory end; diff -r aace7a969410 -r 8629ac3efb19 src/Tools/Code/code_preproc.ML --- a/src/Tools/Code/code_preproc.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Tools/Code/code_preproc.ML Tue May 04 20:30:22 2010 +0200 @@ -87,7 +87,7 @@ fun add_unfold_post raw_thm thy = let - val thm = Local_Defs.meta_rewrite_rule (ProofContext.init thy) raw_thm; + val thm = Local_Defs.meta_rewrite_rule (ProofContext.init_global thy) raw_thm; val thm_sym = Thm.symmetric thm; in thy |> map_pre_post (fn (pre, post) => @@ -157,7 +157,7 @@ fun print_codeproc thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val pre = (#pre o the_thmproc) thy; val post = (#post o the_thmproc) thy; val functrans = (map fst o #functrans o the_thmproc) thy; diff -r aace7a969410 -r 8629ac3efb19 src/Tools/Code/code_scala.ML --- a/src/Tools/Code/code_scala.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Tools/Code/code_scala.ML Tue May 04 20:30:22 2010 +0200 @@ -340,10 +340,10 @@ fun serialize_scala raw_module_name labelled_name raw_reserved includes raw_module_alias - _ syntax_tyco syntax_const (code_of_pretty, code_writeln) program cs destination = + _ syntax_tyco syntax_const (code_of_pretty, code_writeln) program stmt_names destination = let - val stmt_names = Code_Target.stmt_names_of_destination destination; - val module_name = if null stmt_names then raw_module_name else SOME "Code"; + val presentation_stmt_names = Code_Target.stmt_names_of_destination destination; + val module_name = if null presentation_stmt_names then raw_module_name else SOME "Code"; val reserved = fold (insert (op =) o fst) includes raw_reserved; val (deresolver, (the_module_name, sca_program)) = scala_program_of_program labelled_name module_name reserved raw_module_alias program; diff -r aace7a969410 -r 8629ac3efb19 src/Tools/Code/code_target.ML --- a/src/Tools/Code/code_target.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Tools/Code/code_target.ML Tue May 04 20:30:22 2010 +0200 @@ -6,6 +6,9 @@ 
signature CODE_TARGET = sig + val cert_tyco: theory -> string -> string + val read_tyco: theory -> string -> string + type serializer type literals = Code_Printer.literals val add_target: string * (serializer * literals) -> theory -> theory @@ -276,7 +279,7 @@ (Symtab.lookup module_alias) (Symtab.lookup class') (Symtab.lookup tyco') (Symtab.lookup const') (Code_Printer.string_of_pretty width, Code_Printer.writeln_pretty width) - program4 names2 + program4 names1 end; fun mount_serializer thy alt_serializer target some_width module args naming program names = diff -r aace7a969410 -r 8629ac3efb19 src/Tools/WWW_Find/find_theorems.ML --- a/src/Tools/WWW_Find/find_theorems.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Tools/WWW_Find/find_theorems.ML Tue May 04 20:30:22 2010 +0200 @@ -210,7 +210,7 @@ fun do_find () = let - val ctxt = ProofContext.init (theory thy_name); + val ctxt = ProofContext.init_global (theory thy_name); val query = get_query (); val (othmslen, thms) = apsnd rev (Find_Theorems.find_theorems ctxt NONE (SOME limit) with_dups query); diff -r aace7a969410 -r 8629ac3efb19 src/Tools/induct.ML --- a/src/Tools/induct.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Tools/induct.ML Tue May 04 20:30:22 2010 +0200 @@ -46,7 +46,8 @@ val coinduct_pred: string -> attribute val coinduct_del: attribute val map_simpset: (simpset -> simpset) -> Context.generic -> Context.generic - val add_simp_rule: attribute + val induct_simp_add: attribute + val induct_simp_del: attribute val no_simpN: string val casesN: string val inductN: string @@ -320,8 +321,14 @@ val coinduct_del = del_att map3; fun map_simpset f = InductData.map (map4 f); -fun add_simp_rule (ctxt, thm) = - (map_simpset (fn ss => ss addsimps [thm]) ctxt, thm); + +fun induct_simp f = + Thm.declaration_attribute (fn thm => fn context => + (map_simpset + (Simplifier.with_context (Context.proof_of context) (fn ss => f (ss, [thm]))) context)); + +val induct_simp_add = induct_simp (op addsimps); +val induct_simp_del = induct_simp (op delsimps); end; @@ -359,7 +366,7 @@ "declaration of induction rule" #> Attrib.setup @{binding coinduct} (attrib coinduct_type coinduct_pred coinduct_del) "declaration of coinduction rule" #> - Attrib.setup @{binding induct_simp} (Scan.succeed add_simp_rule) + Attrib.setup @{binding induct_simp} (Attrib.add_del induct_simp_add induct_simp_del) "declaration of rules for simplifying induction or cases rules"; end; diff -r aace7a969410 -r 8629ac3efb19 src/Tools/jEdit/README_BUILD --- a/src/Tools/jEdit/README_BUILD Tue May 04 19:57:55 2010 +0200 +++ b/src/Tools/jEdit/README_BUILD Tue May 04 20:30:22 2010 +0200 @@ -74,3 +74,17 @@ releases. (See http://java.sun.com/j2se/1.5.0/docs/guide/jpda/conninv.html) ----------------------------------------------------------------------- + + +Known problems with Mac OS +========================== + +- The MacOSX plugin disrupts regular C-X/C/V operations, e.g. between + the editor and the Console plugin, which is a standard swing text + box. Similar for search boxes etc. + +- Anti-aliasing does not really work as well as for Linux or Windows. + (General Apple/Swing problem?) + +- Font.createFont mangles the font family of non-regular fonts, + e.g. bold. 
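
The induct.ML hunk above replaces the single add_simp_rule attribute with an induct_simp_add/induct_simp_del pair, routed through the new Simplifier.with_context introduced in the simplifier.ML hunk further up. A minimal sketch of that attribute pattern follows; My_Data, my_simp and the binding name are hypothetical stand-ins, not part of the changeset.

(* sketch only: My_Data is a hypothetical context data slot holding a simpset *)
fun map_my_ss f context =
  My_Data.map (Simplifier.with_context (Context.proof_of context) f) context;

fun my_simp f =
  Thm.declaration_attribute (fn thm => fn context =>
    map_my_ss (fn ss => f (ss, [thm])) context);

val my_simp_add = my_simp (op addsimps);
val my_simp_del = my_simp (op delsimps);

(* wired up as one attribute with optional "del" syntax *)
val my_simp_setup =
  Attrib.setup @{binding my_simp} (Attrib.add_del my_simp_add my_simp_del)
    "declaration of my simplification rules";

Attrib.add_del supplies the usual add/del concrete syntax, which is exactly what the induct_simp setup in the hunk above relies on.
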
diff -r aace7a969410 -r 8629ac3efb19 src/Tools/nbe.ML --- a/src/Tools/nbe.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Tools/nbe.ML Tue May 04 20:30:22 2010 +0200 @@ -105,14 +105,14 @@ |> Drule.cterm_rule Thm.varifyT_global |> Thm.instantiate_cterm (Thm.certify_inst thy (map (fn (v, ((sort, _), sort')) => (((v, 0), sort), TFree (v, sort'))) vs, [])) - |> Drule.cterm_rule Thm.freezeT + |> Drule.cterm_rule Thm.legacy_freezeT |> conv |> Thm.varifyT_global |> fold (fn (v, (_, sort')) => Thm.unconstrainT (certT (TVar ((v, 0), sort')))) vs |> Thm.certify_instantiate (map (fn (v, ((sort, _), _)) => (((v, 0), []), TVar ((v, 0), sort))) vs, []) |> strip_of_class - |> Thm.freezeT + |> Thm.legacy_freezeT end; fun lift_triv_classes_rew thy rew t = @@ -521,7 +521,7 @@ fun compile_eval thy program vs_t deps = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val (gr, (_, idx_tab)) = Nbe_Functions.change thy (ensure_stmts ctxt program); in diff -r aace7a969410 -r 8629ac3efb19 src/Tools/quickcheck.ML --- a/src/Tools/quickcheck.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/Tools/quickcheck.ML Tue May 04 20:30:22 2010 +0200 @@ -295,7 +295,7 @@ fun quickcheck_params_cmd args thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val f = fold (parse_test_param ctxt) args; in thy diff -r aace7a969410 -r 8629ac3efb19 src/ZF/Main_ZF.thy --- a/src/ZF/Main_ZF.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/ZF/Main_ZF.thy Tue May 04 20:30:22 2010 +0200 @@ -71,7 +71,7 @@ declaration {* fn _ => - Simplifier.map_ss (fn ss => ss setmksimps (map mk_eq o Ord_atomize o gen_all)) + Simplifier.map_ss (fn ss => ss setmksimps (K (map mk_eq o Ord_atomize o gen_all))) *} end diff -r aace7a969410 -r 8629ac3efb19 src/ZF/OrdQuant.thy --- a/src/ZF/OrdQuant.thy Tue May 04 19:57:55 2010 +0200 +++ b/src/ZF/OrdQuant.thy Tue May 04 20:30:22 2010 +0200 @@ -363,7 +363,7 @@ ZF_mem_pairs); *} declaration {* fn _ => - Simplifier.map_ss (fn ss => ss setmksimps (map mk_eq o Ord_atomize o gen_all)) + Simplifier.map_ss (fn ss => ss setmksimps (K (map mk_eq o Ord_atomize o gen_all))) *} text {* Setting up the one-point-rule simproc *} diff -r aace7a969410 -r 8629ac3efb19 src/ZF/Tools/datatype_package.ML --- a/src/ZF/Tools/datatype_package.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/ZF/Tools/datatype_package.ML Tue May 04 20:30:22 2010 +0200 @@ -401,7 +401,7 @@ fun add_datatype (sdom, srec_tms) scon_ty_lists (raw_monos, raw_type_intrs, raw_type_elims) thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; fun read_is strs = map (Syntax.parse_term ctxt #> TypeInfer.constrain @{typ i}) strs |> Syntax.check_terms ctxt; diff -r aace7a969410 -r 8629ac3efb19 src/ZF/Tools/ind_cases.ML --- a/src/ZF/Tools/ind_cases.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/ZF/Tools/ind_cases.ML Tue May 04 20:30:22 2010 +0200 @@ -6,7 +6,7 @@ signature IND_CASES = sig - val declare: string -> (simpset -> cterm -> thm) -> theory -> theory + val declare: string -> (Proof.context -> conv) -> theory -> theory val inductive_cases: (Attrib.binding * string list) list -> theory -> theory val setup: theory -> theory end; @@ -19,7 +19,7 @@ structure IndCasesData = Theory_Data ( - type T = (simpset -> cterm -> thm) Symtab.table; + type T = (Proof.context -> cterm -> thm) Symtab.table; val empty = Symtab.empty; val extend = I; fun merge data = Symtab.merge (K true) data; @@ -28,16 +28,17 @@ fun declare name f = IndCasesData.map (Symtab.update (name, f)); -fun smart_cases thy ss read_prop s 
= +fun smart_cases ctxt s = let + val thy = ProofContext.theory_of ctxt; fun err msg = cat_error msg ("Malformed set membership statement: " ^ s); - val A = read_prop s handle ERROR msg => err msg; + val A = Syntax.read_prop ctxt s handle ERROR msg => err msg; val c = #1 (Term.dest_Const (Term.head_of (#2 (Ind_Syntax.dest_mem (FOLogic.dest_Trueprop (Logic.strip_imp_concl A)))))) handle TERM _ => err ""; in (case Symtab.lookup (IndCasesData.get thy) c of NONE => error ("Unknown inductive cases rule for set " ^ quote c) - | SOME f => f ss (Thm.cterm_of thy A)) + | SOME f => f ctxt (Thm.cterm_of thy A)) end; @@ -45,10 +46,10 @@ fun inductive_cases args thy = let - val mk_cases = smart_cases thy (global_simpset_of thy) (Syntax.read_prop_global thy); + val ctxt = ProofContext.init_global thy; val facts = args |> map (fn ((name, srcs), props) => ((name, map (Attrib.attribute thy) srcs), - map (Thm.no_attributes o single o mk_cases) props)); + map (Thm.no_attributes o single o smart_cases ctxt) props)); in thy |> PureThy.note_thmss "" facts |> snd end; @@ -57,10 +58,7 @@ val setup = Method.setup @{binding "ind_cases"} (Scan.lift (Scan.repeat1 Args.name_source) >> - (fn props => fn ctxt => - props - |> map (smart_cases (ProofContext.theory_of ctxt) (simpset_of ctxt) (Syntax.read_prop ctxt)) - |> Method.erule 0)) + (fn props => fn ctxt => Method.erule 0 (map (smart_cases ctxt) props))) "dynamic case analysis on sets"; diff -r aace7a969410 -r 8629ac3efb19 src/ZF/Tools/induct_tacs.ML --- a/src/ZF/Tools/induct_tacs.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/ZF/Tools/induct_tacs.ML Tue May 04 20:30:22 2010 +0200 @@ -163,7 +163,7 @@ fun rep_datatype raw_elim raw_induct raw_case_eqns raw_recursor_eqns thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val elim = Facts.the_single "elimination" (Attrib.eval_thms ctxt [raw_elim]); val induct = Facts.the_single "induction" (Attrib.eval_thms ctxt [raw_induct]); val case_eqns = Attrib.eval_thms ctxt raw_case_eqns; diff -r aace7a969410 -r 8629ac3efb19 src/ZF/Tools/inductive_package.ML --- a/src/ZF/Tools/inductive_package.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/ZF/Tools/inductive_package.ML Tue May 04 20:30:22 2010 +0200 @@ -62,7 +62,7 @@ raw_intr_specs (monos, con_defs, type_intrs, type_elims) thy = let val _ = Theory.requires thy "Inductive_ZF" "(co)inductive definitions"; - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val intr_specs = map (apfst (apfst Binding.name_of)) raw_intr_specs; val (intr_names, intr_tms) = split_list (map fst intr_specs); @@ -174,7 +174,7 @@ |> Sign.add_path big_rec_base_name |> PureThy.add_defs false (map (Thm.no_attributes o apfst Binding.name) axpairs); - val ctxt1 = ProofContext.init thy1; + val ctxt1 = ProofContext.init_global thy1; (*fetch fp definitions from the theory*) @@ -261,16 +261,16 @@ THEN (PRIMITIVE (fold_rule part_rec_defs)); (*Elimination*) - val elim = rule_by_tactic basic_elim_tac + val elim = rule_by_tactic (ProofContext.init_global thy1) basic_elim_tac (unfold RS Ind_Syntax.equals_CollectD) (*Applies freeness of the given constructors, which *must* be unfolded by the given defs. Cannot simply use the local con_defs because con_defs=[] for inference systems. 
Proposition A should have the form t:Si where Si is an inductive set*) - fun make_cases ss A = - rule_by_tactic - (basic_elim_tac THEN ALLGOALS (asm_full_simp_tac ss) THEN basic_elim_tac) + fun make_cases ctxt A = + rule_by_tactic ctxt + (basic_elim_tac THEN ALLGOALS (asm_full_simp_tac (simpset_of ctxt)) THEN basic_elim_tac) (Thm.assume A RS elim) |> Drule.export_without_context_open; @@ -328,7 +328,7 @@ (*We use a MINIMAL simpset. Even FOL_ss contains too many simpules. If the premises get simplified, then the proofs could fail.*) val min_ss = Simplifier.global_context thy empty_ss - setmksimps (map mk_eq o ZF_atomize o gen_all) + setmksimps (K (map mk_eq o ZF_atomize o gen_all)) setSolver (mk_solver "minimal" (fn prems => resolve_tac (triv_rls@prems) ORELSE' assume_tac @@ -554,7 +554,7 @@ fun add_inductive (srec_tms, sdom_sum) intr_srcs (raw_monos, raw_con_defs, raw_type_intrs, raw_type_elims) thy = let - val ctxt = ProofContext.init thy; + val ctxt = ProofContext.init_global thy; val read_terms = map (Syntax.parse_term ctxt #> TypeInfer.constrain Ind_Syntax.iT) #> Syntax.check_terms ctxt; diff -r aace7a969410 -r 8629ac3efb19 src/ZF/simpdata.ML --- a/src/ZF/simpdata.ML Tue May 04 19:57:55 2010 +0200 +++ b/src/ZF/simpdata.ML Tue May 04 20:30:22 2010 +0200 @@ -44,7 +44,7 @@ val ZF_atomize = atomize (ZF_conn_pairs, ZF_mem_pairs); change_simpset (fn ss => - ss setmksimps (map mk_eq o ZF_atomize o gen_all) + ss setmksimps (K (map mk_eq o ZF_atomize o gen_all)) addcongs [@{thm if_weak_cong}]); local
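
A rename runs through most files above (sign.ML, simplifier.ML, theory.ML, the code generator modules, the ZF packages): proof contexts built directly from a background theory now use ProofContext.init_global, and Variable.thm_context becomes Variable.global_thm_context. Adapted calls look like the following sketch, where thy and th are an arbitrary theory and theorem.

(* sketches: thy and th are an arbitrary theory and theorem *)
val ctxt  = ProofContext.init_global thy;      (* formerly ProofContext.init thy *)
val ctxt' = Variable.global_thm_context th;    (* formerly Variable.thm_context th *)
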
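
rule_by_tactic, per the tactic.ML hunk above, now takes an explicit Proof.context; the Sequents and ZF simpdata/inductive_package hunks thread that context into mk_meta_prems and make_cases. A hedged sketch of an adapted caller; my_strengthen is a hypothetical name, and the two theorems are simply those used in the Sequents hunk.

(* sketch: derive a variant of rl by running a tactic over it in ctxt *)
fun my_strengthen ctxt rl =
  rule_by_tactic ctxt
    (REPEAT_FIRST (resolve_tac [@{thm meta_eq_to_obj_eq}, @{thm def_imp_iff}])) rl;
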
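
The Term_Subst and Proofterm hunks above trade the *_option interface for Same.operation values: a function either returns a changed result or raises Same.SAME, and Same.commit turns it back into a total function. A small illustration of the idiom; my_rename_same is hypothetical.

(* partial operation: succeeds only if it actually changes something *)
fun my_rename_same (Free ("x", T)) = Free ("y", T)
  | my_rename_same _ = raise Same.SAME;

(* total version: an unchanged argument is returned as is *)
val my_rename = Same.commit my_rename_same;

The changeset uses the same scheme throughout, e.g. generalize is now defined as Same.commit (generalize_same names i) instead of going through the former option-valued plumbing.
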
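
Finally, the repeated setmksimps (K ...) edits (Pure/simplifier.ML, Sequents/simpdata.ML, the ZF theories and inductive_package above) reflect that the mksimps argument now also receives the simpset, presumably so that the extractor can consult its context; existing context-free extractors are lifted with K. A sketch, with my_ss a hypothetical simpset and the extractor taken from the ZF hunks above:

(* before: my_ss setmksimps (map mk_eq o ZF_atomize o gen_all) *)
val my_ss' = my_ss setmksimps (K (map mk_eq o ZF_atomize o gen_all));
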